././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6254966 openstacksdk-4.0.0/0000775000175000017500000000000000000000000014227 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/.coveragerc0000664000175000017500000000013700000000000016351 0ustar00zuulzuul00000000000000[run] branch = True source = openstack omit = openstack/tests/* [report] ignore_errors = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/.git-blame-ignore-revs0000664000175000017500000000365400000000000020337 0ustar00zuulzuul00000000000000# You can configure git to automatically use this file with the following config: # git config --global blame.ignoreRevsFile .git-blame-ignore-revs c7010a2f929de9fad4e1a7c7f5a17cb8e210432a # Bump black to 23.3.0 a36f514295a4b4e6157ce69a210f653bcc4df7f2 # Blackify everything else 004c7352d0a4fb467a319ae9743eb6ca5ee9ce7f # Blackify openstack.cloud c2ff7336cecabc665e7bf04cbe87ef8d0c2e6f9f # Blackify openstack.clustering 073abda5a94b12a319c79d6a9b8594036f95fc65 # Blackify openstack.container_infrastructure_management 570b81f0ec3b3876aefbb223c78093f2a957bb01 # Blackify openstack.accelerator 33bed575013f11e4d408593e53c6c99ca66d6110 # Blackify openstack.instance_ha 10018dbf5be5e19c87543a5931f6809006eba4c5 # Blackify openstack.dns 19ec9ba383d14f4af6a1bb78dbbeaa6638ee8a4f # Blackify openstack.database 0e2b5d263fdf12e0c8a67503712afab2816ef2d0 # Blackify openstack.message 9d3d986241ce110e8f6bdf3ecb19609dc417a10a # Blackify openstack.workflow 874ea74103a0c833df7668a45b96b7145a8158a2 # Blackify openstack.orchestration 409f648ce506d7e768305f75025c4b01c5fa3008 # Blackify openstack.placement 93d8f41713ec2128210bf0a8479a5f3872ce0382 # Blackify openstack.key_manager 3d2511f98025d2d2826e13cea8be7545e90990f7 # Blackify 
openstack.shared_file_system 82c2a534024cff7690620876723422a98e8f371a # Blackify openstack.load_balancer f8e42017e756e383367145c4caf39de796babcba # Blackify openstack.baremetal, openstack.baremetal_introspection 4589e293e829950d2fd4c705cce2f7ce30ca9e29 # Blackify openstack.object_store 34da09f3125ccd0408f2e0019c85d95188fef573 # Blackify openstack.block_storage 542ddaa1ad5cfc9b9876de3de0759941c9a9ea83 # Blackify openstack.identity f526b990f31de03a1b6181a4724976e1b86a654a # Blackify openstack.network bcf99f3433ceecf9a210d0aa0580a67645ccf7ee # Blackify openstack.image 69735d3bd8fd874a9817c26b5b009921110fb416 # Blackify openstack.compute (tests) 395a77298ecd79623b1af75ad0dc7653f5e4eb61 # Blackify openstack.compute ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/.mailmap0000664000175000017500000000033600000000000015652 0ustar00zuulzuul00000000000000# Format is: # # ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/.pre-commit-config.yaml0000664000175000017500000000275200000000000020516 0ustar00zuulzuul00000000000000--- default_language_version: # force all unspecified python hooks to run python3 python: python3 repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.6.0 hooks: - id: trailing-whitespace - id: mixed-line-ending args: ['--fix', 'lf'] exclude: '.*\.(svg)$' - id: check-byte-order-marker - id: check-executables-have-shebangs - id: check-merge-conflict - id: debug-statements - id: check-yaml files: .*\.(yaml|yml)$ exclude: '^zuul.d/.*$' - repo: https://github.com/PyCQA/doc8 rev: v1.1.1 hooks: - id: doc8 - repo: https://github.com/asottile/pyupgrade rev: v3.16.0 hooks: - id: pyupgrade args: ['--py38-plus'] - repo: https://github.com/psf/black rev: 24.4.2 hooks: - id: black args: ['-S', '-l', '79'] - repo: https://opendev.org/openstack/hacking rev: 6.1.0 hooks: - id: hacking 
additional_dependencies: - flake8-import-order~=0.18.2 exclude: '^(doc|releasenotes|tools)/.*$' - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.10.1 hooks: - id: mypy additional_dependencies: - types-decorator - types-PyYAML - types-requests - types-simplejson # keep this in-sync with '[mypy] exclude' in 'setup.cfg' exclude: | (?x)( doc/.* | examples/.* | releasenotes/.* ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/.stestr.conf0000664000175000017500000000006600000000000016502 0ustar00zuulzuul00000000000000[DEFAULT] test_path=./openstack/tests/unit top_dir=./ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296384.0 openstacksdk-4.0.0/AUTHORS0000664000175000017500000004630000000000000015302 0ustar00zuulzuul00000000000000Aaron-DH Abhijeet Kasurde Adam Gandelman Adam Harwell Adam Sheldon Ade Lee Adrian Turjak Adrian Turjak Adrien Pennsart Adrien Pensart Aija Jauntēva Akihiro Motoki Alberto Gireud Ales Musil Alex Gaynor Alex Schultz Allain Legacy Alon Bar Tzlil Alvaro Aleman Alvaro Lopez Garcia Amol Kahat Andreas Jaeger Andrew Bogott Andrey Shestakov Andy Botting Anindita Das Anita Kuno Ankit Agrawal Ankur Gupta Anne Gentle Anton Sidelnikov Antoni Segura Puimedon Antonia Gaete Anvi Joshi Apoorv Agrawal Areg Grigoryan Arie Arie Bregman Arkady Shtempler Artem Goncharov Artem Goncharov ArtofBugs <74070945+ArtofBugs@users.noreply.github.com> Artom Lifshitz Ashley Rodriguez Atsushi SAKAI B.Haleblian Bailey Miller Balazs Gibizer Bence Romsics Bence Romsics Benoît Knecht Bernard Cafarelli Bharat Kunwar Bharat Kunwar Bo Tran Bob Ball Bodo Petermann Bogdan Dobrelya Bram Verschueren Brian Curtin Brian Curtin Britt Houser Béla Vancsics Caleb Boylan Cao Xuan Hoang Cao Xuan Hoang Carlos Goncalves Carlos Goncalves Cedric Brandily Cenne ChangBo Guo(gcb) Charlie Choe, Cheng-Dae Chris Church Christian Berendt Christian Berendt Christian 
Rohmann Christian Zunker Cindia-blue Clark Boylan Clayton O'Neill Clint Byrum Colleen Murphy Corey Bryant Corey Wright Cyril Roelandt Daniel Mellado Daniel Speichert Daniel Speichert Daniel Wallace Daniel Wilson Danila Balagansky Dao Cong Tien David Shrewsbury David Shrewsbury Davide Guerri Dean Troyer Dean Troyer Devananda van der Veen Dinesh Bhor Ding Baojian Dirk Mueller Dmitrii Shcherbakov Dmitriy Rabotjagov Dmitriy Rabotyagov Dmitriy Rabotyagov Dmitry Tantsur Dmitry Tantsur Dolph Mathews Dongcan Ye Donovan Jones Doug Goldstein Doug Hellmann Doug Hellmann Doug Wiegley Doug Wiegley Douglas Mendizábal Dr. Jens Harbott Dr. Jens Harbott Duan Jiong Duc Truong Dylan Zapzalka Ebil Jacob EdLeafe Emilien Lefrancois Eric Fried Eric Harney Eric Lafontaine Erik Olof Gunnar Andersson Ethan Lynn Ethan Lynn Ethan Lynn Lin EunYoung Kim Everett Toews Feilong Wang Felix Huettner Flavio Percoco GA EUM KIM Ghanshyam Mann Ghe Rivero Ghislain Bourgeois Gonéri Le Bouder Goutham Pacha Ravi Graham Hayes Gregory Haynes Gregory Thiemonge Grégoire Unbekandt Guang Yee Hael Yoon Haikel Guemar Haiwei Xu Hang Yang Hangdong Zhang Harald Jensås Hardik Italia Hervé Beraud Hideki Saito Hongbin Lu Hoolio Wobbits Hunt Xu ITD27M01 Ian Cordasco Ian Wienand Ian Y. Choi Igor Gnatenko Ilya Margolin Ilya Shakhat Irina Pereyaslavskaya Iswarya_Vakati Iury Gregory Melo Ferreira JP Sullivan Jacky Hu JaeSeong Shin Jakob Meng Jakub Jursa James Denton James E. Blair James E. 
Blair James Palmer Jamie Lennox Jamie Lennox Jan Hartkopf Javier Pena Jay Faulkner Jens Harbott Jens Rosenboom Jeremy Stanley Jerry Zhao Jesper Schmitz Mouridsen Jesse Noller Jesse Pretorius (odyssey4me) Jesse Proudman Jian Zhao Jihad Dwidari Jim Rollenhagen Joel Capitao Johannes Beisiegel Johannes Kulik John Dennis John Petrini Jon Schlueter Jordan Pittier Jose Delgado Josephine Seifert Joshua Harlow Joshua Harlow Joshua Hesketh Joshua Phillips João Vale Julia Kreger Julien Danjou KIM SOJUNG Kafilat Adeleke Kailun Qin Kajal Sah Kengo Takahara Kiran_totad Kirill Tyugaev Kiseok Kim Kristian Kucerak Kyle Mestery LEE SUN JAE LIU Yulong Lajos Katona Lars Kellogg-Stedman Lee Yarwood LinPeiWen <591171850@qq.com> Lingxian Kong LiuNanke Logan V Lucas Alvares Gomes Luis Morales Mahnoor Asghar Manjeet Singh Bhatia Manuel Osorio Mark Chappell Mark Goddard Markus Zoeller MartaLais Martin Millnert Marvin Vogt Mathieu Bultel Mathieu Gagné Mathieu Gagné Matt Fischer Matt Riedemann Matt Smith Matthew Booth Matthew Edmonds Matthew Treinish Matthew Wagoner Matthias Lisin Maurice Escher Maxim Babushkin Maxime Guyot Maxime Vidori Megharth Michael Gugino Michael Johnson Michael Still Michał Dulko Miguel Angel Ajo Mike Perez Mohammed Naser Mohit Malik Monty Taylor Morgan Fainberg Mridula Joshi Mário Santos Nakul Dahiwade Nate Johnston Nguyen Hai Truong Nick Jones Nils Magnus Noah Mickus Nobuto Murata Nurmatov Mamatisa Omer OpenStack Release Bot Patrik Lundin Paul Belanger Paulo Matias Pavlo Shchelokovskyy Peter BALOGH Pham Le Gia Dai Pierre Riteau Pip Oomen Polina Gubina Polina-Gubina Pooja Jadhav Prashant Bhole Qiming Teng Radoslaw Smigielski Radosław Piliszek Rafael Castillo Rafael Weingärtner Raimund Hook Rajat Dhasmana Rajesh Tailor Rarm Nagalingam Reedip Reedip Reedip René Ribaud Reynaldo Bontje Ricardo Carrillo Cruz Ricardo Carrillo Cruz Riccardo Pittau Richard Theis Roberto Polli Rodion Gyrbu Rodolfo Alonso Hernandez Rodolfo Alonso Hernandez Rodrigo Barbieri Romain Acciari Romain 
Dupont Roman Dobosz Romil Gupta Rosario Di Somma Rosario Di Somma Ruby Loo Rui Chen Ryan Brady Ryan Zimmerman Ryan Zimmerman Sagi Shnaidman Sahid Orentino Ferdjaoui Sahid Orentino Ferdjaoui Saju Salman Hajizada Sam Morrison Sam Yaple SamYaple Samuel Kunkel Samuel de Medeiros Queiroz Sean Handley Sean M. Collins Sean McGinnis Sebastian Haderecker Sebastian Lohff Sergey Skripnick Sergii Golovatiuk Seungju Baek Shane Wang Shashank Kumar Shankar Shogo Saito Shuquan Huang Sidharth Surana Simon Hensel Simon Leinen Sindhu Devale Slawek Kaplonski Snow Kim Sorin Sbarnea Spencer Krum Stefan Andres Stephen Finucane Steve Baker Steve Heyman Steve Leon Steve Lewis Steve Martinelli Steve Martinelli Steven Relf Swapnil Kulkarni (coolsvap) Sylvain Baubeau Sławek Kapłoński Taehyun Park Takashi Natsume Tang Chen Tang Chen Terry Howe TerryHowe Thanh Ha Theo Gindre Thiago Brito Thomas Bechtold Thomas Bechtold Thomas Bucaioni Thomas Herve ThomasBucaioni Tim Burke Tim Laszlo Timothy Chavez TingtingYu Tino Schmeier Tobias Henkel Tobias Rydberg Tobias Urdin Tobias Urdin Tom Stappaerts Tom Weininger Tony Breeds Tony Xu Toure Dunnon Tristan Cacqueray Trygve Vea Valery Tschopp Victor Coutellier Victor Silva Vieri <15050873171@163.com> Vishakha Agarwal Vu Cong Tuan Xav Paice Yaguang Tang Yan Xing'an Yang JianFeng Yang Youseok YeJun, Jung Yi Zhao Yolanda Robla Yuanbin.Chen Yujia Zheng Yuriy Halytskyy Yuriy Taraday Yuval Shalev Yves-Gwenael Bourhis ZhaoBo ZhiQiang Fan Zhou Zhihong anuradha1904 arkaruki ashrod98 avnish b.haleblian bhagyashris bmike78 brandonzhao caishan cenne cheng-jiab chenpengzi <1523688226@qq.com> chohoor deepakmourya dineshbhor dommgifer elajkat elynn fyx gryf gtema gujin hai huangshan hyemin Choi inspurericzhang james kirsch jihyun huh jolie jonnary lidong lifeless lijunjie likui lingyongxu liuxiaoyang liuzhuangzhuang lixinhui liyi ljhuang lvdongbing lvxianguo maaoyu malei mariojmdavid matthew wagoner melanie witt melissaml miaohb mountainwei niraj singh niuke pangliye 
pedro pengyuesheng purushothamgk qingszhao rajat29 reedip ricolin ricolin rladntjr4 sean mooney silvacarloss songwenping sonu.kumar subham rai sue suheoon suzhengwei tengqm tianmaofu ting wang tischrei tutkuna wacuuu wangqi wangqiangbj wangweijia wangxiyuan wangxiyuan whoami-rajat wu.chunyang wu.shiming xhzhf xu-haiwei xuanyandong yan.haifeng yanpuqing yanyanhu yatin zengjianfang zhang.lei zhangbailin zhangboye zhangdebo zhangyangyang zhouxinyong zhufl zhurong Édouard Thuleau ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/CONTRIBUTING.rst0000664000175000017500000000236400000000000016675 0ustar00zuulzuul00000000000000.. _contributing: ============================ Contributing to openstacksdk ============================ If you're interested in contributing to the openstacksdk project, the following will help get you started. Contributor License Agreement ----------------------------- .. index:: single: license; agreement In order to contribute to the openstacksdk project, you need to have signed OpenStack's contributor's agreement. Please read `DeveloperWorkflow`_ before sending your first patch for review. Pull requests submitted through GitHub will be ignored. .. seealso:: * https://wiki.openstack.org/wiki/How_To_Contribute * https://wiki.openstack.org/wiki/CLA .. 
_DeveloperWorkflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow Project Hosting Details ----------------------- Project Documentation https://docs.openstack.org/openstacksdk/latest/ Bug tracker https://bugs.launchpad.net/openstacksdk Mailing list (prefix subjects with ``[sdk]`` for faster responses) https://lists.openstack.org/mailman3/lists/openstack-discuss.lists.openstack.org/ Code Hosting https://opendev.org/openstack/openstacksdk Code Review https://review.opendev.org/#/q/status:open+project:openstack/openstacksdk,n,z ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296383.0 openstacksdk-4.0.0/ChangeLog0000664000175000017500000060303100000000000016004 0ustar00zuulzuul00000000000000CHANGES ======= 4.0.0 ----- * Deprecate 'use\_direct\_get' parameter * Support server unshelve to specific host * Drop support for Python 3.7 * Add Python 3.12 classifier * image: Check path is a file before attempting to use it (redux) * mypy: Enable checks for openstack.tests.functional * tests: Remove TestClustering functional tests * mypy: Enable checks for openstack.cloud * cloud: Combine networking-related classes * cloud: Misc type fixes * cloud: Reorganize \_OpenStackCloudMixin, Connection (2/2) * cloud: Reorganize \_OpenStackCloudMixin, Connection (1/2) * Added missing stack documentation and rewrite stack proxy document * cloud: Make service mixins subclass \_OpenStackCloudMixin * cloud: Make \_OpenStackCloudMixin subclass ServicesMixin * mypy: Enable checks for openstack.tests.fixtures * cloud: Fix annotations for proxy method * cloud: Remove check for nova extensions * pre-commit: Bump versions * cloud: Replace reference to removed variable * block storage: Add missing 'find\_type' proxy API to v2 * block storage: Add 'set\_readonly' action to v2 * baremetal: Enhance VIF attachment with port and portgroup UUIDs * block\_storage: Add support for project\_id in Limits 3.3.0 ----- * compute, 
volume: Improve 'update\_quota\_set' * compute: Correct base path for default, detailed quotas * zuul: Use more stable dib job * Use mypy syntax compatible with older pythons * Combine 3 auto\_allocated\_topology tests into one test * Run auto\_allocated\_topology functional tests in the own project * tests: Remove errant print * baremetal: Adds list node firmware support * cloud: Fix typo * image: Check path is a file before attempting to use it 3.2.0 ----- * tests: Rework warnings-related tests * exceptions: ResourceNotFound -> NotFoundException * compute, block storage: Add support for quota class sets * compute: Add additional server create parameters * compute: Add additional options to Server.rebuild * compute: Add support for legacy 'onSharedStorage' param * compute: Add server tag proxy methods * compute: Add Server.clear\_password action * compute, block storage: Minor fixes to limits * [func test] Fix race between attachment delete and server delete * Remove retired project Senlin job * baremetal: Decode 'config\_drive' argument to 'set\_provision\_state' * Identity: Add support for inherited\_to for role\_assignments * Add support for federation service providers * docs: Add missing identity v3 proxy API docs * reno: Update master for unmaintained/zed * pre-commit: Add pyupgrade hook * trivial: Prepare for pyupgrade pre-commit hook * tox: Don't install package in pep8 * Fix AttributeError in delete\_limit method * cloud: Do not reference other cloud mixin's methods * cloud: Trivial fixes * cloud: Remove use of OpenStackCloudCreateException * hacking: Add check for deprecated exception types * README: Add guide on raw HTTP layer * pre-commit: Bump versions * Allow filtering BGPVPNs * Extend project cleanup * image: Pass correct stores argument * Add quota class set to shared file system * Add Binero public cloud to vendor * Fix multiple image\_id query mappings * Remove mypy union line which breaks older pythons * Adding SDK support for \`\`glance 
md-namespace-objects-delete\`\` * Add Binero public cloud to vendor support * volume: Allow passing a volume type object to retype\_volume * volume: Add ability to set, unset image metadata * volume: Allow setting volume statuses individually 3.1.0 ----- * Add Tap Mirrors to SDK * Add sort\_key and sort\_dir to core Neutron resources * image: make sure the target for "clear\_cache" is valid * Update master for stable/2024.1 * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria * Fixed update\_metadef\_object * Allow project switching for Designate API * Add support for showing requested az in output * Fix the mailing list domain in documentation link 3.0.0 ----- * Resource locks and access rules restrictions * Fix list of server migrations * tox: Correct functional test factors * Fix typo * docs: Add docs on configuration of a service user * Incorrect protocol type in create\_security\_group\_rule() * Adding SDK support for \`\`glance md-namespace-properties-delete\`\` * Add missing snapshot parameters * reno: Update master for unmaintained/yoga * Add support for provider\_id for volume objects * Add volume manage support * pre-commit: Bump linter versions * mypy: Add typing to openstack.\_log * mypy: Address issues with remaining service modules * mypy: Address issues with openstack.orchestration * mypy: Address issues with openstack.clustering * mypy: Address issues with openstack.baremetal, baremetal\_introspection * mypy: Address issues with openstack.object\_store * mypy: Address issues with openstack.network * identity: Add test for 'role\_assignments\_filter' proxy method * mypy: Address issues with openstack.identity * mypy: Address issues with openstack.block\_storage * mypy: Address issues with openstack.image * zuul: Switch bifrost job to jammy * config: Add missing space to warning 2.1.0 ----- * Fix markup syntax in docstring * Remove unnecessary \`keys\` calls * Remove 
resource\_key for dns floating IP * Remove importlib-metadata from requirements * Implement heat 'stack event list' * Implemented heat 'stack suspend' and 'stack resume' function * mypy: Address issues with openstack.compute * Return the results of init attachment request * Enforce endpoint override for services without discovery * cloud: Replace use of aliased exceptions * Drop support for Python 3.6 * requirements: Sort alphabetically * volume: Add Limit to volume v2 API * Add volume transfer support [2/2] * Switch back to LaunchPad for issue tracking * Add volume transfer support [1/2] * Fix project cleanup for routers with static routes * Support the API for managing external gateways * Fix: Update/Delete type encryption operation * Add snapshot manage unmanage support * [baremetal] port add shard, is\_smartnic, name * [baremetal] driver add missing firmware interface * [baremetal] Add missing owner attribute to allocation * [baremetal] Add some missing fields to node object * Check Designate API version for shared zones tests 2.0.0 ----- * compute: Add 'locked\_reason' to Server resource * [baremetal] Add support for service steps * [baremetal] Add firmware\_interface support * [baremetal] Add unhold provision state verb support * [baremetal] Add support for parent\_nodes * Bump API version in Manila functional tests * volume: Add Capability to volume v2 API * Replace appdirs usage with platformdirs * fakes: Generate correct type for list type components * image: Add support for metadef property operations * config loader: Prefer cli/env over clouds.yaml args for some args * [baremetal] Ensure baremetal shard parameter gets passed * Add "hardware\_offload\_type" attribute to "port" * [baremetal] Add support for querying for shards * Update master for stable/2023.2 * Fix: volume request body for scheduler hints * tests: Remove unused hook * connection: Fix typo * tests: Remove references to shade * cloud: Remove old cloud-layer caching functionality * cloud: 
Don't use dangerous argument defaults * cloud: Remove floating network cache * cloud: Remove remnants of server caching * cloud: Remove remnants of floating IP caching * cloud: Remove remnants of port caching * Add volume attachment support * docs: Rewrite caching docs * Fix bulk\_delete support determination * Fix Swift endpoint conversion to determine info/caps url * Support passing a subnetpool for create\_subnet * cloud: Convert 'get\_volume\_limits' to use proxy layer * cloud: Remove '\_get\_raw\_client' * cloud: Remove '\_orchestration\_client' * cloud: Remove dead method 1.5.0 ----- * block storage: Add query string params for many APIs * docs: Add exception documentation * docs: Document various warnings * baremetal: Add missing docstrings * block storage: Add missing docstrings * compute: Add missing docstrings * docs: Add missing docs for block storage resources * docs: Add missing docs for compute resources * docs: Add missing docs for identity resources * Implement stack export in openstacksdk * support notification vmoves for masakari * Unused param in stack unit test * identity: Add support for domain config * cloud: Remove dead code * Fix heat stack \_action function to handle exception * docs: Use asterix for Network resources doc * Add SFC to SDK * Run mypy as pre-commit * mypy: Address issues with top-level files * openstack.format: Remove 'serialize' classmethod * resource: Remove unused 'Resource.service' attribute * cloud: Remove unnecessary types import * cloud: Remove unnecessary '\_make\_unicode' helper * image: Fixed URL for stores\_info * tests: Move cloud-layer service tests to their own file * tests: Rename file, remove unused file * Add image metadef object operations * Add 2 tls container params for Octavia Pools * Support manage/unmanage shares with manila * fix memory leak of Connections * tests: Skip intermittently failing placement traits test * Stop randomly sorting error lines * Cleanup logic to either dry-run, bulk\_delete or do 
single object deletes * fix block storage resource registry 1.4.0 ----- * Fix manila access rules functional tests * tox: Add environment documentation * cloud: Remove '\_object\_store\_client' * Rename share group snapshot 'members' method * docs: Improve docs for Connection with CONF object * cloud: Remove '\_{service}\_client' properties * tests: Migrate tests from os-hosts to os-services * block storage: Add support for services * cloud: Remove '\_is\_client\_version' * Implemented methods for share metadata * Return HTTP response for delete\_access\_rule * fakes: Add type hints * Add fake proxy generator * docs: Add documentation for openstack.test.fakes * volume: Add Extension to volume v2 API * Encode type of 'links' for Extension objects * VPN: add missing fields to VpnIpsecPolicy * Remove \_min\_microversion for consistency * Define version for share\_access\_rules * Add support for default security group rules in SDK * Define version for export\_locations * Add Octavia support for HSTS * wokflow: add update\_workflow proxy method * placement: Add support for traits * placement: Add support for resource provider aggregates * placement: Add support for resource provider inventories * tests: Update functional tests for resource providers * Treat server as a dict in add\_server\_interfaces * Add share group snapshots to shared file systems * Adds share group resource to shared file system * Add missing \`return\` in \`upload\_volume\_to\_image\` 1.3.1 ----- * Fix broken python3.6 support * Add missing \`force\` parameter 1.3.0 ----- * fix flavor.swap attribute type * fix connection.Connection finalizer * Allow resources to be skipped on project cleanup * Add fake resources generator * update rackspace profile to specify identity version * Adds support for node hardware inventory * Add block storage summary support * Fix typo * Remove unnecessary quotes * volume: Add missing attributes to Extension * tox: Disable E501 * docs: Replace/remove shade-specific docs 
* Add image metadef\_resource\_type into the registry * tests: Use uuid, not randint * tests: Enable UserWarning by default * tests: Ignore our own deprecation warnings * Use custom warnings everywhere * cloud: Move identity-related helpers * utils: Add 'supports\_version' * cloud: Remove ClusteringCloudMixin * cloud: Rename shade-specific method * cloud: Remove '\_ShadeAdapter' * cloud: Reduce duplication * cloud: Ignore invalid filters * Migrate warnings to openstack.warnings * cloud: Filter FIPs by valid filters * exceptions: Remove unused exception * tests: Isolate tests from OS\_CLOUD * Allow tags to be passed through to compute.create\_server * FWAAS: add Computed summary field to FirewallRule 1.2.0 ----- * tests: Silence warning * Add 'callback' to 'wait\_for\_delete', 'wait\_for\_status' * Add find\_share() for shared file system share resource * Bump the chunk\_size to use CPU more efficiently * compute: Adds shelve-offload support * identity: Add access rule CRUD support * ironic: Add support for Introspection Rules * nit: Correct name of variable * tox: Bump min\_version to 4.3.0 * Added neutron fields to share network resource * add extended neutron job * Ignore black version bump * Bump black to 23.3.0 * pre-commit: Enable black * Ignore blackify changes * Blackify everything else * Blackify openstack.cloud * Blackify openstack.clustering * Blackify openstack.container\_infrastructure\_management * Blackify openstack.accelerator * Blackify openstack.instance\_ha * Blackify openstack.dns * Blackify openstack.database * Blackify openstack.message * Blackify openstack.workflow * Blackify openstack.orchestration * Blackify openstack.placement * Blackify openstack.key\_manager * Blackify openstack.shared\_file\_system * Blackify openstack.load\_balancer * Blackify openstack.baremetal, openstack.baremetal\_introspection * Blackify openstack.object\_store * Blackify openstack.block\_storage * Blackify openstack.identity * Blackify openstack.network * Blackify 
openstack.image * Use pre-commit for 'pep8' tox target, bump versions * Prepare for hacking 6.x * Blackify openstack.compute (tests) * Blackify openstack.compute * Add link to image sharing api docs * Add share access rules to shared file system * Add support for Ironic node shard attribute * image: Don't envelope properties * Add share network subnet resource to shared file system * Allow key overrides in create and fetch methods 1.1.0 ----- * Add resize/extend share actions * Add export location resource to shared file system * Microversion 2.91: Support specifying destination host to unshelve * Adds Support for \`\`glance cache-clear\`\` * Removing region Lon1. Updating block\_storage\_api\_version. Adding image\_format * compute: Add Server.restore, restore\_server proxy method * ssh key change * Add Cleura acceptance tests * Add Designate (DNS) zone share API * Change python3 jobs template to latest version * Drop legacy job that is always failing * Rework zuul config * Prepare acceptance tests for real clouds * modify ovh and ovh-us vendor config * Add share instances to shared file systems * Use custom warnings, not logging.warning * Adding support for glance cache-queue Command * config: Load additional options for v3multifactor * config: Split 'OS\_AUTH\_METHODS' * add a new vendor profile for the Swiss Open Telekom Cloud * Add share snapshot instance resource * Adds SDK support for \`\`glance cache-delete\`\` * Add BGPVPN to SDK * Implement acceptance test job * Fix Accept header for deleting Octavia load balancers * Add share network resource to shared file system * Update master for stable/2023.1 * Remove "feature/r1" override from manila job * Include "security\_groups" to "Port" query parameters * Add support for glance cache * Remove usage of deprecated \`sre\_constants\` module * fix docstrings refering to volume attachments instead of server migrations 1.0.1 ----- * Update README to indicate COE resource/proxy support 1.0.0 ----- * Prepare release 
note for R1.0 * Drop munch dependency * Add Tap Services and Flows to SDK * Finish Magnum rework * Add magnum cluster templates resource * Convert cloud layer to use COE proxy layer * Add BGP Speakers and Peers to SDK * image: Prevent passing conflicts args to stage\_image * image: Remove unsupported parameters from v1 proxy * image: Modify signatures of various image methods * image: Remove unnecessary abstractions * image: Remove \_base\_proxy module * image: Reformat proxy modules * image: Add missing image import options * Add typing information and documentation * Drop \_normalize class * Revert "Add 'details' parameter to various 'find' proxy methods" * Move \_normalize\_coe\_\* into \_coe class * tox: Trivial fixes * Move normalize\_server to compute mixin * Move normalize security group to sg class * Add query mappings for vpnaas resources * Update tox.ini for tox v4 compatibility * Allow passing more arguments to create\_port * compute: Pass microversion for actions * Fix docs for class SecurityGroupRule * Add 'details' parameter to various 'find' proxy methods * Add 'all\_projects' support to proxy layers * Remove unnecessary mocks * Add missing block storage v2 'find\_\*' methods * Deprecate all of the compute image proxy APIs * Normalise query strings passed to 'find\_\*' methods * baremetal: Add Node boot\_device methods * baremetal: Add Node console methods * baremetal: Add Node.inject\_nmi method * Whitelist cloud functional tests in acceptance * Rework network functional tests * Revert "Revert "Add "security\_group\_ids" to Port's query parameters"" * cloud: create\_server: fix scheduler\_hints/group * coe: Add support for clusters 0.103.0 ------- * compute: don't pass networks: auto for older microversions * docs: Add overview of supported services to README * Revert "compute/server: add support of target state for evacuate API" * support None as expected status in wait\_for\_status * docs: Add docstring to 'openstack' module * Stop normalizing 
floating ips * Fix server topology and diagnostics * Implement unified search\_resources method * Move get\_compute\_usage to use proxy * block storage volume resource - add 'is\_multiattach' parameter (when true, disk will be shareble) * Prevent sending None password on create\_user * add flavor description to flavor\_create * Fix server action request generation * image: Allow providing 'data' argument to image upload, stage * Remove python-dev from bindep * Introduce resource\_registry in the proxies * image: Add metadef resource type operations * Fix backup metadata management and update * Use /servers/detail endpoint in find\_server proxy method * Accept queries when listing migrations * compute: Add functional tests for volume attachments * tests: Avoid potential aliasing of imports * image: Add 'store' argument to 'delete\_image' proxy method * compute/server: add support of target state for evacuate API * image: Allow listing detailed view of stores * image: Correct typo * image: Correct typo with 'get\_import\_info' proxy method * Cron Triggers proxy * Initialize tests of real clouds * Add identity.group\_users method * compute: Fix '\*volume\_attachment' proxy methods * support nat\_destination when attaching existing floating\_ip to a server 0.102.0 ------- * Added Ansible OpenStack Collection to Bifrost's job.required-projects * image: Add metadef schema resource to v2 api * Improve swift headers handling * Fix pre-commit issues * Allow to attach a floating ip to a specific fixed address * network: Rename ikepolicy module * network: Remove duplicate module, update references * Extend project cleanup * Add support for updated\_at field for volume snapshots * Drop query parameter 'id' from identity mapping * Improve project cleanup for cinder * docs: Correct docs for VpnIpsecPolicy * docs: Trivial adjust index structure * tests: Improve functional testing for image methods * image: Add support for other metadef namespace operations * workflow: Trivial fix 
doc title * docs: Add missing docs for proxy helper methods * Add Python3 antelope unit tests * Update master for stable/zed * Update register\_machine to use the Ironic format for ports * Migrate register\_machine to use the proxy layer * compute: Add support for triggering crash dumps * volume: Trivial docstring fixes to 'wait\_for\_status' * block storage: Add update\_volume proxy method * resource: Reformat calls to request * trivial: Correct some docstrings * resource: Remove unused helper, variable * image: Trivial grouping of image proxy methods * tests: Remove unnecessary service check * added api requirements for new "openstack image metadefs namespace list" command * resource: Fix pagination of nested Glance resources * Unify resource list filtering * Add support for fault object per Server API * Add additional\_vips parameter for Octavia load balancers * Add docstring to wait\_for\_load\_balancer() method * Implement project cleanup for object-store * Fix incremental backups handling in project cleanup * Support unknown attributes in resource.\_\_getitem\_\_ * Use /volumes/detail endpoint in find\_volume proxy method * Actually use openstacksdk from source in the Bifrost job * baremetal: rework node creation to be closer to the backend * Allow passing explicit microversions to Resource methods * Replace base64.encodestring with encodebytes 0.101.0 ------- * remove unicode prefix from code * resource: Merge unnecessary separation of logic * Enable add\_ips\_to\_server() and \_needs\_floating\_ip() for pristine server resources * Replace deprecated failUnlessEqual with assertEqual * Restore functionality to attach multiple floating ips with add\_ip\_list() * Allow to pass description parameter to cloud.create\_server() * block storage: Add support for the GroupSnapshot resource * Reduce list\_router\_interfaces() to necessary API calls * block storage: Add support for the Group resource * compute: Add support for os-simple-tenant-usages API * Allow unknown 
attributes in project resources * Disable deprecation warning for tenant\_id * tests: Add pointers to docs on running tests 0.100.0 ------- * block storage: Add support for group type specs * docs: Add missing docs for block storage v3 proxy APIs * test: Remove duplicated tests * Allow Resource.to\_dict to allow returning unknown values * Add update capabilities to Snapshots * Add VPNaaS IpsecPolicy resource * Reorg existing vpnaas content * proxy: Resolve a TODO * Add VPNaaS Endpoint Group resource * Add CRUD methods for Neutron router ndp proxy * Add network address\_group proxy doc and unit tests * trivial: Run some files through black * cloud: Remove a load of normalize helpers * Fix Baremetal cloud layer * Make nodepool jobs non voting * Fix object upload for RAX * Warn when no statsd library available * compute: Add support for instance actions * compute: Correct some docstrings * cloud: Update docstrings for compute functions * cloud: Update docstrings for object store functions * cloud: Update docstrings for identity functions * cloud: Update docstrings for block storage functions * cloud: Update docstrings for image functions * cloud: Update docstrings for accelerator functions * cloud: Update docstrings for network functions * Remove unused normalization helpers * Fix python-dev reference in bindep * Do not log to stdout by default * Change title for "unreleased" renos 0.99.0 ------ * network RBAC policy: allow query for target tenant * Fix creation of protected image for old user code * Update Internap auth URL * Improve StatsD metric precision * Add R1 summary release note * Rework caching * image: Add "id" filter for images * Allow to filter endpoints by region\_id * Add QoS rule type filtering keys * fix: improperly encoded object names * Run nodepool job * Add Python3 zed unit tests * Update master for stable/yoga * Identity: Add support for system role assignment * Indentation of the docstrings * network: Fix update of network provider * Correct 
documentation about Load Balancer API * Improve compute flavor handling * Use empty read\_acl for swift container * Restore get\_compute\_limits backward compatibility * Revive legacy job * Skip tests when needed extensions are disabled * Cloud / Utils service - reindentation of the docstrings * Cloud / Network service - reindentation of the docstrings * Cloud / Identity service - reindentation of the docstrings * tests: Centralize configuration of default flavor, image * Skip qos-pps-minimum tests if extension is missing * doc: Update testing documentation * doc: Remove references to 'examples' test env * Add query parameters to local ip * Cloud / Compute service - reindentation of the docstrings * Cloud / Dns service - reindentation of the docstrings * Cloud / Coe service - reindentation of the docstrings * Cloud / Baremetal service - reindentation of the docstrings * Cloud / Security group service - reindentation of the docstrings * Cloud / Normalize service - reindentation of the docstrings * Cloud / Floating service - reindentation of the docstrings * fix creation of protected image * Revert "Add "security\_group\_ids" to Port's query parameters" * Get rid of normalization in compute CL * Functional tests for vpn ike policy resource * Add QoS min pps rule object and CRUD operations * Get rid of normalization in network CL * Get rid of normalization in further CL services * Get rid of normalization in identity CL * Get rid of normalization in orchestration CL * Get rid of normalization for image service * Object Store - reindentation of the docstrings * Switch quota\_set cloud layer BS methods to proxy * Switch unittests for BS to use v3 * Reindentation of the docstrings for baremetal service * Switch quota methods of cloud layer to proxy * Switch create\_server cloud method to proxy * Switch delete\_server cloud method to rely on proxy * Update python testing classifier * tests: Handle overridden 'verify\_delete' (kw)args * Switch update\_server cloud layer to 
proxy * Switch rebuild\_server cloud method to rely on proxy * Fix misuse of assertTrue * Identity service - reunite :class: links on single lines * Block storage - reunite :class: links on single lines * Switch cloud.compute.get\_server\_by\_id to use proxy * Add revert share to snapshot to shared file system * Shared file system - reunite :class: links on single lines * Instance HA service - reunite :class: links on single lines * Load balancer - reindentation of the docstrings * Network service - reunite :class: links on single lines * Orchestration service - reunite class links (:class:) on one line * Compute - reindentation of the docstrings 0.61.0 ------ * compute: Default to 2.48 for server diagnostics * Add Neutron Local IP CRUD * Splits class \`TestNetworkProxy\` * Vpn ike policy resource * compute: Add support for server lock reason * Add "check\_limit" to network Quota class * Add Neutron Local IP CRUD * trivial: Correct docstrings for 'delete' proxy calls * compute: Server group rules are dicts, not lists of dicts * compute: Add support for microversion 2.89 * Fix functional test for user message * Adds "test\_update\_zone" test case * Database service - reindentation of the docstrings * Image service - reindentation of the docstrings * Orchestration- reindentation of the docstrings * Keep creating keystone admin endpoint for heat * Switch compute limits cloud to proxy 0.60.0 ------ * Keep creating keystone admin endpoint for heat * Shared File System - reindentation of the docstrings * Add share snapshot to shared file system * compute: Add support for microversion 2.89 * compute: Add support for migrations API * compute: Add support for server migrations API * compute: Regroup proxy methods * DNS - reindentation of the docstrings * Clustering - reindentation of the docstrings * Network - reindentation of the docstrings * trivial: Typo fixes * Remove '\_\_unicode\_\_' helper * exception: Correct argument * Add support for updated\_at field for volume 
objects * Block storage - reindentation of the docstrings * Reindentation of the docstrings * Reindentation of the docstrings * Reindentation of the docstrings * Reindentation of the docstrings * Reindentation of the docstrings * Reindentation of the docstrings * Reindentation of the docstrings * resource: Rewrap function signatures * resource: Remove deprecated 'allow\_get' attribute * Rely on proxy for compute.flavor cloud layer operations * Support description in sg-rule creation * Switch Swift cloud layer to proxy * Splits class \`TestSharedFileSystemProxy\` * Splits class \`TestPlacementProxy\` * Splits class \`TestOrchestrationProxy\` * Splits class \`TestNetworkProxy\` * Add missing headers in object store * Splits class \`TestMessageProxy\` * Splits class \`TestKeyManagerProxy\` * Splits class \`TestInstanceHaProxy\` * Splits the \`TestBaremetalProxy\` class * Add limit resource to shared file system * Add Python3 yoga unit tests * Update master for stable/xena * Change image.hw\_qemu\_guest\_agent to be string * Fix setting initial object/container metadata * Splits the \`TestAcceleratorProxy\` class * Optimize code by moving misc functions to utils * Start splitting the \`TestImageProxy\` class * Add user message to shared file system * Replace the 'try except' block * Add description args for device profile create * Add user group assignment support in identity * Add a manila functional check job * Fix key generation for caching * Add "security\_group\_ids" to Port's query parameters 0.59.0 ------ * Fix key generation for caching * Fix some docstrings, and a small bug * Add support for changing baremetal node's boot\_mode and secure\_boot states * Add support for reading node's "boot\_mode" and "secure\_boot" fields * Adds storage pools to shared file system * Extend has\_version function to accept version parameter * Temporarily disable nodepool job * Extend has\_version function to accept version parameter * Vpn ike policy resource * Add common 
metadata mixin to block storage and clustering * Make metadata a common mixin * Introduce QuotaSet in block storage service * Introduce QuotaSet in the compute service * Introduce common class for QuotaSet * Fix import order in block\_storage tests * Fix import order in cloud tests * Move tag mixin into the common * Fix import order in clustering tests * Fix import order in orchestration tests * Fix import order in identity tests * Switch identity roles in the cloud layer to proxy * Fix identity role management * Switch identity.endpoints in the cloud layer * Add share resource to shared file system * Switch identity.service in cloud layer to proxy * Switch identity.domains in cloud layer * Switch identity groups and users in cloud layer * Add possibility to create subprojects in cloud layer * Switch project management in cloud to proxy * Remove misspelled speccing arguments * Temporarily disable nodepool job * Add cleanup function for DNS resource * Switch BS volume\_attachment operations in cloud layer * Switch BS backup and snapshot methods in cloud layer to proxy * Add BS snapshot and backup actions * Switch BS type access operations in cloud layer * Add BS type access into proxy/resource layer * Switch block\_storage.volume operations in cloud layer * Add block\_storage.volume actions * Copy find\_volume to block\_storage.v2 proxy * Fix active status for block storage * Add scheduler hints on the block\_storage.volume resource * Sort block storage properties alphabetically 0.58.0 ------ * Changed minversion in tox to 3.18.0 * Move to OFTC * Add compute microversion 2.78 * Drop cloud layer methods for Senlin * Fix import order in accelerator and config unit tests * tests: Rename 'TestProxyBase.\_verify2' to '\_verify' * Replace deprecated inspect.getargspec call 0.57.0 ------ * Add IPsecSiteConnection resource and proxy layer functionality for it * Revert "Update TOX\_CONSTRAINTS\_FILE for feature/r1" * Add compute microversion 2.79 * setup.cfg: Replace dashes
with underscores * Extend functional test of image * tests: Remove final use of 'TestProxyBase.\_verify' * tests: Sanity check 'test\_proxy\_base' * tests: Remove dead code * placement: Add support for resource classes * Switch object\_store cloud functions to proxy layer * Fix MFA authorization * Fix get\_server\_password method * Remove references to 'sys.version\_info' * Stop sending tenant\_id to Neutron * Switch networking function in cloud layer to proxy * Add access to the resource attribute by server-side name * Fix import order in message and workflow unit tests * Add support for API Extensions * Add support for project options * Fix import order in load\_balancer unit tests * Revert tags query\_params back to tag * Update TOX\_CONSTRAINTS\_FILE for feature/r1 * Update .gitreview for feature/r1 * Adjust image \_base\_proxy condition * Increase RAM for the Ironic CI jobs * Implement driver vendor passthrough * setup.cfg: Replace dashes with underscores * Add min\_count and max\_count attributes to Server * Adding retype\_volume to BlockStorageCloudMixin * Add support for the GroupType resource 0.56.0 ------ * Switch to openstack-python3-xena-jobs template * Drop jobs failing for too long * Fix import order in compute unit tests * Add compute microversion 2.77 * Allow unknown attributes to be included in request body * Adds Node Vendor passthru * Use py3 as the default runtime for tox * statsd: use timedelta and pipeline * Allow for override of statsd/influxdb settings per cloud * Fix import order in network unit tests * Add support for Resource Filters * Drop lower-constraints job * Don't send empty remote\_address\_group\_id for security groups * Fix more import orders * Fixing more import orders * Add deploy\_steps to baremetal node provisioning * Fix typo - \_list lists, doesn't delete * Avoid prometheus metrics explosion * Add compute microversion 2.57 * Add support for the Neutron L3 conntrack helper API * Add tags option to the image upload * 
placement: Add support for resource providers * Apply import order in more tests * Fix more import order in tests * Apply import order in some tests * tox: Enable parallel docs build * docs: Add intro doc to user guide * Improve README to provide example of Resource usage * Added support for the Capabilities resource * Update master for stable/wallaby * Added support for the Limits resource * Add pre-commit * Apply pep8 import order style * Prepare separate block\_storage v3 documentation 0.55.0 ------ * Cache auth token in keyring * add masakari enabled to segment * Add set\_readonly\_volume to BlockStorageCloudMixin 0.54.0 ------ * Improve Ironic API on OpenStack SDK * Fix a trivial error in one of the error messages * Add shared file systems support * Switch cloud.networking.qos\* operations to rely on proxy layer * Add compute microversion 2.70 * Add TODO to remove md5 wrapper * Set resource URI properties in listing method * Prevent the endless loop in resource listing * Remove unnecessary string formatting * Drop .json suffix from networking URLs * Support Deploy Templates for Ironic API * Change microseconds to total\_seconds() * Move 'collections.Mapping' to 'collections.abc' * Add ALPN support to load balancer pools * Update Open Telekom Cloud vendor docu * Complete compute.hypervisor functions 0.53.0 ------ * Apply urllib.parse.quote in unittests to get\_mock\_url * Add query parameters to listing heat stacks * Complete compute.service operations * New volume availability zone resource, new functional and unit tests * Trival change: Correct some errors * Modify cloud.get\_aggregate to use proxy.find * Support roles 'name' in list\_roles call * encapsulate md5 calls for fips * Refresh deprecated link * Support SNAP\_REAL\_HOME when using openstacksdk inside a snap package * Add \`\`device\_profile\`\` attribute to \`\`port\`\` * Change nodepool job to build CentOS-8-stream (unblock gate) * Fix invalid argument formatting in log messages * Add support for 
updating Block Storage Volume type extra\_spec attributes * Add id query parameter to sg rules * Drop swift check for volume backup * Add new Open Telekom Cloud region into the profile * Support remote address group in SG rules * Add support for overriding list base\_path in find function * Add support for Block Storage (v3) VolumeType Encryption resources * Fix exception parsing when using WSME * Add tls\_enabled param for Octavia Pools 0.52.0 ------ * Add update\_flavor method * Remove duplicate test\_zone\_create * Complete compute aggregate functions * Fix the invalid if statement * Volume Target support for Ironic on OpenStack SDK * Switch flavor ops in the cloud layer to proxy * Add user\_id as optional param to keypair operations * Fix invalid assertIsNotNone statement * Update TOX\_CONSTRAINTS\_FILE 0.51.0 ------ * Migrate ironic job to focal * Disable dstat on ironic job * Replace assertItemsEqual with assertCountEqual * Support waiting for bare metal power states * Respect default microversion in the microversion negotiation * Expand user path when loading SSL-related files * Add support for updating cinder (v3) volume types * Add "description" to cinder volume types * Add source\_ip\_prefix and destination\_ip\_prefix to metering label rules * Deprecate 'remote\_ip\_prefix' parameter in metering label rules * Skip address group functional tests if no extension * Add 'project\_id' to Snapshot query parameters * Fix hacking min version to 3.0.1 * [Trivial]: Add comments in the cloud/accelerator * Add neutron address group CRUD * add cluster\_id to filter by cluster\_id when list actions * Include "fields" to "Port" query parameters * Remove install unnecessary packages * Follow-up on 751234 and 750072 * Increase IRONIC\_VM\_SPECS\_RAM to avoid KP * Don't set list\_type to dict for server groups * Add some compute console operations * Fix l-c testing for ubuntu focal * Add additional compute flavor operations * Update master for stable/victoria * Add
\_max\_microversion for aggregates * Repair 2 deprecation warnings * Squeeze SnapshotDetail class into Snapshot * Fix a bogus error in config loader when using several args with dashes * Switch nodepool test to containers * Add ALPN support to load balancer listener 0.49.0 ------ * Update config to Keystone v3 * Add support for filters into the project cleanup * Stop falling back to image import * Add "numa\_affinity\_policy" attribute to "port" * baremetal-introspection: allow fetching unprocessed data * Add compute microversion 2.6 and 2.8 * Use unittest.mock instead of mock * Add func test for compute microversion 2.3 * Add tests for compute microversion 2.2 and 2.10 * Functional tests to explicitly use ML2/OVS * Add block\_storage find functions * Allow passing in a logging handler * Tolerate images created with other means * switch to importlib.metadata for entrypoint loading * Ignore IPv6 addresses if force\_ipv4 is set 0.48.0 ------ * Remove enforcer * Add support for multiple image stores * Upload image via interop import if needed * Don't use random.SystemRandom in tests * Drop python3.5 support * Remove neutron-fwaas from the jobs' required project * baremetal: support for volume connectors API * Add query parameter 'id' for security\_groups 0.47.0 ------ * NIT: Fix application credential * Add user\_projects method to docs index * Do not clean keypairs in the project cleanup * Wait for the project cleanup to complete * Make optional name and admin password * Add some unit tests for config.loader * Fix AttributeError exception during authorization * baremetal: use proxy methods in unregister\_machine * Fix deleting stacks by id when waiting for result * Refresh python versions * Switch to newer openstackdocstheme and reno versions * Extend statistics reporting * add default user and project domain for OVH provider * new ovh-us provider for OVH US regions * added new regions for provider OVH * Add TLS protocol support for Octavia * Fix issues found by 
latest flake8 * Set BaseImageProxy.create\_image validate\_checksum default to False * Stop subclassing object * Remove use of six * Remove uses of from six.moves * Remove some unneeded things from test-requirements * Fix image owner field * Strip self from incoming glance properties * Add name query filter to keystone service * Don't error if clouds.yaml is not readable * Fix metric names in the object\_store * Add "id" to Port's query parameters * Add cipher list support for octavia * Update docs to work with newer Sphinx * Fix an unstable bare metal unit test * OpenStack port decorator variables * Update master for stable/ussuri 0.46.0 ------ * Volume.backup API attr name fixes * Update to hacking 3.0 * Update local hacking checks * Raise hacking to 2.x * Re-add nodepool functional test * Add availability zone and availability zone profile resources to load balancer * Small cleanups after Python2 drop * Turn off test log capture for now * Add python-requires entry indicating 3.5 is required * Remove kwargs validation for identity project updates * Change default image type in the OTC vendor profile 0.45.0 ------ * Revert "Switch to futurist for concurrency" 0.44.0 ------ * Add unit test for rackspace block-storage workaround * [tests] Improve devstack/post playbook efficiency * Add Release notes entry for Identity v3 IDP, Mapping and Protocol * Add support for Federation Protocols * Update Rackspace vendor profile for cinder v2 * Add ansible stable-2.9 job and run 2.8 and 2.9 * Add support for Identity Providers * Lay a foundation for the project cleanup * Add support for not including the ID in creation requests * Run fetch-subunit-output role conditionally * Add bulk creation of rules for Security Group * Add support for federation mappings * Add Octavia quota to the SDK docs * Import generate\_temp\_url from swiftclient * Set max\_microversion to 2.53 for hypervisors * baremetal: fail-less mode for wait\_for\_nodes\_provision\_state * Rationalize examples and 
functional extra config loading * Fix microversion negotiation in some bare metal node call * Set min version to test node retirement * Add support for additional volume backup options 0.43.0 ------ * Support for stateless security groups * Consistent normalization of Machine objects in the cloud layer * Fix aggregate functional test for id restriction * Extract check temp\_url\_key logic * Add retired and retired\_reason fields to baremetal node 0.42.0 ------ * Normalise create\_coe\_cluster{,\_template} results * Fix service\_type test for magnum in gate * Switch to futurist for concurrency * Handle old status-less placement service * Return uuid alias for coe\_cluster in non strict mode * Replace assertItemsEqual with assertCountEqual * Add port property: ip\_allocation * Add description field to portforwarding NAT rules * Include "fields" to "SecurityGroup" query parameters * Include user\_id attribute in volume information * Add availability\_zone param to load balancer * Implement If-Match support for Neutron resources * Fix: Set image name correctly if filename is not passed * Add bulk port create * Add method for bulk creating objects * Change of auth url and regions 0.41.0 ------ * Adding basic implementation for Accelerator(Cyborg) * Fix error handling in network trunks operations * Fix error handling on add/remove router iface calls * Fail a job for ansible modules with message 0.40.0 ------ * Use the bifrost bionic CI job * Add allowed\_cidrs param to load balancer listener * "qos\_network\_policy\_id" attribute added to port resource * Replace six.iteritems() with .items() * Bump min version of decorator * Fix bug in object storage container creation setting metadata * Add reset\_interfaces argument to patch\_node * Fix duplicated words issue like "was not not found" * Handle HTTP errors in add/remove router interface calls * Add return cluster\_id when query actions list * Support uploading image from data and stdin * Switch stable ansible job to 2.8 
* Remove extra python2 test jobs * Stop supporting python2 * Remove python2 from project-template * Add dns\_publish\_fixed\_ip attribute to subnets * Remove duplicate job definition 0.39.0 ------ * update OVH vendor entry * Fix reno index list indent * Expose baremetal Node.owner * Update deps for tox venv target * Fix .. note:: rendering in doc 0.38.0 ------ * Keep connection backrefs with weakref.proxy * baremetal node: 'error' is a failed state * Add router add/remove route operations * tox: Keeping going with docs * Increase test timeout for 2 tests in TestImageProxy class * Increase dogpile version for Py3.7 compatibility * CI: add ironic-python-agent-builder to the ironic job * Switch to Ussuri jobs 0.37.0 ------ * Re-add functional tests on flavor content * Add support for Node tainted field * Add clustering update\_action * Fixes get\_user when identity responses are paged * Bump the openstackdocstheme extension to 1.20 * Fix server for later microversion * Start supporting type info * Use generated list of services instead of metaclass * Use has\_service in functional test's require\_service * Support vendor data in configdrive building * Add a non-voting ironic-inspector job * Make proxy honor raise\_exc in REST primitives * Make the bifrost job non-voting * Update the constraints url * Update master for stable/train * Fix the wrong doc use oslo\_conf param * fix "How To Contribute" url 0.36.0 ------ * Fix image create with tags * Properly convert baremetal fields to server-side values * baremetal-introspection: add manage\_boot argument to start\_introspection * baremetal-introspection: fix passing a Node to start\_introspection * Cleanup doc/source/conf.py * Build PDF docs * Strip two more accept headers from object-storage * Add a fields meta\_data to result of Senlin API * Remove Accept header with empty value for HEAD and DELETE requests * Rework statistics reporting 0.35.0 ------ * Fix AttributeError bug when creating nested stacks * Add 'tag' 
support to compute with supported microversion * Replace catalog-v3.json with keystoneauth fixture * Rationalize endpoint\_for and get\_endpoint\_from\_catalog * Add strict\_proxies option for Connection * Avoid unnecessary object meta prefix in proxy * Add header to auto-delete image upload objects * Add 'node' attribute to baremetal Allocation 0.34.0 ------ * Bump keystoneauth1 minimum to 3.16.0 * Add support for fields in baremetal get\_\* resources * Validate that connect\_as connects as the project * Add support for global\_request\_id * Fix discovery cache sharing * Minor refactor cleanup of \_make\_proxy * DRY test\_connection * Retry large object manifest upload * bug: avoid unnecessary object meta prefix * Skip most service\_description for unknown services 0.33.0 ------ * Fix dns return values * Initialize pool\_executor so close works * Update betacloud vendor entry * Allow limiting Connection service\_types from oslo.config * Fix bm tests: sort lists being compared * Add application credential CRUD support * Enable ansible module test for keypair to check return data * Fix DeprecationWarning for using logger.warn * baremetal.configdrive: tolerate user\_data as a string * Add CloudRegion helper method for arbitrary endpoints * Move the history lesson into the docs * Fix README example for cloud layer * Update links 0.32.0 ------ * Add node traits support to baremetal * Added missing {user, project}\_domain\_name in Api * Volume.Backup restore fixes * Fix image deletion with tasks\_api enabled * docs: Add simplified CLI parser docs * Add set-boot-device to baremetal * Update api-ref location 0.31.2 ------- * Correct endpoint\_override discovery for service with dashes in their type * Specify store when importing an image * force specify project\_id during create Sg\_rule may cause issue * Can't create a metadata named key, clear, delete * Repair masakari FT * Return empty lists for resources if neutron doesn't exist * Fix typo for subnet.py This 'slacc'
should be 'slaac' * Add Python 3 Train unit tests * Increase randomness in ZONE creation * Fix invalid assert state * Replace nodepool func jobs * Add Python 3 Train unit tests * Allow deeper levels of nesting for pdf builds * Switch cloud layer to use proxy for DNS * from\_conf: fix handling service names with dashes * Use Resource layer for compute KeyPairs 0.31.1 ------ * Set xenapi\_use\_agent to "False" instead of false * Use Resource layer for network SecurityGroups * Add access alias (aka) for the resource attributes * URL encode swift objects endpoints 0.31.0 ------ * baremetal: raise more specific ResourceFailure in wait\_for\_\* methods * Use Resource layer for next compute methods * Pin to latest os-service-types * Add release note for new disable service functionality * Minor fixups from from\_conf changes * Handle oslo.config exceptions in from\_conf * Support deleting all routes in update\_router * Use Resource layer in cloud for SecurityGroups of server 0.30.0 ------ * Get rid of unused \_OpenStackCloudMixin.get\_region * Support Proxy-specific region\_name * Make factory for a CloudRegion from CONF objects * Use Resource layer for the compute Hypervisors and Images * Use Resource layer for compute AZ and Aggregates * Support skipping unknown QP 0.29.0 ------ * Update Limestone Networks vendor config * baremetal: allow updating name and extra fields of an allocation * Add ability to provide qos\_policy\_id for port * Adding dns\_domain parameter into create\_network * Cap sphinx for py2 to match global requirements * Link to baremetal API reference from patch\_node * Add image.stage methods * Add support for vendor hooks * Replace use of log.warn with log.warning * Extract image download method into a mixin 0.28.0 ------ * Continue refactoring of the image * Workaround older octavia version discovery * Skip attaching FloatingIP if it is already attached * Add support for all\_tenants in OpenStackInventory * fixing timing * Uncap jsonschema * Add
"name" filter in "list" call when retrieving a single register * Update compute.server resource * Deduplicate next-page URL's query params * Finish updating links to point to opendev * OpenDev Migration Patch * Support for the baremetal introspection service * Remove now unused task\_manager file * Update baremetal to use proxy logger * Actually pass on network\_data when building configdrive * Return None from get\_server\_by\_id on 404 * Try to fix the masakari CI job * Support microversion 2.61 for nova flavors * Add logger to Proxy object * Removing region La1 from the list of regions * baremetal: Add support for mkisofs and xorrisofs for configdrive * Add floating IP port forwarding related methods * Clarify error if no version can be found * Bail earlier on a version mismatch for a supported service * Expand on a brief error message * Move Orchestration methods into Proxy * Expose locked status for Server * Remove and rearrange object bonged proxy tests * Move set\_temp\_url\_key logic into resource objects * Add support for generating form-post signatures * Allow replacing service implementation * Cleanup split of openstackcloud * Make PATCH a first class operation and support it for baremetal * Do not disregard tags when updating stacks * Adds missing "params" attribute for creating a Mistral workflow execution * Split OpenStackCloud into reasonable pieces * add python 3.7 unit test job * Update master for stable/stein * Fix wait\_for\_server docstring * Create runtime descriptor for new service * Add unit tests for connection.add\_service * Add proxy API reference to baremetal user guide * baremetal: implement the correct update of the maintenance\_reason field * Use auth\_url as identity endpoint when not project scoped * Replace openstack.org git:// URLs with https:// * Collect request stats * Collapse OpenStackSDKAdapter into Proxy * added support for binding:profile parameter in create\_port/update\_port * Revert "Revert "Replace TaskManager with a 
keystoneauth concurrency"" 0.26.0 ------ * Revert "Replace TaskManager with a keystoneauth concurrency" * Move pep8 requirements in to test-requirements * Fix some typos * Deprecate ServerDetails class * Remove outdated devstack section from docs * Swap human-facing links to use opendev.org * Deprecate VolumeDetail and BackupDetail classes * Move object methods to object\_store proxy * Add support to get recovery workflow details * baremetal: support server-side configdrive building (API 1.56) * Make tox tips job actually run sdk tests * Replace TaskManager with a keystoneauth concurrency * Add DNS support * Get rid of setUpClass and block it for forever 0.25.0 ------ * Add image.service\_info resources * Add image tasks schema methods * Add glance image import support * Use retriable\_status\_codes in image upload * Move image methods to sdk image proxy * Tweak find\_image method to search in hidden images * Add agent property: resources-synced * Use mock context in test\_fwaas * Add image.task resource * baremetal: support network\_data when building configdrive * Add missing py37 and corrected default envlist * Fix syntax error with exception handling * handle "paginated" argument in test\_list properly * Fix minor issues in the baremetal proxy docs * Add image attributes from v2.7 * Make sure we pick flavors with disk * Add image.schema resource * baremetal: support for allocation API * baremetal: implement set\_node\_power\_state in the proxy * Stop mocking method in fwaas test * Rename compute.service.zone to availability\_zone * Make all resource locations process project\_id * Added server diagnostics 0.24.0 ------ * Add support for bodyless commits * Add Octavia (load\_balancer) amphora API * Add Octavia (load\_balancer) flavor API * Add Octavia (load\_balancer) flavor profile API * Add Octavia (load\_balancer) provider API support * Add Octavia (load\_balancer) load balancer failover * Fix resource deletion in clustering * Added Octavia load balancer and 
listener stats * Adds tags support for Octavia (load\_balancer) * Fix indentation for new pycodestyle E117 * fix typo * Use pagination detection by default * Fix raise create\_server and attach to a network given a net-name param * Fixes for Unicode characters in python 2 requests * Adds prefixlen to the request body when creating subnets * Support dict of links in pagination detection * use overriden base\_path in remaining CRUD operations * Add a non-voting job with metalsmith * Update cirros version for functional tests * Fixed incorrect exception raising in configdrive generation * Add baremetal Node fields from versions 1.47 - 1.49 * Return retries on HTTP CONFLICT to baremetal.attach\_vif\_to\_node 0.23.0 ------ * Document "Role Assignment Operations" * Fix for not released thread in get\_session\_client * Fix for not released thread in service\_description * implement identity v3 Proxy group role management * Add block\_storage v3 API support * Fix/Add : Identity V3 validate user role * Compute location properly in server * Bug : identity v3 Proxy role assignments only support instances * Add network segment range resource * Add port property: port-resource-request * Restrict inventory test to devstack-admin * Stop running grenade-py3 * Skip v2 block-storage tests when service is not found * implement identity v3 Proxy "unassign\_project\_role\_from\_user" * Skip block storage v2 functional tests for a minute * Fix pagination key detection * Fix the misspelling of "configuration" * fix typos * Unpin dogpile.cache * Rework orchestration to add update preview * Add possibility to override base\_path for resource operations 0.22.0 ------ * Drop self.conn from base.TestCase * Start using direct REST in normalize tests * Properly munch for resource sub-dicts * Turn off unneeded devstack services * Import code for building ironic-compatible configdrives * Adds kwargs support when creating a Neutron subnet * Fix dogpile.cache 0.7.0 interaction 0.21.0 ------ * Avoid 
dogpile.cache 0.7.0 * Change openstack-dev to openstack-discuss * Add host aggregate missing functions * Fix requesting specific fields from ironic * Deal with double-normalization of host\_id * Add propagate\_uplink\_status to port * Fix query parameters of network.port\_forwarding * Change openstack-dev to openstack-discuss * block\_storage.backup func tests to use configurable timeout * Replace neutron-grenade job with grenade-py3 * Change openstack-dev to openstack-discuss * Support non-public volume types * Move server munch transformation into normalize * object\_store: exposes the prefix parameter * Add support for octavia's resuorces quota * Transform server with munch before normalizing * syntax errors and undefined exceptions in service\_description.py * Refactor tag support 0.20.0 ------ * Support remote vendor profiles * Slightly refactor vendor profile loading * Add CRUD methods for Neutron Port Forwarding * do not force interface=admin for identity api v3 * Update link address for vendor support * Check result of server metadata operation * Add functional tests for masakari * Make timeouts in functional tests configurable * Change approach to detailed listings of baremetal resources * Add missing properties for role * Add missing seperator between words * Register proxy directly in add\_service * implement block-storage backup resource * Fix some spelling in documentation * new auth\_url for ELASTX * Advancing the protocal of the website to HTTPS in compute.rst * Fix neutron endpoint mangling * Add wait functions to orchestration proxy * Add a \_\_main\_\_ handler, version command * Test python2 with py27 * Document "insecure" flag * Remove unused Task classes * Shift swift segment async code out of adapter * [Trivial Fix] Correct spelling error of "bandwidth" * Update min tox version to 2.0 * Remove mocking workaround from adapter * Explicit set capabilities in VolumeDetail and SnapshotDetail * Fix bugs in debugging with Tox * Fix the conflict of 
urlparse between python2 and python3 * Add networks to Limestone vendor * Apply list filter fix to servers and floating ips too 0.19.0 ------ * Remove setup.py check from pep8 job * Filter ports in list\_ports when batching is in effect * Make delete\_unattached\_floating\_ips return a count * Fix latest flake8 issues * Use sdk for list\_servers * Make Connection a context manager * Add close method to shutdown threadpool * Added basic CRUD functionality around Host Aggregates * Add port\_security\_enabled to create\_port valid kwargs * Added assign function to identity v3 proxy * Fix incorrect use of flake8:noqa * Add doc depends to tox releasenotes environment * Call pre/post run task calls from TaskManager.submit\_task() * Don't pass disk\_format or container\_format to image task upload * Use python3 format syntax for citycloud * Stop running shade tests * Add support for per-service rate limits * Add vnic\_type to create\_port valid kwargs 0.18.1 ------ * Don't start task managers passed in to Connection * Fix upload of Swift object smaller than segment limit (create\_object) 0.18.0 ------ * Add all\_projects as a preferred alias for all\_tenants * Support v4-fixed-ip and v6-fixed-ip in create\_server * Add limit CRUD support * Add registered limit CRUD support * Update sphinx extension logging * Add stackviz processing to functional tests * Move wait\_for\_baremetal\_node\_lock to the baremetal proxy * Convert inspect\_machine to use the baremetal proxy * Adding two new regions and dynamic auth\_url based on region name * Update ElastX cloud profile * Update Auro cloud profile * Rearrange shade image code * Revert the Proxy metaclass * Make it clear that OpenStackCloud is a mixin * Start shifting cloud object-store methods to proxy * Remove all the deprecated stuff * Switch bare metal NIC actions in OpenStackCloud to baremetal Proxy calls * Use network proxy in openstack.cloud * Remove duplicate code * openstackcloud.py: Implement FWaaS wrapper methods * 
cloud: rename with deprecation validate\_node -> validate\_machine * Wire in retries for all baremetal actions * Correct updating baremetal nodes by name or ID * Add some warnings and clarifications for discovery * Make RateLimitingTaskManager the TaskManager * Import rate limiting TaskManager from nodepool * Remove api version default values * Use discovery instead of config to create proxies * Set endpoint\_override from endpoint with noauth * Remove profile * Support firewall service for SDK * Clean up python3 test and split networking into a job * Add functional tests for clustering * Clarify error message is from nova * Format URL when updating image props in Glance v1 * Add compute API info and fix provider names * Update vendor support info for vexxhost * Update vendor support info for switchengines * Update vendor support info for ecs * Update vendor support info for catalyst * Restore timeout\_scaling\_factor * Fix location region field in docs * Run all tasks through the threadpool * Update the URL in doc * Turn down stevedore and urllib logging * Handle empty values in regions * Update .zuul.yaml * Update baremetal objects with fields added up to Rocky * Add sjc1 to vexxhost profile * Add support for configured NAT source variable * Explicitly set logging levels for external libs * Implement network update * Normalize security groups when using Neutron * Normalize image when using PUT on Glance v2 * Fix typo * Consolidate cloud/base.py into functional/base.py * Implement volume update * Allow JMESPath on searching networking resources * Allow search on objects * Listing objects to return Munch objects * Allow search on containers * Change the method of role update * Fix list\_recordsets to deal with top-level key * baremetal: implement validate\_node * Support bare metal service error messages * baremetal: support newer microversions in {get,update,patch}\_machine * Run bifrost integration test jobs * compute: fix typo in update\_security\_groups() * 
Listing containers to return Munch objects * Invalidate cache upon container deletion * Handle missing endpoint\_data in maximum\_supported\_microversions * Add the ability to extend a volume size * Test \_alternate\_id logic * switch documentation job to new PTI * import zuul job settings from project-config * Alias NotFoundException to ResourceNotFound * Remove the duplicated word * baremetal: add support for VIF attach/detach API * Use the base Resource's JSON patch support in Image * Correct update operations for baremetal * Add simple create/show/delete functional tests for all baremetal resources * Fix the heat template resource more cleaner * Make resource a dict subclass usable by shade layer * Remove special handling of stacks * Add computed attribute type and location to base resource * Rename Resource get and update to not clash with dict * Add more options to enable ansible testing feature * Stop using the -consumer devstack jobs * Fix to\_dict recursion issues with circular aliases * Stop calling get\_all\_types when service-type is None * Don't wait for task in submit\_task * Update storyboard links to use name * fix 2 typos in documentation * Pass microversion info through from Profile * python-shade expose MTU setting * Adds toggle port security on network create * Add a simple baremetal functional job * Add support for static routes * Handle image and object key metadata for shade transition * Update config doc url to point to openstacksdk * Remove the auto-creation of containers in create\_object * Add support for streaming object responses * Add method for returning a raw response for an object * Update create\_object to handled chunked data * Support for microversions in baremetal resources * Support for microversions in base Resource * Update reno for stable/rocky * Support passing profile to get\_one * Send disk\_over\_commit if nova api < 2.25 * Docs: Remove duplicate content in connection page * Docs: Include CloudRegion class * baremetal: 
correct the default timeout in Node.set\_provision\_state 0.17.0 ------ * Add create\_directory\_marker\_object method * Add ability to pass data to create\_object * Add flag for disabling object checksum generation * Use valid filters to list floating IPs in neutron * Add missing release note about vexxhost auth\_url * Add missing swift docstrings * Add /v3 to the auth\_url for vexxhost * Implement signature generation functionality * Add set\_provision\_state and wait\_for\_provision\_state for baremetal Node * Run ansible tests against specific public cloud * meta: don't throw KeyError on misconfigured floating IPs 0.16.0 ------ * openstackcloud: properly handle scheduler\_hints * Add task manager parameter to Connection * Add Magnum /certificates support * Invalid link of doc reference * Support to wait for load balancer to be ACTIVE * Fix Magnum cluster update 0.15.0 ------ * Add support for processing insecure * Fix for passing dict for get\_\* methods * Handle Munch objects in proxies * Only send force parameter to live migration if supported * Add vip\_qos\_policy\_id options for loadbalancer * Implementing solution for 2002563 issue from story board * Add connection backreference to proxy instances * Add release note link in README * Hardcode v2.0 onto end of neutron endpoints * Add Magnum cluster support * Improve Magnum cluster templates functions * Finish migrating image tests to requests-mock * Convert image\_client mocks in test\_shade\_operator * Convert test\_caching to requests-mock * Convert domain params tests to requests\_mock * Move clustering additions from shade directories * Switch to providing created\_at field for servers * fix misspelling of 'server' * Adds Senlin support to openstacksdk * Support port binding extended attributes for querying port * Fix path for Limestone Networks vendor file * Fix clustering profile type miss list operation * Switch VEXXHOST to 'v3password' auth\_type * Change 'Member' role reference to 'member' * Add 
tests to verify behavior on '' in self link * Add hypervisor details to hypervisors list if requested * Add testing of availability\_zones() "details" argument * Allow configuring status\_code\_retries and connect\_retries via cloud config * Add some backoff to find\_best\_address 0.14.0 ------ * Turn OSC tips jobs non-voting * Throw an error on conflicting microversion config * Rename service\_key to service\_type * Pass default\_microversion to adapter constructor * Honor service-type aliases in config * fix tox python3 overrides * Add ansible functional tests on stable-2.6 * Modify the unhelpful error message when delete network * Modify the error message when unsetting gateway and setting FIP * Add ansible functional tests * Add 'port\_details' to Floating IP * rename vars to defaults to allow overriding in ansible tests * allow passing ansible variables to ansible tests * Several improvements to resource.wait\_for\_status * Added few image properties to Image class * Fix filter style consistency for keystone assignment API * Allow explicitly setting enable\_snat to either value * baremetal: refuse to inspect associated machines * Enable bare metal unit tests * Avoid globally modifying yaml library * Add get\_volume\_limits() support * Reenable osc-functional-devstack-tips and neutron-grenade * Remove default values of router's is\_ha and is\_distributed arguments * Defer all endpoint discovery to keystoneauth * Decode additional heat files * Add ipaddress and futures to lower-constraints * Bump default timeout values * Fix H103 Apache license header check * Remove D exclusions from flake8 config * Add comment about W503 being skipped * Add python 3.6 jobs * Add nodepool-functional-py35-src job * Fix F405 errors * Clean up floating ip tests * Avoid raising exception when comparing resource to None * Bugfix for block\_storage not selecting the correct proxy * Fixing bug where original and new dicts would always be the same * Change clustering example test create 
parameter * add missing attribute in LBaaS v2 Pool API * close files after open in unit/base * Honor endpoint\_override for get\_session\_client * Drop bogus attributes from network port resource * Flavor: added is\_public query parameter and description property * pypy is not checked at gate * Add Limestone Networks vendor info * Remove DataCentred from list of vendors 0.13.0 ------ * Don't assume a full config dict * Fix bugtracker and documentation references * Fix openstack-inventory * Trivial: Update pypi url to new url * Fix typo in README.rst * Add functional tests for Neutron DNS extension * Add support for DNS attributes for floating IPs * Temporarily disable neutron-grenade * Fix DNS Recordset CRUD * Add timeout options for listener * Allow members to be set as "backup" * Allow cascade deletion of load balancer * Fix wait for futures append 'result' error * Strip the version prefix from the next link for pagination * Fix resource not exist the resource.status error * create\_subnet: Add filter on tenant\_id if specified * Add release note for added masakari support * Use 'none' auth plugin * Refactor \_get\_version\_arguments * Remove the need for OpenStackConfig in CloudRegion * Run normalize\_keys on config for session codepath * Add image\_format for VEXXHOST profile * add lower-constraints job * Add support for trunk ports and subports * Update python-openstacksdk references to openstacksdk * Rename python-openstacksdk to openstacksdk in zuul.yaml * fix doc title format error * Add instance\_ha service * Updated from global requirements * Fix response always being False * Add 409 ConflictException * Provide OpenStackConfigException backwards compat * Use defined version instead of service.version parameter * Fix 'block\_store' aliases define error * Fix TypeError in case of FloatingIP add and remove * Fix devstack tests * Replace old http links with the newest https ones in docs * Updated from global requirements * Allow not resolving outputs on get 
stacks * Redo role assignment list query filters 0.12.0 ------ * Network: Add tag support for floating ip * Network: Add tag support for security group * Network: Add tag support for QoS policy * Prepare for os-client-config wrapper * Updated from global requirements * Run os-client-config tests on sdk changes * Fix private\_v4 selection related to floating ip matching * Temporarily disable osc-functional-devstack-tips * Functional test for set\_tags on Neutron resources * Update the invalid url in pages * Fix coverage running * Shift tag resource definition to TagMixin * Add support for dns-domain * Run examples tests with functional tests * Fix clustering force delete return error * Update clustering module's \_proxy comment message * Updated from global requirements * Update all test base classes to use base.TestCase * Update base test case to use base from oslotest * Use get\_session\_client in Connection * Calculate name in CloudRegion * Generate proxy methods from resource objects * Rename BaseProxy to Proxy * Shift config exceptions to openstack.exceptions * Collect tox and testr output in functional tests * Fix functional test about port * modify typos of word password * Add shade jobs to openstacksdk zuul config * fix misspelling of 'volume' * Gate on osc-functional-tips * Fix issue with missing url parameters * Use keystone NoAuth plugin for auth\_type none * Add get\_client\_config method to CloudRegion * Fix TypeError for overrided get methods * Add server-side names to query param checking * Let enforcer.py work under both py2 and py3 * Remove openstack\_cloud factory function * resource: don't early terminate list * Implement list projects for user * Add OpenStackCloud object to Connection * Ensure Connection can be made from keyword arguments * Add a descriptor object for each service proxy * orchestration: fix typo in doc * Rename unit test cluster to clustering * Provide compatibility for people passing raw sessions * Update reno for stable/queens 
0.11.0 ------ * Move profile helper method to openstack.profile * Add betacloud to the vendors * Clean up the release notes a bit * Add supported method for checking the network exts * Baremetal NIC list should return a list * Add retry logic mechanism * Updated from global requirements * Remove inner\_exceptions plumbing * Use devstack functional test base job * Throw OpenStackCloudCreateException on create errors * Pass through all\_projects for get\_server * Fix batching for floating ips and ports * Raise error when supplying invalid query params * Implement availability\_zone\_hints for networks and routers * Add some docs about not using Profile * Adds get encrypted password support * Do not apply format expansions to passwords * Add resource2/proxy2 wrappers with deprecations * Rename resource2 and proxy2 to resource and proxy * Remove resource and proxy * Update type conversion to handle subtypes more better * Update docs and pep8 tox environments * Add clustering guides node file, examples node code * Add clustering guides cluster file, examples cluster code * Fix clustering detach policy describe error * Add OSC functional tips jobs * Fix an error about listing projects in connection doc * Make floating IP to be prefered over fixed when looking for IP * Make meta.find\_best\_address() more generic * Fixed a few nits in the README * Fix typo in the external service loader code * Clean up a bit after the mapping inversion * v2 image update fix * Fix releasenotes builds * Handle resource deletion properly * Add clustering guides file, Examples code * Replace clustering examples code cluster to clustering * Invert the attribute mapping * Add clustering guides receiver file,examples receiver code * Move openstack\_cloud helper functions * Use version definition from openstack.version * Make sure we use config defaults in profile compat code * change spell error * Migrate object\_store to resource2/proxy2 * Add deprecation warnings to profile * modify spelling 
error of resource * Update Release Notes links and add bugs links * fix misspelling of 'configuration' * Remove message v1 support * Remove metric service * Remove meter service * Add ability to register non-official services * Rationalize logging helpers and docs * Use Zuul v3 fetch-subunit-output * Change update/create method options * Remove the deprecated "giturl" option 0.10.0 ------ * Re-enable octavia functional tests * Remove legacy client factory functions * Prefer links dicts for pagination * Port wait\_for\_ methods to use iterate\_timeout * Add function to make CloudRegion from session * Rename CloudConfig to CloudRegion * Updated from global requirements * Remove name from zuul project stanza * Update for new docs PTI * Start using Connection in openstack.cloud * Remove self argument from connect helper function * Add \_query\_mapping to identity resources * Allow to pass filters like domain to find\_project/user * Add reno for tag support on heat stacks * Remove python-ironicclient * Make the get\_service\_type() overrides tolernat of no defaults * Add FloatingIP qos\_policy\_id attribute * Updated from global requirements * Updated from global requirements * Add tag support to create\_stack * Remove -U from pip install * Add osc-tox-unit-tips jobs * Shift image tests from test\_operator * Added nat\_source flag for networks * Update make\_rest\_client to work with version discovery * Protect against p\_opt not having prompt attribute * Treat clouds.yaml with one cloud like envvars * Complete move of baremetal machine tests * Add method to cleanup autocreated image objects * Cleanup objects that we create on behalf of images * Remove openstack-tox-pypy jobs * Set empty Tag list if Network Tag API extension not supported * Avoid tox\_install.sh for constraints support * Fix py35 and pypy tox env * Update the shade-merge document * Stop osSDK mangling Swift metadata keys * Add pools attribute to load balancer heath monitor * Remove use of tox-siblings 
role * Document current\_user\_id in a release note * Remove reference to context-managers from release note * Add helper property to get the current user id * Add ability to work in other auth contexts * Sort image update results before comparing * Adds support to retrieve cinder backend pools information * Add subnet\_id property for FloatingIP * Add block\_store support single volume display image message * Remove setting of version/release from releasenotes * Add cluster support force delete parameter when cluster/node delete * Remove ansible functional tests for now * Updated from global requirements * Avoid default mutable values in arguments * Remove bogus and unneeded role from job definition * Add notes about moving forward * Move task\_manager and adapter up a level from cloud * Rework config and rest layers * Migrate to testtools for functional tests * Support filtering servers in list\_servers using arbitrary parameters * Fix regression for list\_router\_interfaces * Handle glance image pagination links better * Fix magnum functional test * Add jobs for Zuul v3 * Move role normalization to normalize.py * Allow domain\_id for roles * Add method to set bootable flag on volumes * Image should be optional * Add group parameter to create\_server * Fix image task uploads * Temporarily disable volume and os\_image functional tests * Move shade and os-client-config python content 0.9.19 ------ * Add support for network quota details command * Consume publish-openstack-sphinx-docs * Cleanup test-requirements * Imported os-client-config as a subtree * Import shade as a subtree * Remove 'conditions' section in heat stack template * Update links in CONTRIBUTING.rst * Updated from global requirements * Updated from global requirements * Fix requires\_floating\_ip * Record server.id in server creation exception * Stop using openstack-doc-build * Updates for stestr * Add support for network quota details command * Add pypi and doc publication templates * Updated from 
global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Fix search\_groups * Remove EndpointCreate and \_project\_manager * Remove use of legacy keystone client in functional tests * Updated from global requirements * Updated from global requirements * Remove keystoneclient dependency * De-client-ify Endpoint Create * Refactor the create endpoint code * Reorganize endpoint create code * Switch to constraints version of tox job * Convert test\_baremetal\_machine\_patch to testscenarios * Add openstack-doc-build to shade * Switch to normal tox-py35 job * Switch to using stestr * Migrate machine tests related to state transitions * Migrate machine inspection tests to requests\_mock * Add getting of QoS rule type details * Migrate additional machine tests * De-client-ify Endpoint Update * De-client-ify List Role Assignments * De-client-ify Endpoint List * De-client-ify List Roles for User in v2.0 * De-client-ify Role Grant and Revoke * De-client-ify Endpoint Delete * De-client-ify User Password Update * Begin converting baremetal node tests * Remove improper exc handling in is\_user\_in\_group * De-client-ify Remove User from Group * Correct baremetal fake data model * De-client-ify Check User in Group * De-client-ify Add User to Group * Use direct calls to get\_\_by\_id * De-client-ify User Update * Use new keystoneauth version discovery * Fix typo in tox.ini * Updated from global requirements * Updated from global requirements * Updated from global requirements * Add tox\_install.sh to deal with upper-constraints * Support domain\_id for user operations * Add domain\_id to groups * Add handling timeout in servers cleanup function * Fix handling timeouts in volume functional tests cleanup * Connection doc add arguments * Fix switched params 0.9.18 ------ * Add parameter\_groups and conditions params for StackTemplate * Allow filtering network ports by fixed\_ips * Switch to \_is\_client\_version in 
list\_services * De-client-ify Service Delete * De-client-ify Service Update * Fix cleaning of Cinder volumes in functional tests * De-client-ify Service List * Add doc8 rule and check doc/source files * Fix some typos * Fix octavia l7rules * Update links in README * Add option to force delete cinder volume * fix the bug that cannot create a listener by openstacksdk * Introduce L7Rule for Octavia (load balancing) * Introduce L7Policy for Octavia (load balancing) * Updated from global requirements * Introduce Health Monitor for Octavia * Add required pool\_id property to HealthMonitor * Updated from global requirements * fix the bug that cannot create a pool by openstacksdk * Updated from global requirements * Introduce Member for Octavia (load balancing) * Fix determining if IPv6 is supported when it's disabled * Don't determine local IPv6 support if force\_ip4=True * Fix stack\_file function return body * Introduce Pool for Octavia (load balancing) * Introduce Listener for Octavia (load balancing) * Consolidate client version checks in an utility method * Support node-adopt/preview CLI * Add functional tests for Neutron QoS policies and rules * Updated from global requirements * DataCentred supports Keystone V3 and Glance V2 * Support to get resource by id * Make get\_server\_console tests more resilient * Update globals safely * Update the documentation link for doc migration * Remove OSIC * Make QoS rules required parameters to be not optional * Use valid\_kwargs decorator in QoS related functions * Add support for get details of available QoS rule type * Use more specific asserts in tests * Add Neutron QoS minimum bandwidth rule commands * Update reno for stable/pike * Update reno for stable/pike * Add Neutron QoS dscp marking rule commands * Updated from global requirements * Updated from global requirements * Updated from global requirements * router: Ignore L3 HA ports when listing interfaces * Initial commit of zuulv3 jobs * Manually sync with g-r * Update 
external links which have moved * Updated from global requirements * Update the documentation link for doc migration * Replace six.itervalues with dict.values() * Consolidate the use of self.\_get\_and\_munchify * De-client-ify Role Delete * De-client-ify Role List * De-client-ify Role Create * De-client-ify Group Delete * De-client-ify Group Update * De-client-ify Group List * De-client-ify Group Create * Fix comment in services function * Updated from global requirements * Don't remove top-container element in the adapter * Add config param for cluster object * Update load\_balancer for v2 API * Support to node-adopt and node-adopt-preview * Updated from global requirements * Improve doc formatting a bit * Unify style of 'domain' field * Added useful links to README * Add Neutron QoS bandwidth limit rule commands * De-client-ify Service Create * Add debug to tox environment * Remove hard-coding of timeout from API * Make sure we don't fail open on bad input to validate * Make sure we pass propert dicts to validate * Add flag to include all images in image list * Add support for list available QoS rule types * Add validation of required QoS extensions in Neutron * De-client-ify Domain Search * De-client-ify Domain Get * De-client-ify Domain List * De-client-ify User Create * Use the right variable name in userdata encoding * Add searching for Neutron API extensions * Add Neutron QoS policies commands * De-client-ify Domain Update and Delete * De-client-ify Domain Create * switch from oslosphinx to openstackdocstheme * reorganize docs using the new standard layout * use openstackdocstheme html context * Replace six.iteritems() with .items() * Remove dead links about OpenStack RC file * Don't remove top-container element for flavor, zones and server groups * Updated from global requirements * Updated from global requirements * Don't remove top-container element for flavors and clusters * Add query filters for find\_network * Project update to change enabled only 
when provided * switch from oslosphinx to openstackdocstheme * turn on warning-is-error in documentation build * rearrange existing documentation to follow the new standard layout * Fix mismatch between port and port-id for REST call * Remove a direct mocking of \_image\_client * Fix image normalization when image has properties property * Fix delete\_ips on delete\_server and add tests * Fix config\_drive, scheduler\_hints and key\_name in create\_server * Don't fail hard on 404 from neutron FIP listing * Only search for floating ips if the server has them * Don't try to delete fips on non-fip clouds * Return an empty list on FIP listing failure * Don't remove top-container element for server REST API calls * base64 encode user\_data sent to create server * Remove novaclient from shade's dependencies * Translate final nova calls to REST * Convert remaining nova tests to requests\_mock * Convert host aggregates calls to REST * Convert host aggregate tests to requests\_mock * Convert hypervisor list to REST * Convert hypervisor test to requests\_mock * Convert Server Groups to REST * Convert server group tests to requests\_mock * Convert FakeSecGroup to dict * Remove use of FakeServer from tests * Don't remove top-container element for user and project REST API calls * Convert keypairs calls to REST * Add normalization and functional tests for keypairs * Remove future document * Add text about microversions * Convert keypairs tests to requests\_mock * Convert list\_servers to REST * Convert list servers tests to requests\_mock * Remove some unused mocks * Break early from volume cleanup loop * Add some release notes we forgot to add * Retry to fetch paginated volumes if we get 404 for next link * docs: make the first example easier to understand * Properly expand server dicts after rebuild and update * Migrate non-list server interactions to REST * Increase timeout for volume tests * Skip pagination test for now * Fix title in Network Agent resource doc 0.9.17 
------ * Add compute support server live migrate operation * Fix urljoin for neutron endpoint * Added server console output method * Add compute support server backup operation * Remove get\_service method from compute * Remove py34 and pypy in tox * Replace six.iteritems() with .items() * Update tests for server calls that aren't list * Convert delete server calls to REST * Convert delete server mocks to requests\_mock * Convert get\_server\_by\_id * RESTify create\_server * Don't fetch extra\_specs in functional tests * Convert create\_server mocks to request\_mock * Add boot from volume unit tests * Cleanup volumes in functional tests in parallel * De-client-ify Project Update * De-client-ify Project Create * De-client-ify Project Delete * De-client-ify Project List * Don't remove top-container element for sec group REST API calls * Improve grant docs on when and how use domain arg * Don't remove top-container for stack and zone REST API calls * Updated from global requirements * Updated from global requirements * Rename obj\_to\_dict and obj\_list\_to\_dict * Don't remove top-container element for network REST API calls * Convert data from raw clients to Munch objects * Remove unneeded calls to shade\_exceptions * Don't remove top-container element for volume REST API calls * Fix update\_image unsupported media type * Remove support for py34 * Use get\_discovery from keystoneauth * De-client-ify User Ops * Add links to user list dict * Avoid keystoneclient making yet another discovery call * Use shade discovery for keystone * Updated from global requirements * Updated from global requirements * Fix py3 compatibility (dict.iteritems()) in object\_store * Migrate dns to new discovery method * Generalize version discovery for re-use * Pass hints to Cinder scheduler in create\_volume * Replace assertRaisesRegexp with assertRaisesRegex * Remove designate client from shade's dependencies * Add cluster support receiver update operation * Do less work when deleting a 
server and floating ips * Remove designateclient from commands related to recordsets * Add pagination for the list\_volumes call * Handle ports with no 'created\_at' attribute * Update test\_user\_update\_password to overlay clouds.yaml * Fix legacy clients helpers * Remove unused occ version tie * Add new parameter "is\_default" to Network QoS policy * Remove designateclient from commands related to zones * Add documentation about shade's use of logging * Add novaclient interactions to http\_debug * Set some logger names explicitly * Add logging of non-standard error message documents * Log specific error message from RetriableConnectionFailure * Don't pop from os.environ * Updated from global requirements * Fix python3 issues in functional tests * Add time reporting to Connection Retry message * Log cloud name on Connection retry issues * Use catalog endpoint on any errors in image version discovery * Fix cluster action list filter * Pick most recent rather than first fixed address * Allow a user to submit start and end time as strings * Fix get\_compute\_limits error message * Fix get\_compute\_usage normalization problem * update params about cluster filter event * Find private ip addr based on fip attachment * Network tag support * Add ability to run any tox env in python3 * Fix issue with list\_volumes when pagination is used * Add compute support server migrate operation * Make sure security\_groups is always a list * Updated from global requirements * Remove direct uses of nova\_client in functional tests * Keep a singleton to support multiple get\_config calls * Updated from global requirements * Remove designateclient mock from recordset tests * Convert list\_server\_security\_groups to REST * Remove two unused nova tasks * Include error message from server if one exists * Optimize the case of versioned image endpoint in catalog * Fix broken version discovery endpoints * Remove cinderclient from install-tips.sh * Fix tips jobs and convert Nova Floating IP 
calls * Convert first ironic\_client test to REST * Move mocks of designate API discovery calls to base test class * Fix exception when using boot\_from\_volume for create\_server * Revert "Revert "Use interface not endpoint\_type for keystoneclient"" * Revert "Use interface not endpoint\_type for keystoneclient" * Move legacy client constructors to mixin * Add ironicclient to constructors list * Fix pep8 errors that were lurking * Remove cinder client * Make deprecated client helper method * Add 'public' as a default interface for get\_mock\_url * Add super basic machine normalization * Remove designateclient mock from zones tests * Remove direct calls to cinderclient * Add "Multi Cloud with Shade" presentation * Use REST API for volume quotas calls * Add pprint and pformat helper methods * Add helper method to fetch service catalog * extend security\_group and \_rule with project id * Remove neutronclient from shade's dependencies * Remove cinderclient mocks from quotas tests * Fix Neutron floating IP test * Use REST API for volume snapshot calls * Remove usage of neutron\_client from functional tests * Enable neutron service in server create and rebuild tests * Replace neutronclient with REST API calls in FIP commands * Updated from global requirements * Add assert\_calls check testing volume calls with timeout enabled * Remove has\_service mock from Neutron FIP tests * Remove cinderclient mocks from snapshot tests * Remove neutronclient mocks from floating ips tests * Add 'service list' resource for senlin * Get endpoint versions with domain scope session * Use REST API for volume attach and volume backup calls * Use https instead of http in cluster examples * Specify alternate\_id in network quota * Updated from global requirements * Replace neutronclient with REST API calls in ports commands * Add direction field to QoS bandwidth limit * Don't get ports info from unavailable neutron service * Removing unused fake methods and classes * Replace neutronclient 
with REST API calls in quotas commands * Replace neutronclient with REST API calls in security groups commands * Updated from global requirements * Use REST API for volume delete and detach calls * Use REST API for volume type\_access and volume create * Refactor the test\_create\_volume\_invalidates test * Replace neutronclient with REST API calls in router commands * Move REST error\_messages to error\_message argument * Remove two lines that are leftover and broken * Convert test\_role\_assignments to requests mock * Remove neutronclient mocks from sec groups tests * Fix document warnings * functional tests: minor cleanup * Remove neutronclient mocks from quotas tests * Remove neutronclient mocks from ports tests * Add optional error\_message to adapter.request * Fix interactions with keystoneauth from newton * Add in a bunch of TODOs about interface=admin * Set interface=admin for keystonev2 keystone tests * Port unversioned Version resources to resource2 * Port metric v1 to resource2 0.9.16 ------ * Deprecate Message v1 * Port image v1 to resource2 * Port identity v2 to resource2 * Port database v1 to resource2 * Add a \_normalize\_volume\_backups method * Correct Network \`ports\` query parameters * Use requests-mock for the volume backup tests * Remove neutronclient mocks from router tests * Replace neutronclient with REST API calls in subnet commands * Define a base function to remove unneeded attributes * Remove neutronclient mocks from subnet tests * Replace neutronclient with REST API calls in network commands * Move router related tests to separate module * Updated from global requirements * Move subnet related tests to separate module * Fix list\_servers tests to not need a ton of neutron * Remove neutronclient mocks from network create tests * Make \_fix\_argv() somewhat compatible with Argparse action='append' * Remove neutronclient mocks from network exceptions tests * Remove neutronclient mocks from network delete tests * Remove neutronclient mocks 
from network list tests * Use requests-mock for the list/add/remove volume types tests * Fix create/rebuild tests to not need a ton of neutron * Don't do all the network stuff in the rebuild poll * Move unit tests for list networks to test\_network.py file * Include two transitive dependencies to work around conflicts * Use requests-mock for all the attach/detach/delete tests * Add data plane status support to Network Port obj * Remove stray line * Revert "HAProxy uses milliseconds ..." * Strip trailing slashes in test helper method * Clarify some variable names in glance discovery * Allow router related functions to receive an ID * \_discover\_latest\_version is private and not used * Remove extra unneeded API calls * Change versioned\_endpoint to endpoint\_uri * Futureproof keystone unit tests against new occ * Actually fix the app\_name protection * Replace nova security groups with REST * Transition nova security group tests to REST * Remove dead ImageSnapshotCreate task * Pass in app\_name information to keystoneauth * Use REST for cinder list volumes * Add ability to pass in user\_agent * Upgrade list volumes tests to use requests-mock * Updated from global requirements 0.9.15 ------ * Pass shade version info to session user\_agent * Enable warnings\_as\_errors in doc enforcer * Add is\_profile\_only to Cluster resource * Use keystone\_session in \_get\_raw\_client * Add docs for volume\_attachment compute methods * Add support for volume attachments in compute v2 * Don't fail on security\_groups=None * Updated from global requirements * Stop defaulting container\_format to ovf for vhd * Don't run extra server info on every server in list * Add 'project\_id' to Server query parameters * Use REST for neutron floating IP list * Clean up some errant doc warnings/errors * Add get\_stack\_\* methods to documentation * Migrate create\_image\_snapshot to REST * Introduce Base for Octavia (load balancing) * Add ability to configure extra\_specs to be off * Migrate 
server snapshot tests to requests\_mock * Add test to validate multi \_ heat stack\_status * Fixed stack\_status.split() exception * Add server security groups to shade * Updated from global requirements * Fix doc build if git is absent * Add bare parameter to get/list/search server * Docs: add a note about rackspace API keys * Update tox build settings * Take care of multiple imports and update explanation * Reenable hacking tests that already pass * Enable H201 - don't throw bare exceptions * Enable H238 - classes should be subclasses of object * Fix a few minor annoyances that snuck in * Add vlan\_transparent property to network resource * Don't use project-id in catalog tests * Change metadata to align with team affiliation * Remove out of date comment * Filtering support by is\_router\_external to network resource * Move futures to requirements * Stop special-casing idenity catalog lookups * Find floating ip by ip address * Remove python-heatclient and replace with REST * Replace heatclient testing with requests\_mock * Add normalization for heat stacks * Add list\_availability\_zone\_names method * Switch list\_floating\_ip\_pools to REST * Strip out novaclient extra attributes * Convert floating\_ip\_pools unittest to requests\_mock * Migrate get\_server\_console to REST * Migrate server console tests to requests\_mock * Fix old-style mocking of nova\_client * Accept device\_id option when updating ports * Get rid of magnumclient dependency * attach\_volume should always return a vol attachment * wait\_for\_server: ensure we sleep a bit when waiting for server * delete\_server: make sure we sleep a bit when waiting for server deletion * Add designateclient to constructors list * Add StackFiles resource to orchestration v1 * Convert magnum service to requests\_mock * RESTify cluster template tests * Add normalization for cluster templates * Get the ball rolling on magnumclient * Use data when the request has a non-json content type * Cleanup some workarounds 
for old OCC versions * Expose ha\_state property from HA enabled L3 Agents * Remove type restrict of block\_device\_mapping * Add StackEnvironment resource to orchestration v1 * Shift some compute attributes within request body * StackTemplate resource for orchestration * Trivial: fix Template resource in orchestration * Avoid imports in openstack/\_\_init\_\_.py * add separate releasenotes build * Update sphinx and turn on warnings-is-error * Convert test\_identity\_roles to requests mock * Expose OS-EXT-SRV-ATTR:{hypervisor\_hostname,instance\_name} for Server * change test\_endpoints to use requests mock * Add port property: trunk\_details * OVH supports qcow2 * Add image download example * Depend on pbr>=2.0.0 * Fix the network flavor disassociate method * Convert test\_services to requests\_mock * Fix the telemetry statistics test * Only do fnmatch compilation and logging once per loop * Correct a copy/paste mistake in a docstring * Fix the telemetry sample test * Fix network quota test so it works on gate * Use interface not endpoint\_type for keystoneclient * Add support for bailing on invalid service versions * Put fnmatch code back, but safely this time * modify test-requirement according to requirements project * Replace keystone\_client mock in test\_groups * Use unicode match for name\_or\_id * Raise a more specific exception on nova 400 errors * Don't glob match name\_or\_id * Enable streaming responses in download\_image * [Fix gate]Update test requirement * Updated from global requirements * Update devstack config to point to a valid image * Rename ClusterTemplate in OpenStackCloud docs * Fix OpenStack and ID misspellings * Remove service names in OpenStackCloud docs * Add wait\_for\_xxx methods to cluster proxy * Change version of hacking in test-requirements * Reorganize cluster docs * Reorganize object\_store docs * Reorganize workflow docs * Reorganize network docs * Pass ironic microversion through from api\_version * Reorganize telemetry docs * 
Reorganize block store docs 0.9.14 ------ * Add missing attribute to Subnet resource * Add ability to skip yaml loading * keystone api v2.0 does not paginate roles or users * the role resource should not have put\_create=True * Fix the object store set metadata functional test * Remove unsupported telemetry create\_sample method * Add network flavor associate, disassociate to SDK * Fix problem with update including id * Support profile-only to cluster update * Fix the network auto allocate validate * Remove old telemetry capability * Remove unnecessary get\_id call in \_prepare\_request * Fix the network floating ip test for get * Fix the network service provider test * Fix the network quota tests * Fix the service profile meta info test * Fix the agent add remove test * Fix the network agent add remove test * Update the image used for functional tests * Implement metric docs * Fix function test for compute images * Convert test\_object to use .register\_uris * Convert use of .register\_uri to .register\_uris * Reorganize orchestration docs * Implement message docs * Reorganize key\_manager docs * Change request\_id logging to match nova format * Actually normalize nova usage data * Reorganize identity docs * Reorganize image docs * Reorganize database docs * Reorganize compute docs * Update intersphinx linking to python.org * Fix several concurrent shade gate issues * Reorganize bare\_metal docs * Privatize session instance on Proxy subclasses * Deprecate "wait\_for" methods on ProxyBase * Remove the keystoneclient auth fallback * Remove two remaining doc warnings * Add support for overriding mistral service type * Add helper scripts to print version discovery info * Wait for volumes to detach before deleting them * Deprecate port and ping methods in Network proxy * Add accessor method to pull URLs from the catalog * Convert use of .register\_uri to .register\_uris * Remove keystoneclient mocks in test\_caching for users * Remove mock of keystoneclient for 
test\_caching for projects * Remove mock of keystone where single projects are consumed * Rename demo\_cloud to user\_cloud * Add all\_projects parameter to list and search servers * Updated from global requirements * Convert test\_project to requests\_mock * convert test\_domain to use requests\_mock * Move mock utilities into base * Convert test\_users to requests\_mock * Add request validation to user v2 test * Enforce inclusion of public proxy methods in docs * Updated from global requirements * Convert first V3 keystone test to requests\_mock * Cleanup new requests\_mock stuff for test\_users * First keystone test using request\_mock * Add test of attaching a volume at boot time * Cleanup more Sphinx warnings during doc build * Add support for indicating required floating IPs * pass -1 for boot\_index of non-boot volumes * Adjust some proxy method names in bare\_metal * Adjust some proxy method names in cluster * Pass task to post\_task\_run hook * Rename ENDPOINT to COMPUTE\_ENDPOINT * Transition half of test\_floating\_ip\_neutron to requests\_mock * Start switching neutron tests * Added project role assignment * Port in log-on-failure code from zuul v3 * Honor cloud.private in the check for public connectivity * Cleanup various Sphinx warnings during doc build * Support globbing in name or id checks * Stop spamming logs with unreachable address message * Remove troveclient from the direct dependency list * Move nova flavor interactions to REST * Migrate flavor usage in test\_create\_server to request\_mock * Migrate final flavor tests to requests\_mock * Move flavor cache tests to requests\_mock * Transition nova flavor tests to requests\_mock * Add ability to create image from volume * Use port list to find missing floating ips * Process json based on content-type * Update reno for stable/ocata * fix location of team tags in README * Copy in needed template processing utils from heatclient * Fix exception parsing error * Add 'tags' property to orchestration 
stack 0.9.13 ------ * Add docs for the workflow service * Initial docs for bare-metal service * Upload images to swift as application/octet-stream * Add ability to stream object directly to file * Update coding document to mention direct REST calls * Fix error messages are not displayed correctly * Add project ID in QuotaDefault requests * Fix Setting Quotas in Neutron * Updated from global requirements * Skip discovery for neutron * Add helper test method for registering REST calls * Do neutron version discovery and change one test * Add raw client constructors for all the things * Replace SwiftService with direct REST uploads * Modified DHCP/Network Resource * Fix spin-lock behavior in \_iterate\_timeout * Fix typo for baremetal\_service\_type * Network L3 Router Commands * Add helper script to install branch tips * Revert "Fix interface\_key for identity clients" * Add support for Murano * Corrections in DHCP Agent Resource listing * Basic volume\_type access * Add OpenTelekomCloud to the vendors * Add support to task manager for async tasks * Updated from global requirements * Add workflow service (mistral) * Add cluster\_operation and node\_operation * Added list\_flavor\_access * Remove 3.4 from tox envlist * Use upper-constraints for tox envs * Removes unnecessary utf-8 encoding * Log request ids when debug logging is enabled * Honor image\_endpoint\_override for image discovery * Add support\_status to policy type and profile type * Rework limits normalization * Handle pagination for glance images 0.9.12 ------ * Add missing query parameters to compute v2 Server * Add support for Role resource in Identity v3 * Add support for using the default subnetpool * Remove unnecessary coding format in the head of files * Add filter "user\_id" for cluster receiver list * Add params to ClusterDelNodes action * Remove discover from test-requirements * Remove link to modindex * Add user\_id in resource class Action/Node * Fix exception name typo * Add failure check to 
node\_set\_provision\_state * Update swift constructor to be Session aware * Add test to verify devstack keystone config * Make assert\_calls a bit more readable * Update swift exception tests to use 416 * Make delete\_object return True and False * Switch swift calls to REST * Stop using full\_listing in prep for REST calls * Stop calling HEAD before DELETE for objects * Replace mocks of swiftclient with request\_mock * Enable bare-metal service * Proxy module for bare-metal service * Put in magnumclient service\_type workaround * Let use\_glance handle adding the entry to self.calls * Combine list of calls with list of request assertions * Extract helper methods and change test default to v3 * Make munch aware assertEqual test method * Extract assertion method for asserting calls made * Base for workflow service (mistral) * Change get\_object\_metadata to use REST * Update test of object metadata to mock requests * Add release notes and an error message for release * Port resource for bare-metal service * PortGroup resource for bare-metal service * Magnum's service\_type is container\_infra * Add docutils constraint on 0.13.1 to fix building * Add total image import time to debug log * Clear the exception stack when we catch and continue * Magnum's keystone id is container-infra, not container * Stop double-reporting extra\_data in exceptions * Pass md5 and sha256 to create\_object sanely * Updated from global requirements * Add user\_id in resource class Policy * Node resource for bare-metal service * Convert glance parts of task test to requests\_mock * Chassis resource for bare-metal service * Driver resource for bare-metal service * Support for node replace in cluster service * Collapse base classes in test\_image * Skip volume backup tests on clouds without swift * Add new attributes to floating ips 0.9.11 ------ * Rebase network proxy to proxy2 * Add test to trap for missing services * Change fixtures to use https * Honor image\_api\_version when doing 
version discovery * Replace swift capabilities call with REST * Change register\_uri to use the per-method calls * Convert test\_create\_image\_put\_v2 to requests\_mock * Remove caching config from test\_image * Move image tests from caching to image test file * Remove glanceclient and warlock from shade * Remove a few glance client mocks we missed * Change image update to REST * Make available\_floating\_ips use normalized keys * Fix \_neutron\_available\_floating\_ips filtering * Rebase network resources to resource2 (4) * Rebase network resources to resource2 (3) * Stop telling users to check logs * Plumb nat\_destination through for ip\_pool case * Update image downloads to use direct REST * Move image tasks to REST * Add 'project\_id' field to volume resource * Add support for limits * Rebase network resources onto resource2 (2) * Rebase network resources onto resource2 (1) * Fix interface\_key for identity clients * Tox: optimize the \`docs\` target * Add more server operations based on Nova API * Add user\_id in profile resource * Add filters to the network proxy agents() method * Replace Image Create/Delete v2 PUT with REST calls * Replace Image Creation v1 with direct REST calls * Remove test of having a thundering herd * Pull service\_type directly off of the Adapter * Add auto-allocated-topology to SDK * Add compute usage support * Updated from global requirements * Document the \`synchronized\` parameter * Re-add metadata to image in non-strict mode * Show team and repo badges on README * Add 'project\_id' field to cluster's action resource * Added documentation for delete\_image() * Add QoS support to Network object * Add an e to the word therefore * Allow server to be snapshot to be name, id or dict * Add docstring for create\_image\_snapshot * Allow security\_groups to be a scalar * Remove stray debugging line * Start using requests-mock for REST unit tests * Have OpenStackHTTPError inherit from HTTPError * Use REST for listing images * Create and 
use an Adapter wrapper for REST in TaskManager * Normalize volumes * Expose visibility on images 0.9.10 ------ * Be specific about protected being bool * Remove pointless and fragile unittest * Revert "Remove validate\_auth\_ksc" * Revert "Display neutron api error message more precisely" * Remove validate\_auth\_ksc * Fail up to date check on one out of sync value * Normalize projects * Cache file checksums by filename and mtime * Only generate checksums if neither is given * Make search\_projects a special case of list\_projects * Make a private method more privater * Updated from global requirements * Add resource for DHCP Agent * Add unit test to show herd protection in action * Refactor out the fallback-to-router logic * Update floating ip polling to account for DOWN status * Use floating-ip-by-router * Don't fail on trying to delete non-existent images * Allow server-side filtering of Neutron floating IPs * Add fuga.io to vendors * Add "sort" in policy binding list * Add filters "policy\_type" and "policy\_name" for policy binding list * list\_servers(): thread safety: never return bogus data * Add filters to the router proxy routers() method * Depend on normalization in list\_flavors * Add unit tests for image and flavor normalization * Add strict mode for trimming out non-API data * list\_security\_groups: enable server-side filtering 0.9.9 ----- * Add support for network Service Flavor Profile * Don't fail image create on failure of cleanup * Add filter "enabled" for cluster-policy-list * Add resources for Service Provider * Fix metadata property of Senlin node resource * Display neutron api error message more precisely * Add list method and query support for cinder volume and snapshot * Add Python 3.5 classifier and venv * Try to return working IP if we get more than one * Add filter options to the network proxy address\_scopes() method * Support token\_endpoint as an auth\_type * Add test for os\_keystone\_role Ansible module * Document and be more 
explicit in normalization * Updated from global requirements * Add support for volumev3 service type * Add filters provider-\* to the network proxy networks() method * Normalize cloud config before osc-lib call * Fix a bunch of tests * Clarify how to set SSL settings * Add external\_ipv4\_floating\_networks * Logging: avoid string interpolation when not needed * Add a devstack plugin for shade * Allow setting env variables for functional options * Support to delete claimed message * Update ECS image\_api\_version to 1 * Add test for os\_keystone\_domain Ansible module * Add ability to find floating IP network by subnet * Remove useless mocking in tests/unit/test\_shade.py * Fix TypeError in list\_router\_interfaces * Fix problem about location header in Zaqar resource2 * Updated from global requirements * Add filter mac\_address to the network proxy ports() method * Add dns-domain support to Network object * Fix a NameError exc in operatorcloud.py * Fix some docstrings * Fix a NameError exception in \_nat\_destination\_port * Implement create/get/list/delete volume backups * Move normalize\_neutron\_floating\_ips to \_normalize * Prepare for baremetal API implementation * Updated from global requirements * Delete image if we timeout waiting for it to upload * Revert "Split auth plugin loading into its own method" * Add reset\_state api for compute * Add description field to create\_user method * Allow boolean values to pass through to glance * Add limit and marker to QueryParameters class * Update location info to include object owner * Move and fix security group normalization * Add location field to flavors * Move normalize\_flavors to \_normalize * Move image normalize calls to \_normalize * Add location to server record * Start splitting normalize functions into a mixin * Make sure we're matching image status properly * Normalize images * Add helper properties to generate location info * Update simple\_logging to not log request ids by default * Add setter 
for session constructor * Enable release notes translation * Updated from global requirements * cloud\_config:get\_session\_endpoint: catch Keystone EndpointNotFound * Document network resource query filters used by OSC * Add standard attributes to the core network resources * Add service\_type resource to Subnets * Add simple field for disabled flavors * List py35 in the default tox env list * remove\_router\_interface: check subnet\_id or port\_id is provided 0.9.8 ----- * avoid usage of keystoneauth1 sessions * Clarify argparse connections * Updated from global requirements * Add support for network Flavor * Add test for os\_group Ansible module * Remove dead code * Provide better fallback when finding id values * Updated from global requirements * Remove beta label for network segment resource * Using assertIsNone() instead of assertEqual(None, ...) * Add support for filter "status" in node list * Modified Metering Rule base\_path * Update homepage with developer documentation page * Update homepage with developer documentation page * List py35 in the default tox env list * Fix AttributeError in \`get\_config\` * Modified Metering base\_path * Updated from global requirements * Added is\_shared resource to Metering Label * Add QoS support to Network Port object 0.9.7 ----- * Revert "Event list can not display "timestamp" * Generalize endpoint determination * modify the home-page info with the developer documentation * Event list can not display "timestamp" * Add project\_id field to cluster's policy and profile * Fix the issue non-admin user failed to list trusts * Don't create envvars cloud if cloud or region are set * Fix error in node action * compute/v2/server: add ?all\_tenants=bool to list 0.9.6 ----- * Add extended Glance Image properties * Fix connection init when session is provided * Rebase keystone v3 proxy to proxy2 * Fix 'config\_drive' and 'networks' for compute server * Fix cluster query mapping * Rebase keystone resources onto resource2 * Add 
new function for router-gateway * Obtain Image checksum via additional GET * Adjust router add/remove interface method names * Add 'dependents' property to Node and Cluster class * Add support for jmespath filter expressions * Add QoS rule type object and CRUD commands * Add QoS bandwidth limit rule object and CRUD commands * Add QoS DSCP marking rule object and CRUD commands * Add QoS minimum bandwidth rule object and CRUD commands * Add libffi-dev to bindep.txt * Add network segment create, delete and update support * Rebase telemetry resources to resource2/proxy2 * Fix telemetry/metering service version * Don't build releasenotes in normal docs build * Update reno for stable/newton * Use list\_servers for polling rather than get\_server\_by\_id * Fix the issue that 'type' field is missing in profile list * Add ability to configure Session constructor * Fix up image and flavor by name in create\_server * Batch calls to list\_floating\_ips * Split auth plugin loading into its own method 0.9.5 ----- * Allow str for ip\_version param in create\_subnet * Skip test creating provider network if one exists * Revert per-resource dogpile.cache work * Updated from global requirements * Fix two minor bugs in generate\_task\_class * Go ahead and handle YAML list in region\_name * Change naming style of submitTask * Add prompting for KSA options * Add submit\_function method to TaskManager * Refactor TaskManager to be more generic * Poll for image to be ready for PUT protocol * Cleanup old internal/external network handling * Support dual-stack neutron networks * Fix issue "SDKException: Connection failure that may be retried." 
* Rename \_get\_free\_fixed\_port to \_nat\_destination\_port * Log request ids * Detect the need for FIPs better in auto\_ip * Updated from global requirements * Clean up vendor support list * Delete objname in image\_delete 0.9.4 ----- * Refactor Key Manager for resource2 * Move list\_server cache to dogpile * Fix problems about location header in resource2 * Add support for claim for Zaqar V2 API * Ensure per-resource caches work without global cache * Support more than one network in create\_server 0.9.3 ----- * Add support for fetching console logs from servers * Allow image and flavor by name for create\_server * Add support for subscription for Zaqar V2 API * Allow object storage endpoint to return 404 for missing /info endpoint * Add policy validation for senlin * Add profile validation for senlin * Batch calls to list\_floating\_ips * Add QoS policy object and CRUD commands * Get the status of the ip with ip.get('status') * Stop getting extra flavor specs where they're useless * Change deprecated assertEquals to assertEqual * Use cloud fixtures from the unittest base class * Add debug logging to unit test base class * Update HACKING.rst with a couple of shade specific notes * Only run flake8 on shade directory * Add bindep.txt file listing distro depends * Set physical\_network to public in devstack test * Precedence final solution * Updated from global requirements * Add support for configuring split-stack networks * Fix orchestration service initialization * Use "image" as argument for Glance V1 upload error path * Minor network RBAC policy updates * Honor default\_interface OCC setting in create\_server * Validate config vs reality better than length of list * Base auto\_ip on interface\_ip not public\_v4 * Add tests to show IP inference in missed conditions * Deal with clouds that don't have fips betterer * Infer nova-net security groups better * Add update\_endpoint() * Protect cinderclient import * Do not instantiate logging on import * Don't 
supplement floating ip list on clouds without * Add 'check\_stack' operation to proxy * Tweak endpoint discovery for apache-style services * Move list\_ports to using dogpile.cache * Create and return per-resource caches * Lay the groundwork for per-resource cache * Pop domain-id from the config if we infer values * Rename baymodel to cluster\_template 0.9.2 ----- * Add template validation support to orchestration * Add SoftwareDeployment resource to orchestration * Add SoftwareConfig resource to orchestration * Rebase orchestration to resource2/proxy2 * Relocate alarm service into a submodule * Get endpoints directly from services * Add force-delete into compute service * Make shared an optional keyword param to create\_network * Add services operations into compute service * Fix nova server image and flavor * Add support for message resource of Zaqar v2 API * Add support for Zaqar V2 queue resource * Add a 'meta' passthrough parameter for glance images * Allow creating a floating ip on an arbitrary port * Add collect\_cluster\_attrs API to cluster service * Add ability to upload duplicate images * Updated from global requirements * Update Internap information * Fix requirements for broken os-client-config * Add new test with betamax for create flavors * Stop creating cloud objects in functional tests * Move list\_magnum\_services to OperatorCloud * Add test for precedence rules * Pass the argparse data into to validate\_auth * Revert "Fix precedence for pass-in options" * Add release notes for 1.19.0 release * Add the new DreamCompute cloud * Go ahead and admit that we return Munch objects * Depend on python-heatclient>=1.0.0 * Add update\_server method * Fix precedence for pass-in options * Fix cluster resource in cluster service * Update citycloud to list new regions * Add API microversion support * Updated from global requirements * Refactor image v2 to use resource2/proxy2 0.9.1 ----- * Rebase cluster service to resource2/proxy2 * Improve docstring for some 
resource2 methods * Add 'to\_dict()' method to resource2.Resource * \_alternate\_id should return a server-side name * Make end-user modules accessible from top level * Remove discover from test-requirements * Updated from global requirements * Replace \_transpose\_component with \_filter\_component * Fix test\_limits functional test failure * Remove update\_flavor method from compute * Expose 'requires\_id' to get\_xxx proxy functions * Update hacking version * Updated from global requirements * Add support for listing a cloud as shut down * Change operating to interacting with in README * Add floating IPs to server dict ourselves * Add support for deprecating cloud profiles * HAProxy uses milliseconds for its timeout values * Support fetching network project default quota 0.9.0 ----- * Refactor compute for new resource/proxy * Allow alternate\_id to be accessed directly * Add neutron rbac support * Updated from global requirements * Treat DELETE\_COMPLETE stacks as NotFound * Updated from global requirements * Add support for changing metadata of compute instances * Refactor fix magic in get\_one\_cloud() * Add temporary test\_proxy\_base2 * Add segment\_id property to subnet resource * Use keystoneauth.betamax for shade mocks * Allow resources to check their equality * Remove type=timestamp usages * Cluster user guide - part 2 * Move version definition * Updated from global requirements * Add network quotas support * Reword the entries in the README a bit * Add shade constructor helper method * Updated from global requirements * Add reno note for create\_object and update\_object * Rename session\_client to make\_rest\_client * Add magnum services call to shade * Add helper method for OpenStack SDK constructor * Add function to update object metadata * incorporate unit test in test\_shade.py, remove test\_router.py fix tenant\_id in router add functional test test\_create\_router\_project to functional/test\_router.py add unit/test\_router.py add project\_id to 
create\_router * Fix clustering event properties * Add magnum baymodel calls to shade * Updated from global requirements * Updated from global requirements * Make it easier to give swift objects metadata * Updated from global requirements * Add volume quotas support * Add quotas support * Add missing "cloud" argument to \_validate\_auth\_ksc * Add error logging around FIP delete 0.8.6 ----- * Be more precise in our detection of provider networks * Rework delete\_unattached\_floating\_ips function * Implement network agents * Updated from global requirements * Remove data type enforcement on fields (cluster) * Add network segment resource * Make sure Ansible tests only use cirros images * Don't fail getting flavors if extra\_specs is off * Add initial setup for magnum in shade * Updated from global requirements * Workaround bad required params in troveclient * Trivial: Remove 'MANIFEST.in' * Trivial: remove openstack/common from flake8 exclude list * drop python3.3 support in classifier * Set name\_attribute on NetworkIPAvailability * Amend the valid fields to update on recordsets * Move cloud fixtures to independent yaml files * Add support for host aggregates * Add support for server groups * Add release note doc to dev guide * Remove update\_trust method from identity * Updated from global requirements * [Trivial] Remove executable privilege of doc/source/conf.py * Add Designate recordsets support * Remove openstack/common from tox.ini * Fix formatting in readme file * Add support for Designate zones * Fail if FIP doens't have the requested port\_id * Add support for Network IP Availability * Add public helper method for cleaning floating ips * Fix Resource.list usage of limit and marker params * Rework floating ip use test to be neutron based * Delete floating IP on nova refresh failure * Retry floating ip deletion before deleting server * Have delete\_server use the timed server list cache * Document create\_stack * delete\_stack add wait argument * Implement 
update\_stack * Updated from global requirements * Fix string formatting * Add domain\_id param to project operations * Remove get\_extra parameter from get\_flavor * Honor floating\_ip\_source: nova everywhere * Use configured overrides for internal/external * Don't hide cacert when insecure == False * Start stamping the has\_service debug messages * Consume floating\_ip\_source config value * Honor default\_network for interface\_ip 0.8.5 ----- * Trivial: Fix typo in update\_port() comment * Support :/// endpoints * Refactor the port search logic * Allow passing nat\_destination to get\_active\_server * Use fixtures.TempDir * Use fixtures.EnvironmentVariable * Add nat\_destination filter to floating IP creation * Refactor guts of \_find\_interesting\_networks * Search subnets for gateway\_ip to discover NAT dest * Support client certificate/key * Consume config values for NAT destination * Return boolean from delete\_project * Correct error message when domain is required * Remove discover from test-requirements.txt * Add version string * Add release note about the swift Large Object changes * Delete image objects after failed upload * Add network resource properties * Delete uploaded swift objects on image delete * Add option to control whether SLO or DLO is used * Upload large objects as SLOs * Set min\_segment\_size from the swift capabilities * Don't use singleton dicts unwittingly * Updated from global requirements * Update func tests for latest devstack flavors * Pull the network settings from the actual dict * Fix search\_domains when not passing filters * Properly handle overridden Body properties * Wrap stack operations in a heat\_exceptions * Use event\_utils.poll\_for\_events for stack polling * Clarify one-per-cloud network values * Flesh out netowrk config list 0.8.4 ----- * Follow name\_or\_id pattern on domain operations * Remove conditional blocking on server list * Cache ports like servers * Change network info indication to a generic list * 
Workaround multiple private network ports * Reset network caches after network create/delete * Fix test\_list\_servers unit test * Fix test\_get\_server\_ip unit test * Remove duplicate FakeServer class from unit tests * BaseProxy refactoring for new Resource * Mutex protect internal/external network detection * Support provider networks in public network detection * Refactor Resource to better serve Proxy * Re-allow list of networks for FIP assignment 0.8.3 ----- * Consistent resource.prop for timestamps and booleans (cluster) * Add address scope CRUD * Support InsecureRequestWarning == None * Add release notes for new create\_image\_snapshot() args * Split waiting for images into its own method * Add wait support to create\_image\_snapshot() * Also add server interfaces for server get * Import os module as it is referenced in line 2097 * Consistent resource.prop for timestamps and booleans (object store) * Fix grant\_role docstring * Add default value to wait parameter * Consistent resource.prop for timestamps and booleans (network) * Use OpenStackCloudException when \_delete\_server() raises * Always do network interface introspection * Fix race condition in deleting volumes * Use direct requests for flavor extra\_specs set/unset * Fix search\_projects docstring * Fix search\_users docstring * Add new tasks to os\_port playbook * Fix serialize BoolStr formatter * Deal with is\_public and ephemeral in normalize\_flavors * Create clouds in Functional Test base class * Consistent resource.prop for timestamps and booleans (identity) * Run extra specs through TaskManager and use requests * Bug fix: Make set/unset of flavor specs work again * Refactor unit tests to construct cloud in base * Add constructor param to turn on inner logging * Log inner\_exception in test runs * Cluster user guide - first step * Pass specific cloud to openstack\_clouds function * Consistent resource.prop for timestamps and booleans (orchestration) 0.8.2 ----- * Consistent resource.prop for 
timestamps and booleans (telemetry) * Fix image member apis * Make get\_stack fetch a single full stack * Add environment\_files to stack\_create * Add normalize stack function for heat stack\_list * Fix content-type for swift upload * Fix key manager secret resource object * Consistent resource.prop for timestamps and booleans (key manager) * Add wait\_for\_server API call * Consistent resource.prop for timestamps and booleans (image) * Make metadata handling consistent in Compute * Fix coverage configuration and execution * Update create\_endpoint() * Make delete\_project to call get\_project * Update reno for stable/mitaka * Consistent resource.prop for timestamps and booleans (compute) * Add osic vendor profile * Test v3 params on v2.0 endpoint; Add v3 unit * Add update\_service() * Use network in neutron\_available\_floating\_ips * Fix functional tests * Allow passing project\_id to create\_network * In the service lock, reset the service, not the lock * Add/Remove port interface to a router * Consistent resource.prop for timestamps and booleans (block store) * Bug fix: Do not fail on routers with no ext gw * Consistent resource.prop for timestamps and booleans (metric) * Mock glance v1 image with object not dict * Use warlock in the glance v2 tests * Fixes for latest cinder and neutron clients 0.8.1 ----- * Add debug message about file hash calculation * Pass username/password to SwiftService * Add Hypervisor support to Compute Service * Also reset swift service object at upload time * Invalidate volume cache when waiting for attach * Use isinstance() for result type checking * Add test for os\_server Ansible module * Fix create\_server() with a named network * os\_router playbook cleanup * Fix heat create\_stack and delete\_stack * Catch failures with particular clouds * Allow testing against Ansible dev branch * Recognize subclasses of list types 0.8.0 ----- * Add Nova server group resource * Update the README a bit * Allow session\_client to take the same 
args as make\_client * Remove pool\_id attr from creation request body of pool\_member * Add ability to pass just filename to create\_image * Make metadata handling consistent in Object Store * Updated from global requirements * Override delete function of senlin cluster/node * Add support for provider network options * Remove mock testing of os-client-config for swift * Basic resource.prop for ID attributes (message) * Fix formulation * Add release notes * Add a method to download an image from glance * Basic resource.prop for ID attributes (cluster) * Adding Check/Recover Actions to Clusters * Basic resource.prop for ID attributes (block store) * Basic resource.prop for ID attributes (orchestration) * Fix compute tests for resource.prop ID attributes * Send swiftclient username/password and token * Add test option to use Ansible source repo * Basic resource.prop for ID attributes (compute) * Basic resource.prop for ID attributes (image) * Add enabled flag to keystone service data * Clarify Munch object usage in documentation * Add docs tox target * create\_service() should normalize return value * Prepare functional test subunit stream for collection * Basic resource.prop for ID attributes (identity) * Use release version of Ansible for testing * Basic resource.prop for ID attributes (telemetry) * Modify test workaround for extra\_dhcp\_opts * Remove HP and RunAbove from vendor profiles * Added SSL support for VEXXHOST * Fix for stable/liberty job * Update attributes uses hard coded id * Adding check/recover actions to cluster nodes * Basic resource.prop for ID attributes (network) * granting and revoking privs to users and groups * Remove 'date' from Object resource * Add support for zetta.io * Make functional test resources configurable * Fix Port resource properties * Refactor profile set\_ methods * Add UNIXEpoch formatter as a type for properties * Update create\_network function in test\_network * Stop ignoring v2password plugin 0.7.4 ----- * Add release 
note for FIP timeout fix * Documentation for cluster API and resources * Go ahead and remove final excludes * Resource object attributes not updated on some interfaces * include keystonev2 role assignments * Add release note for new get\_object() API call * Pass timeout through to floating ip creation * Fix normalize\_role\_assignments() return value * Don't set project\_domain if not project scoped * Add ISO8601 formatter as a type for properties * Add LoadBalancer vip\_port\_id and provider properties * Remove a done todo list item * Raise NotFound exception when get a deleted stack * add the ability to get an object back from swift * Clean up removed hacking rule from [flake8] ignore lists * Updated from global requirements * allow for updating passwords in keystone v2 * download\_object/get\_object must have the same API * Map KSA exception to SDK exceptions * Fix URLs for CLI Reference * Support neutron subnets without gateway IPs * Updated from global requirements * Send keystoneauth a better user-agent string * Add network availability zone support * set up release notes build * Allow resource get to carry query string * Rework cluster API * Save the adminPass if returned on server create * Skip test class unless a service exists * Fix unit tests that validate client call arguments * Add attribute 'location' to base resource * Add preview\_stack for orchestration * Fix a precedence problem with auth arguments * Return empty dict instead of None for lack of file * Pass version arg by name not position * Allow inventory filtering by cloud name * Update Quota documentation and properties * Use \_get\_client in make\_client helper function * Add barbicanclient support * Update Subnet Pools Documentation * Add range search functionality * Update router's functional tests to validate is\_ha property * Fix create\_pool\_member and update\_pool\_member * Updated from global requirements * Remove openstack-common.conf * Use assertTrue/False instead of 
assertEqual(T/F) * Add IBM Public Cloud * Remove status property from LBaaS resources * Add functional tests for DVR router * Add missing Listener resource properties * Better support for metadata in Compute service * Replace assertEqual(None, \*) with assertIsNone in tests * Update auth urls and identity API versions * Stop hardcoding compute in simple\_client * correct rpmlint errors * Add tests for stack search API * Fix filtering in search\_stacks() * Add image user guide * Bug fix: Cinder v2 returns bools now * s/save/download/ * Normalize server objects * Replace assertTrue(isinstance()) with assertIsInstance() * Replace assertEqual(None, \*) with assertIsNone in tests * Add support for availability zone request * Add proxy methods for node actions (cluster) * Rename timestamp fields for cluster service * Add cluster actions to cluster proxy * Update volume API default version from v1 to v2 * Debug log a deferred keystone exception, else we mask some useful diag * Fix README.rst, add a check for it to fit PyPI rules * Make server variable expansion optional * Use reno for release notes * add URLs for release announcement tools * Have inventory use os-client-config extra\_config * Fix unittest stack status * Allow filtering clouds on command line * Fix docstring of resource\_id parameter in resource module * Fix server action resource call * Munge region\_name to '' if set to None * Fix some README typos * Correct response value in resource unittests * Fix token\_endpoint usage * Raise not found error if stack is deleted when find\_stack * Add Receiver resource to cluster service * remove python 2.6 os-client-config classifier * Add Subnet Pool CRUD * remove python 2.6 trove classifier * Fix shade tests with OCC 1.13.0 * If cloud doesn't list regions expand passed name * No Mutable Defaults * Add Quota RUD and missing properties * Add 'resize' action to cluster * Add option to enable HTTP tracing * Fix glance endpoints with endpoint\_override * Allow passing 
in explicit version for legacy\_client * Pass endpoint override to constructors * Return None when getting an attr which is None when using resource.prop() * Support backwards compat for \_ args * Add backwards compat mapping for auth-token * Add support for querying role assignments * Add Network mtu and port\_security\_enabled properties * Replace assertEqual(None, \*) with assertIsNone in tests * Support block\_store types where IDs are taken * Remove requests from requirements * cluster: Use typed props instead of \*\_id * Add inventory unit tests * Updated from global requirements * Rename key\_management to key\_manager * Replace 'value' arguments in telemetry proxy * Add Port port\_security\_enabled property * Replace 'value' arguments in orchestration proxy * Replace 'value' arguments in object\_store proxy * Replace 'value' arguments in network proxy 0.7.3 ----- * Replace 'value' arguments in key\_management proxies * Replace 'value' arguments in image proxies * Allow arbitrary client-specific options * Fix server deletes when cinder isn't available * Pedantic spelling correction * Fix exceptions to catch for ignore\_missing * Bug fix: create\_stack() fails when waiting * Updated from global requirements * Stack API improvements * Add admonition to telemetry code * Bug fix: delete\_object() returns True/False * Add Router ha, distributed and routes properties * Fix "report a bug" launchpad project * Add wait support for ironic node [de]activate * Add PolicyType resource for clustering * Add 'ProfileType' resource for senlin * block\_store and cluster: replace 'value' arguments * Add cluster-policy binding resource to Senlin * Skip orchestration functional tests * Replace 'value' arguments in identity proxies * Replace 'value' arguments in database proxy * Replace 'value' arguments in compute proxy 0.7.2 ----- * Update doc link in README * Remove oslosphinx * Improve test coverage: container/object list API * Make a new swift client prior to each image 
upload * Improve test coverage: volume attach/detach API * Skip broken functional tests * Add ceilometer constructor to known constructors * Delete key pair and server for Compute example * Fix 400 error in compute examples * Fix post test hook script * Remove the Metric proxy * Remove an extra dangling doc reference to CDN * Bug fix: Allow name update for domains * Improve test coverage: network delete API * Bug fix: Fix pass thru filtering in list\_networks * Consider 'in-use' a non-pending volume for caching * Remove incomplete CDN code * Improve test coverage: private extension API * Improve test coverage: hypervisor list * Fix failing compute example * Use reno for release notes * Add support for generalized per-region settings * Fix a README typo - hepler is not actually a thing * Make client constructor optional * Updated README to clarify legacy client usage * Add simple helper function for client construction * Add method for registering argparse options * Updated from global requirements * Update vexxhost to Identity v3 * Updated from global requirements * Add identity user guide * Doc: Add instructions for creating cloud.yaml * Improve test coverage: list\_router\_interfaces API * Change the client imports to stop shadowing * Use non-versioned cinderclient constructor * Replace stackforge with openstack * Improve test coverage: server secgroup API * Improve test coverage: container API * Make sure that cloud always has a name * Add BuildInfo resource to cluster service * Updated from global requirements * Improve test coverage: project API * Improve test coverage: user API * Provide a better comment for the object short-circuit * Add network user guide * Remove cinderclient version pin * Add functional tests for boot from volume * Remove optional keystoneauth1 imports * Enable running tests against RAX and IBM * Don't double-print exception subjects * Accept objects in name\_or\_id parameter * Add authorize method to Connection * Avoid Pool object 
creating in pool\_member functional calls * Fix cluster action api invocations * Normalize volume objects * Add rebuild\_server function call * Replace 'MagicMock' with 'Mock' * Fix argument sequences for boot from volume * Updated from global requirements * Trivial: Fix a typo in resource.py * Add server resize function calls * Make nova server\_interface function calls work * Fix typo in action test case * Add event resource for senlin(cluster) service * Remove missing capability * Remove some dead exception types * Fix senlin update verb * Replace 'MagicMock' with 'Mock' * Publicize the \_convert\_id call of Resource class * Try running examples tests on gate * Add documentation for testing examples * Make delete\_server() return True/False * Add BHS1 to OVH * Adjust conditions when enable\_snat is specified * Only log errors in exceptions on demand * Fix resource leak in test\_compute * Clean up compute functional tests * Cleanup doc references to past modules * Use consistent argument names for find proxies * Handle cinder v2 * find\_security\_group\_rule does not find by name * Stop using nova client in test\_compute * Updates doc enviro to use OpenStack Docs theme * Retry API calls if they get a Retryable failure 0.7.1 ----- * Fix call to shade\_exceptions in update\_project * Set "password" as default auth plugin * Add test for os\_volume Ansible module * Add find support to BaseProxy * Fix for min\_disk/min\_ram in create\_image API * Add test for os\_image Ansible module * Add support for secure.yaml file for auth info * Fix warnings.filterwarnings call * boot-from-volume and network params for server create * Do not send 'router:external' unless it is set * Add test for os\_port Ansible module * Allow specifying cloud name to ansible tests 0.7.0 ----- * Fix a 60 second unit test * Make sure timeouts are floats * Remove default values from innner method * Bump os-client-config requirement * Do not allow security group rule update * Fix lack of parenthesis 
around boolean logic * Keystone auth integration * Only pass timeout to swift if we have a value * Refactor os-client-config usage in from\_config * Updated from global requirements * Updated from global requirements * Add test for os\_user\_group Ansible module * Add user group assignment API * Add test for os\_user Ansible module * Add test for os\_nova\_flavor Ansible module * Stop using uuid in functional tests * Make functional object tests actually run * Fix name of the object-store api key * Refactor per-service key making * Add Ansible object role * Fix for create\_object * Add support for legacy envvar prefixes * Four minor fixes that make debugging better * Add new context manager for shade exceptions, final * Add ability to selectively run ansible tests * Add Ansible testing infrastructure * Create Key Pair * Fix JSON schema * Add new context manager for shade exceptions, cont. again * Pull server list cache setting via API * Plumb fixed\_address through add\_ips\_to\_server * Workaround a dispute between osc and neutronclient * Workaround for int value with verbose\_level * Support ignore\_missing in find\_pool\_member method * Remove unneeded workaround for ksc * Add default API version for magnum service * Let os-client-config handle session creation * Remove designate support * Remove test reference to api\_versions * Update dated project methods * Fix incorrect variable name * Add CRUD methods for keystone groups * Adjust image v1 to use upload instead of create * Adjust object\_store to use upload/download names * Work around a bug in keystoneclient constructor * Return cache settings as numbers not strings * Add method to get a mounted session from config * Bump ironicclient depend * Make sure cache expiration time is an int * Convert floats to string * Add new context manager for shade exceptions, cont * Don't assume pass\_version\_arg=False for network * Update network api version in defaults.json * Dont turn bools into strings * Use 
requestsexceptions for urllib squelching * Use the requestsexceptions library * Don't warn on configured insecure certs * Normalize domain data * Normalization methods should return Munch * Fix keystone domain searching * Normalize int config values to string * Fix server.action does not work * Remove the example code that mimics a CLI * Add new context manager for shade exceptions * teach shade how to list\_hypervisors * Update ansible router playbook * Disable spurious urllib warnings * Add logging module support * Add methods for getting Session and Client objects * Update conoha's vendor profile to include SJC * Use json for in-tree cloud data * Stop calling obj\_to\_dict everwhere * Always return a munch from Tasks * Make raw-requests calls behave like client calls * Minor logging improvements * Updated from global requirements * Update auro to indicate move to neutron * Copy values in backwards\_interface differently * Remove another extraneous get for create\_server * Don't wrap wrapped exception in create\_server * Skip an extra unneeded server get * Fix typo in Catalyst region configs * A better create server example * Don't wrap wrapped exceptions in operatorcloud.py * Add docs for create\_server * Update README to not reference client passthrough * Move ironic client attribute to correct class * Move \_neutron\_exceptions context manager to \_utils * Fix misspelling of ironic state name * Timeout too aggressive for inspection tests * Split out OpenStackCloud and OperatorCloud classes * Adds volume snapshot functionality to shade * Fix the return values of create and delete volume * Remove removal of jenkins clouds.yaml * Consume /etc/openstack/clouds.yaml * Add logic to support baremetal inspection * node\_set\_provision\_state wait/timeout support * Add warning suppression for keystoneauth loggers * Suppress Rackspace SAN warnings again * Aligned a few words in the docs * Sort vendor list * Add conoha public cloud * Allow for templated variables in 
auth\_url * Use assertDictEqual to test dict equality * return additional detail about servers * expand security groups in get\_hostvars\_from\_server * Always pull regions from vendor profiles * add list\_server\_security\_groups method * Add swift object and container list functionality * Translate task name in log message always * Add debug logging to iterate timeout * Change the fallback on server wait to 2 seconds * Add entry for James Blair to .mailmap * handle routers without an external gateway in list\_router\_interfaces * Support to Profile resource for cluster service * Add node resource for cluster service * Fix projects list/search/get interface * Remove unused parameter from create\_stack * Move valid\_kwargs decorator to \_utils * Add heat support * Abstract out the name of the name key * Add heatclient support * Use OCC to create clouds in inventory * Add action resource for cluster service * novaclient 2.32.0 does not work against rackspace * Add policy resource for cluster service * Support private address override in inventory * Normalize user information * Set cache information from clouds.yaml * Make designate record methods private for now * Fix typos in docstrings: * s/stackforge/openstack/ * Rely on devstack for clouds.yaml * Rename identity\_domain to domain * Rename designate domains to zones * Replace Bunch with compatible fork Munch * Make a few IP methods private * Update .gitreview for new namespace * Push filtering down into neutron * Clean up cache interface, add support for services * Make floating IP func tests less racey * Make router func tests less racey * Create neutron floating ips with server info * Undecorate cache decorated methods on null cache * Tweak create\_server to use list\_servers cache * Add Rackspace LON region * Add API method to list router interfaces * Handle list\_servers caching more directly * Split the nova server active check out * Pass wait to add\_ips\_to\_server * Fix floating ip removal on delete 
server * Document filters for get methods * Add some more docstrings * Validate requested region against region list * Fix documentation around regions * Add an API reference to the docs * Pass OpenStackConfig in to CloudConfig for caches * Remove shared=False from get\_internal\_network * Make attach\_instance return updated volume object * Tell git to ignore .eggs directory * Align users with list/search/get interface * Add script to document deleting private networks * Add region resource to identity service * Add create/delete for keystone roles * Accept and emit union of keystone v2/v3 service * Use keystone v3 service type argument * Add auth hook for OpenStackClient * Add get/list/search methods for identity roles * Add methods to update internal router interfaces * Add get\_server\_by\_id optmization * Add option to floating ip creation to not reuse * Adds some lines to complete table formatting * Provide option to delete floating IP with server * Update python-troveclient requirement * Add a private method for nodepool server vars * Update required ironicclient version * Split get\_hostvars\_from\_server into two * Invalidate image cache everytime we make a change * Use the ipaddress library for ip calculations * Optimize network finding * Fix create\_image\_snapshot * Add universal=1 to setup.cfg to build python 3 wheels * Return IPv6 address for interface\_ip on request * Plumb wait and timout down to add\_auto\_ip * Pass parameters correctly for image snapshots * Fix mis-named has\_service entry * Provide shortcut around has\_service * Provide short-circuit for finding server networks * Update fake to match latest OCC * Some cleanup * The Compute User Guide * Fix two typos * Put in override for Rackspace broken neutron * Support passing force\_ipv4 to the constructor * identity version is 2.0 * Dont throw exception on missing service * Handle OS\_CLOUD and OS\_REGION\_NAME friendly-like * Server functional test - image and flavor * Added SWITCHengines 
vendor file * Add functional test for private\_v4 * Attempt to use glanceclient strip\_version * Fix baremetal port deletion * Add router ansible test and update network role * Trap exceptions in helper functions * Add more info to some exceptions * Allow more complex router updates * Allow more complex router creation * Allow creating externally accessible networks * Handle glance v1 and v2 difference with is\_public * Get defaults for image type from occ * Use the get\_auth function from occ * update RST for readme so pypi looks pretty * Add a NullHandler to all of our loggers * Remove many redundant debug logs * Fix a little error with the None auth type * Add support to stack update * Make inner\_exception a private member * Add support for Catalyst as vendor * Just do the error logging in the base exception * Store the inner exception when creating an OSCException * Start using keystoneauth for keystone sessions * Change ignore-errors to ignore\_errors * Updated from global requirements * Change ignore-errors to ignore\_errors * Handle ksa opt with no deprecated field * Fall back to keystoneclient arg processing * Fix typo in ovh region names * Move plugin loader creation to try block * Convert auth kwargs '-' to '\_' * Properly handle os- prefixed args in fix\_args * Test kwargs passing not just argparse * Allow configuring domain id once * Add internap to the vendor list * Fix typo in comment - we use ksa not ksc * Defer plugin validation to keystoneauth * Remove an extra line * Add Datacentred to the vendor list * Add ultimum to list of vendors * Add Enter Cloud Suite to vendors list * Add elastx to vendor support matrix * Switch the image default to v2 * Update auro auth\_url and region information * Add citycloud to the vendors list * Return keystoneauth plugins based on auth args * Move keystone to common identity client interface * Remove duplicate lines that are the same as default * Add default version number for heat * Bump the default API version 
for python-ironicclient * Update OVH public cloud information * Do not use name attribute for path argument * Copy attributes in resource constructor * Don't use positional for keypair loaded * Avoid 2.27.0 of novaclient * Handle empty defaults.yaml file * unregister\_machine blocking logic * Fix exception lists in functional tests * Migrate neutron to the common client interface * Remove last vestige of glanceclient being different * Pass timeout to session, not constructors * Delete floating ip by ID instead of name * Move glanceclient to new common interface * Add tox targets for functional testing on 2 and 3 * Fix find available floating IP test * Image import * Updated from global requirements * add scheduler\_hints support for server creation * Make Resource.find more generically applicable * Get rid of example command line options * Delete transport test test\_debug\_post * Remove unecessary parameters to resource methods * Get url for object store object in the normal way * Fix set resource property id attribute * Fix resource property id * Fix image v2 member base\_path * Add object store object functional tests * Object store get sending bad headers * Remove the ips method from server.py * Addition of shade unregister\_machine timeout * More Pythonic Connection example usage * Move service filter out of auth 0.6.2 ----- * Get container off of an Object if its passed * Rename userguides to guides * Add a functional test for find\_extension * Initial support for ironic enroll state * Make sure there is data for the meter test * Fix find\_extension for Network and Compute proxies * Properly pass on Container in path\_args * Do not treat project\_name and project\_id the same * Remove spaces around data in transport debug print * Rename extensions to plugins * Remove redundant connection tests * Move TestTransportBase out of base * Improve the from\_config doc * Remove connection CRUD methods * Add flavor access API * Make client constructor calls consistent 
* Revert "Revert "Use the correct auth\_plugin for token authentication"" * Only log text strings in requests * Updated from global requirements * Change functional testing to use clouds.yaml * Updated from global requirements * Revert "Use the correct auth\_plugin for token authentication" * Add a developer coding standards doc * Ignore infra CI env vars * Fix from\_config argument * Use the correct auth\_plugin for token authentication * Updated from global requirements * Add flavor functional tests * Bug fix for obj\_to\_dict() * Add log message for when IP addresses fail * Add methods to set and unset flavor extra specs 0.6.1 ----- * Fixed problem with service name in comments * Listing flavors should pull all flavors * Be consistent with accessing server dict * Throw an exception on a server without an IP * Be smarter finding private IP * Clarify future changes in docs * Fix KeyError when server's response doesn't contain resource key * Align to generic password auth-type * Change visibility to interface * Fix call to get\_interface * Add functional tests for compute limits * Fixes a typo in test name * Changes in the new marker, initialise new marker to aviod bug 0.6.0 ----- * Remove meta.get\_server\_public\_ip() function * Document create\_object * Remove unused server functions * Fix two typos and one readablity on shade documentation * Clean up services in profile * Pass socket timeout to swiftclient * Process config options via os-client-config * Update ansible subnet test * Fix test\_object.py test class name * Claim no messages correctly * Fix for swift servers older than 1.11.0 * Clarify floating ip use for vendors * Add per-service endpoint overrides * Use disable\_vendor\_agent flags in create\_image * Use os-client-config SSL arg processing * Correctly pass the server ID to add\_ip\_from\_pool * Add functional tests for telemetry sample * Add configuration function using os-client-config * Updated from global requirements * Add initial designate 
read-only operations * Add functional tests for telemetry meter * Fix key management proxy docs * add .eggs to .gitignore * Add wait for delete method * Always use a fixed address when attaching a floating IP to a server * Fix spelling in proxy * Added functional tests for compute image API * Drop py33 support * Remove requirements.txt from tox.ini * Remove requirements.txt from tox.ini * Updated from global requirements * Update mock requirements * Catch leaky exceptions from create\_image() * Add missing docstrings * Dynamically load services * Remove py26 and py33 from tox.ini * Rename 'endpoint\_type' to 'interface' * Have service name default to None * Add flavor admin support * Remove region list from single cloud * Clean up document warnings * Fix debug logging lines * Split account/container metadata to own resources * Change auth plugin names to match KSA * Account for Error 396 on Rackspace * Updated from global requirements * Fix small error in README.rst * Generallize example so it can be modified easily * Fix set\_default() when used before config init * Fix logger name for examples * Allow use of admin tokens in keystone * Add query params to all the proxy list calls * Remove limit/marker from object\_store proxy * Updated from global requirements * Remove thin interface * Convert list and find to use params parameter * Add ignore\_missing to proxy find * Fix identity domain methods * Update ansible module playbooks * Rework how we get domains * Argument consistency in test\_proxy\_base * Log reauth * Specify the config file with environment variable * Add support for configuring region lists with yaml * Fix "Bad floatingip request" when multiple fixed IPs are present * Add docstrings for database resources * Add or change timestamp suffixes to "\_at" * Remove unnecessary None handling * Add Ansible module test for subnet * Add Ansible module test for networks * Fix rendering issue in Readme * Add a testing framework for the Ansible modules * Some 
updates to object\_store user guide * Support project/tenant and domain vs. None * Add CRUD methods for Keystone domains * Don't allow users to set all API versions the same * Raise exception for nova egress secgroup rule * Add docstrings to key\_management resources * Add docstrings for Metric resources * Rename keystore key-management * Fix cacert for tests and examples * Modify secgroup rule processing * Have resource find use get if possible * Updated from global requirements * Check results of find before returning * Move object\_store functional tests to proper name * Make sure we are returning floating IPs in current domain * Correctly name the functional TestImage class * Include examples in toctree * Change docs links to generic format * Add the pbr generated changelog to the docs * Locking ironic API microversion * Add Neutron/Nova Floating IP tests * Refactor verify\_get tests * Add docstrings to telemetry resources * Add docs for Image v1 and v2 resources * Add orchestration resource docs * Adding SSL arguments to glance client * Clean up vendor data * Add support for indicating preference for IPv6 * Use Message.existing() to create existing messages * Add normal list params to list method for telemetry statistics * Fix SSL verify/insecure translation * Add functional tests for telemetry statistics * Add functional tests for telemetry alarm\_change * Add functional tests for telemetry alarm crud * Add functional tests for telementry resource * Set sys.stdout for logging for examples and tests * Remove list\_keypair\_dicts method * Do not use environment for Swift unit tests * Add Neutron/Nova Floating IP attach/detach * Fix available\_floating\_ip when using Nova network * Skip Swift functional tests if needed * Fix AttributeError in keystone functional tests * Update keypair APIs to latest standards * Remove namespace from network ext test * Add Neutron/Nova Floating IP delete (i.e. deallocate from project) * Add Neutron/Nova Floating IP create (i.e. 
allocate to project) * Docs for logging * More selective logging * Convert ironicclient node.update() call to Task * Convert ironicclient node.get() call to Task * Move TestShadeOperator in a separate file * Fix intermittent error in unit tests * Pin cinderclient * Normalize project\_name aliases * Add comment explaining why finding an IP is hard * Add IPv6 to the server information too * Use accessIPv4 and accessIPv6 if they're there * Add Neutron/Nova Floating IP list/search/get * Catch all exceptions around port for ip finding * Centralize exception management for Neutron * Fix MD5 headers regression * Enable Orchestration in DevStack * Ensure that service values are strings * Pass token and endpoint to swift os\_options * Correct test\_quota functional test * Add path\_args to create and update proxy methods * Clean up a few more python-openstacksdk references * Move volume docs to block\_store * Add \_at suffix to created/updated Server attrs * Convert ironicclient node.validate() call to Task * Convert ironicclient node.list() call to Task * Refactor verify\_list tests * Return True/False for delete methods * Updated from global requirements * Return the entire response in an InvalidResponse * Rename volume to block\_store * Rename project to openstacksdk * Add some accessor methods to CloudConfig * Add delete method for security group rules * Add get\_server\_external\_ipv6() to meta * Refactor find\_nova\_addresses() * Replace get\_server\_public\_ip() with get\_server\_external\_ipv4() * Add get\_server\_external\_ipv4() to meta * Add more parameters to update\_port() * Improve documentation for create\_port() * Correct get\_machine\_by\_mac and add test * Add create method for secgroup rule * Add functional tests for update\_ip and find\_available\_ip * Coalesce port values in secgroup rules * Move \_utils unit testing to separate file * Updated from global requirements * Add funtcional tests for port * Rename clustering to cluster service * Switch 
put\_update to patch\_update * Add functional tests for floating IP * Add pool\_id param for pool\_member related proxy methods * Updated from global requirements * Fix missing doc on identity v2 * Add Heat resource support * Convert telemetry capability list to generator * Fix vpn service docstring error * Use ostestr for tests * Fix missing doc on identity v3 * Add functional tests for telemetry capabiliities * Updated from global requirements * Use very long functional test linger * Increase time we linger waiting for delete * Fix functional test gate * Fix create proxy issue * Support variations of external network attribute * Move Server.created to created\_at * Compare message data outside of assert\_called\_with * Add action() and check() method for heat support * Add secgroup update API * Add missing tests * Add very initial support for passing in occ object * Add test to check cert and key as a tuple * Don't emit volume tracebacks in inventory debug * Return new secgroup object * Add functional tests for security group rule * Add functional tests for add & remove router interface * Use one yaml file per vendor * Add functional test for Network Quota * Raise warning when a vendor profile is missing * Some cleanup in the README.rst * Allow create\_object to create objects * Refactor verify\_delete in proxy tests * Add support for OVH Public Cloud * Refactor verify\_create in proxy tests * Add SSL documentation to README.rst * Port ironic client port.get\_by\_address() to a Task * Port ironic client port.get() to a Task * Add inventory command to shade * Extract logging config into a helper function * Refactor verify\_update in proxy tests * Move stray metric test under proper directory * Stringify project details * Raise a warning with conflicting SSL params * Change references of "clouds.yaml" for real file * Add create method for security groups * Add delete method for security groups * Switch to SwiftService for segmented uploads * Add support to get a 
SwiftService object * Add functional tests for servers * Add functional tests for security groups * Add functional tests for container metadata and delete * Claim messages and delete messages * Add cover/ folder to .gitignore * Raise a warning when using 'cloud' in config * Add cloud vendor files config in doc * Add network/v2 vpn service resource * Add 'to\_dict' operation to Resource class * Senlin cluster resource and unit tests * Add port resource methods * Split security group list operations * Add keystone endpoint resource methods * Add Keystone service resource methods * Rely on defaults being present * Consume os\_client\_config defaults as base defaults * Remove hacking select line * Provide a helper method to get requests ssl values * Add design for an object interface * Fix proxy docs * Port ironic client node.list\_ports() to a Task * Port ironic client port.list() to a Task * Split list filtering into \_utils * Add path\_args when invoking Resource.list() from proxy layer * Complete property definition in some lb resources * Cast nova server object to dict after refetch * Split iterate\_timeout into \_utils * Cleanup OperatorCloud doc errors/warnings * Add more defaults to our defaults file * Remove fingerprint as keypair name * Add docstring to heat stack resource * Create messages on a queue * Add comment for tox coverage failure * Move compute limits to new \_get API * Create clouds.yaml for functional tests * Change naming in vendor doc to match vendors.py * Add auro to list of known vendors * Add list of image params needed to disable agents * Added functional tests for subnet * Delete queue * Added functional tests for router * Fix proxy delete error * Rename messaging module to message * Update pbr version pins * Add set\_one\_cloud method * Add tests for get\_cloud\_names * Add flag to indicate handling of security groups * Don't normalize too deeply * Add tests for cloud config comparison * Metric resource docs framework * Keystore resource 
docs framework * Image resource docs framework * Add inequality method * Decorator for functional tests to check services * Add an equality method for CloudConfig * Capture the filename used for config * Normalize all keys down to \_ instead of - * Expose method for getting a list of cloud names * Set metadata headers on object create * Fix catalog error * Rename cloud to profile * Don't pass None as the cloud name * Initial commit for the Messaging service (Zaqar) * Always refresh glanceclient for tokens validity * Don't cache keystone tokens as KSC does it for us * Make sure glance image list actually runs in Tasks * Remove oslo incubator config file * Make caching work when cloud name is None * Accept intermediate path arguments at proxy * Removed fields=id\_attribute in find function * Handle novaclient exception in delete\_server wait * Minor changes to top level docs * Module loader docs * Orchestration resource docs * Identity resource doc framework * Add telemetry resource docs * Support PUT in Image v2 API * Add some examples documentation * Fix functional tests deletes * Add id\_attribute to base proxy calls * Remove pass from delete functional tests * Fix underline for docs * Add keypair functional tests * Remove some mentions to preferences from docs * Add requirements.txt file for readthedocs * Make ironic use the API version system * Correct the API base path of lbaas resources * Fix documentation warnings 0.5.0 ----- * Change example for preferences * Move from UserPreference to Profile * Update orchestration functional tests * Add proxy docs and empty user guides * Set OS\_CLOUD for functional tests * proxy find telemetry * proxy find orchestration * proxy find network * proxy find keystore * proxy image find * proxy find identity * proxy find database * AFT compute extension * AFT network network CRUD * AFT network * Enable occ cloud region for example * change hacking requirements and fix hacking problems * proxy find compute * Make images list 
paginated * Create a method to format urls * Identity list updates * proxy image lists * Proxy database lists * Proxy keystore lists * Proxy network lists * Fix telemetry proxy comment * Proxy metric lists * Proxy orchestration lists * Proxy lists telemetry * Support for verify option for examples and tests * Rename list\_flavors flavors in example * Functional tests use OS\_CLOUD environment variable * Fix flavor functional test * No headers in body for create and update * Fix proxy object get comment * Catch client exceptions during list ops * Replace ci.o.o links with docs.o.o/infra * Changes for get calls in image proxies * Changes for database proxy gets * Changes for compute proxy list calls * Fix comment error on gets and heads * Common head method for proxies * Common list method for proxies * Pass OS\_ variables through to functional tests * Remove httpretty * Changes for get calls in object\_store proxy * Orchestration proxy changes * Changes for get calls in volume proxy * Changes for get calls in telemetry proxy * Improve error message on auth\_plugin failure * Handle novaclient exceptions during delete\_server * Changes for get calls in network proxy * Changes for get calls in keystore proxy * Changes for get calls in identity proxies * Get changes for compute proxy * Basic object store container functional tests * Create base class for functional tests * Add floating IP pool resource methods * Proxy get method * Don't error on missing certs * Remove clouds.yaml from gitignore * Add clouds.yaml file for contributor testing docs * Activate the cdn stuff * Temporarily work around httpretty again * Change overriding defaults to kwarg * Stop leaking server objects * Add tests for OSC usage * Use fakes instead of mocks for data objects * Use appdirs for platform-independent locations * Add UnitedStack * Expose function to get defaults dict * Add default versions for trove and ironic * Sort defaults list for less conflicts * Only add fall through cloud as a 
fall through * Fix several small nits in network v2 proxy * Update images API for get/list/search interface * Rewrite extension checking methods * Update server API for get/list/search interface * Compute proxy update changes * Volume proxy create changes * Telemetry proxy create changes * Object Store proxy create changes * Network proxy create changes * Add the IPv6 subnet attributes * Updated from global requirements * Keystore proxy create changes * Image create proxy changes * Add flag to indicate where floating ips come from * Identity create proxy changes * Database create changes for proxy * Create changes for compute proxy * get\_one\_cloud should use auth merge * Also accept .yml as a suffix * Updated from global requirements * Proxy create method * Fix delete\_server when wait=True * Initial version of clustering service support * Return Bunch objects instead of plain dicts * Add os-client-config support for examples * Fix docs for volume proxy delete * Proxy update telemetry changes * Proxy update network changes * Proxy update keystore changes * Proxy update image changes * Proxy update identity changes * proxy update database changes * Switch tasks vs put on a boolean config flag * Enhance the OperatorCloud constructor * Convert node\_set\_provision\_state to task * Update recent Ironic exceptions * Enhance error message in update\_machine * Remove crufty lines from README * Rename get\_endpoint() to get\_session\_endpoint() * Update vendor support to reflect v2 non-task * Make warlock filtering match dict filtering * Fix exception re-raise during task execution for py34 * Add more tests for server metadata processing * Add thread sync points to Task * Add early service fail and active check method * Add a method for getting an endpoint * Raise a shade exception on broken volumes * Split exceptions into their own file * Add minor OperatorCloud documentation * Proxy update method * Allow for int or string ID comparisons * Add flag to trigger task 
interface for rackspace * Change ironic maintenance method to align with power method * Add Ironic machine power state pass-through * Update secgroup API for new get/list/search API * Remove references to v2\_0 from examples * Move network example into directory * Move keypair to standalone example * Synchronize global requirements * Fix functional tests to run against live clouds * Add functional tests for create\_image * Do not cache unsteady state images * Add tests and invalidation for glance v2 upload * Allow complex filtering with embedded dicts * Add proxy for trust operations * Move jenkins create and delete in their onw files * Call super in OpenStackCloudException * Add Ironic maintenance state pass-through * Add update\_machine method * Replace e.message with str(e) * Update flavor API for new get/list/search API * Add a docstring to the Task class * Remove REST links from inventory metadata * Have get\_image\_name return an image\_name * Add post hook file for a functional test gate * Move wait\_for\_status to resource module * Fix get\_hostvars\_from\_server for volume API update * Add test for create\_image with glance v1 * Explicitly request cloud name in test\_caching * Add test for caching in list\_images * Test flavor cache and add invalidation * Fix major update\_user issues * create\_user should return the user created * Test that deleting user invalidates user cache * Use new getters in update\_subnet and update\_router * Update volume API for new getters and dict retval * Search methods for networks, subnets and routers * Update unregister\_machine to use tasks * Invalidate user cache on user create * Apply delete changes to image proxies * Apply delete changes to keystore proxy * Apply delete changes to identity proxies * Apply delete changes to volume proxy * Apply telemetry delete change * Apply orchestration delete change * Apply object\_store delete changes * Apply network delete changes * Apply delete API changes * Update 
register\_machine to use tasks * Add test of OperatorCloud auth\_type=None * Allow name or ID for update\_router() * Allow name or ID for update\_subnet() * Add test for user\_cache * MonkeyPatch time.sleep in unit tests to avoid wait * Create stack * Updated from global requirements * Add more detail to method not supported exception * Add module name to repr string * Add patch\_machine method and operator unit test substrate * Wrap ironicclient methods that leak objects * Basic test for meta method obj\_list\_to\_dict * Change Ironic node lookups to support names * Add meta method obj\_list\_to\_dict * The first functional test * Document vendor support information * Reset cache default to 0 * Add test for invalidation after delete * Deprecate use of cache in list\_volumes * Invalidate volume list cache when creating * Make cache key generator ignore cache argument * Add get\_subnet() method * add .venv to gitignore * Move region\_names out of auth dict * Add runabove to vendors * Add image information to vexxhost account * Add API method update\_subnet() * Add API method delete\_subnet() * Add API method create\_subnet() * Add vexxhost * Add DreamCompute to vendors list * Allow overriding envvars as the name of the cloud * Put env vars into their own cloud config * Add keystoneclient to test-requirements * Actually put some content into our sphinx docs * Unsteady state in volume list should prevent cache * Test volume list caching * Allow passing config into shade.openstack\_cloud * Refactor caching to allow per-method invalidate * Add tests for caching * Rename auth\_plugin to auth\_type * Update os-client-config min version * Fix volume operations * Determine limit based on page size * Improve image.v2.tag * Proxy delete method * Add \_check\_resource to BaseProxy * Rework update\_attrs so dirty list always updated * Fix exception in update\_router() * Add API auto-generation based on docstrings * Fix docs nit - make it clear the arg is a string * Poll on the 
actual image showing up * Add delete\_image call * Skip passing in timeout to glance if it's not set * Add some unit test for create\_server * Migrate API calls to task management * Fix naming inconsistencies in rebuild\_server tests * identity/v3 trust resource * Add task management framework * Namespace caching per cloud * Allow for passing cache class in as a parameter * Make way for the functional tests * Add 'rebuild' to shade * Let router update to specify external gw net ID * Create swift container if it does not exist * Fix a use of in where it should be equality * Disable warnings about old Rackspace certificates * Add trust-id to command line arguments * Pass socket timeout to all of the Client objects * Add methods for logical router management * Add api-level timeout parameter * Update .gitreview for git section rename * Add a Proxy for the Volume service * Custom exception needs str representation * metric: add support for generic resources * Adjust httpretty inclusion * Add new \_verify to proxy base tests * Add ResourceNotFound exception * Updated from global requirements * Raise NotFoundException for 404s * Remove httpretty from resource tests * Remove httpretty from Transport tests * Start moving Transport tests off of httpretty * Add requests\_mock to test-requirements.txt * Add basic unit test for shade.openstack\_cloud * Small fixes found working on ansible modules * Disable dogpile.cache if cache\_interval is None * Add support for keystone projects * Fix up and document input parameters * Handle image name for boot from volume * Clean up race condition in functional tests * Remove unused utils module in auth tests * Make get\_id public * Change dogpile cache defaults * Add initial compute functional tests to Shade * Image v2 Proxy should inhert from BaseProxy * Get the ID of a single sub-resource * Avoid httpretty 0.8.8 because it is broken * Add missing equal sign to README * Remove repr calls from telemetry classes * Canonical 
request/response logging * Add cover to .gitignore * Make the keypair example a bit more robust * Delete a Stack * Convert example --data to use eval instead of json * Add cover to .gitignore * Fix jenkins name and floating ip * Add ServerDetail so list can include details * Set Flavor and Image resources on Server * Set put\_update for compute.v2.server.Server * Catch AttributeError in header with no alias * Set int type on several container headers * Move the network stuff out of the jenkins example * Fix compute proxy for server wait * Some convenience methods for security groups * Flesh out api version defaults * Set headers on object before creating/updating * Handle project\_name/tenant\_name in the auth dict * Remove py26 jobs * Remove CaseInsensitiveDict * Add two newlines to the ends of files * Rename auth\_plugin to auth\_type * Add ironic node deployment support * identity: use set() for valid\_options * identity: add missing tenant options to valid options * Add base for Proxy classes to inherit from * Fix assert order in test\_resource * Ensure that assert order is (expected, actual) * Remove transaction timestamp from container * Fix glossary and other 404s * metric: add archive policy support * metric: add support for metric resource * Align cert, key, cacert and verify with requests * Add methods to create and delete networks * Add behavior to enable ironic noauth mode * Add support for configuring dogpile.cache * Fix coverage report * Add more testing of vendor yaml loading * More comprehensive unit tests for os-client-config * Adjust paginate argument usage * Allow keystone validation bypass for noauth usage * Add basic unit test for config * Removed x-auth-token from obj.py * Fix bad links out of the index * Allow user to set a prop back to default * Reorder envlist to avoid the rm -fr .testrepository when running tox -epy34 * Make image processing work for v2 * Utilize dogpile.cache for caching * Add support for volume attach/detach * Do not 
allow to pass \*-cache on init * Import from v2 instead of v1\_1 * Remove id from put and patch requests * Add unit test for meta.get\_groups\_from\_server * Add unit tests for meta module 0.4.1 ----- * Send empty dict when no headers on create/update 0.4.0 ----- * Adjust long list handling for Flavor * Add ImageDetail for extra information * Fix comment and assert order * Add FlavorDetail for extra information * Use case insensitive dict for Resource attributes * Support listing non-paginated response bodies * Create header property * Convert user\_name to username * resync ksc auth plugins * omit 0.8.7 httpretty * Add a method to create image snapshots from nova * Return extra information for debugging on failures * Don't try to add an IP if there is one * Provide more accurate repr * Document compute/v2 resources * Fix the discoverable plugin with tokens * Increase Resource test coverage * Updated from global requirements * Fix up the limits documentation * Add the Volume resource for the Volume service * Add the Snapshot resource for the Volume service * Add the Type resource for the Volume service * add metric proxy and service * Move metric capabilities into a single Resource * Serialize Resource types before sending to server * Allow Resource attributes to be updated * Provide one resource for Compute v2 Limits * Adjust Container override methods * Mutate Resource via mapping instead of dict * Revamp README file * Add hasExtension method to check cloud capabilities * Create server requires \*Ref names during POST * Prefer storing prop values as their name * Don't compare images when image is None * Support Resource as a type for properties * Revert to KSC auth plugins * Add logging functionality to openstack.utils * Add API docs for network.v2 * Add Resource.name property * Bypass type conversion when setting default prop * telemetry: fix threshold rule in alarm to be dict * telemetry: add missing alarm property severity * Add service\_catalog property * 
telemetry: add support for Gnocchi capabilities * Introduce the Volume service * Remove unnecessary container creation * Make is\_object\_stale() a public method * Prefer dest value when option is depricated 0.3.2 ----- * Update README and setup text for PyPI * Allow region\_name to be None * Don't return the auth dict inside the loop * Make sure we're deep-copying the auth dict 0.3.1 ----- * Set Resource.page limit to None * Add Resource.from\_name * Add status\_code to HttpException * Provide a better default user-agent string * Fix broken object hashing * Adds some more swift operations * Adds get\_network() and list\_networks function * Get a stack * Build up contributor documentation section * Build up user documentation section * Fix the example on the Usage page * Fix telemetry resource paths * Add support for creating/deleting volumes * Remove version from path * Get auth token lazily * Reorganize existing documentation files * Convert the find method to use the page method rather than list * Add six to requirements * Remove iso8601 from dependencies * Rename floatingip to floating\_ip * Pass service\_name to nova\_client constructor * Create a neutron client * Port to use keystone sessions and auth plugins * Add consistent methods for returning dicts * Add get\_flavor method * Make get\_image return None * Allow examples.get call without data * Resource.find should not raise ResourceNotFound * Use the "iterate timeout" idiom from nodepool * Remove runtime depend on pbr * Provide Rackspace service\_name override * Working script to create a jenkins server * Add the capability for the user to get a page of data * Fix obj\_to\_dict type filtering * Server convenience methods wait and get IPs * Remove flake/pep8 ignores * Adds a method to get security group * Use the proper timeutils package name * Adjust some parameters and return types * Refactor auth plugin loading * Get rid of some useless code * Make better support of service versions * Fix RuntimeError 
on Python 3 while listing objects * Pull in improvements from nodepool * Remove positional args to create\_server * Don't include deleted images by default * Add image upload support * Refactor glance version call into method * Support uploading swift objects * Debug log any time we re-raise an exception * Start keeping default versions for all services * Support keystone auth plugins in a generic way * Replace defaults\_dict with scanning env vars * Correct auth\_plugin argument values * Better exception in Auth plugins * Implement Swift Proxy object and example * Build Resource from either existing object or id * Add image v2 proxy * Remove py26 support * Explain obj\_to\_dict * Fix python3 unittests * Complete the Resource class documentation * Updated from global requirements * Change meta info to be an Infra project * Fix flake8 errors and turn off hacking * Fix up copyright headers * Add image v2 tags * Add better caching around volumes * Updated from global requirements * Remove extra GET call when limit provided to list * Workflow documentation is now in infra-manual * Workflow documentation is now in infra-manual * Add object\_store resource documentation * Neutron apparently doesn't support PATCH * compute/v2 server metadata and server meta resouce * AttributeError trapped when it won't be raised * Prepare for documentation of Resources * Don't attempt to \_\_get\_\_ prop without instance * Reswizzle proxy tests * Corrections to readme * keystore proxy methods and tests * Support regionless/global services * Expand r\_id to resource\_id for more clarity * identity/v2 extension resource * identity version resource and version fixes * Correct Resource.id deleter property 0.2.1 ----- * Correct the namespace for Server resource 0.2.0 ----- * Updated from global requirements * Add members resource to image v2 api * Add image resource to v2 images api * Add image V2 service version * remove id\_only from keypair find * Add the ability to set a new default for 
props * Updated from global requirements * Add coverage-package-name to tox.ini for coverage * Updated from global requirements * Fixed a typo in a comment in the tests * Have prop attribute access return None by default * Get prop aliases through their type, not raw value * Add getting started steps for novices * Expand index toctree to two levels * Add details to HttpException string * Fixed a typo in a docstring * Compute proxy methods * Implement iterator paging * Create a discoverable plugin * Sample thin interface * Rename keypairs to keypair to be more consistent * keystore/v1 container resource * keystore/v1 order resource * Add keystore service and secret resource * Minor docs updates to index, installation and usage * Use project name to retrieve version info * Initial "Getting Started" guide * Identity v2 proxy methods * Identity v3 proxy methods * Telemetry proxy methods * Orchestration proxy methods * Network proxy methods * Image proxy methods * Database proxy methods * base class for proxy tests * Use yaml.safe\_load instead of load * Updated from global requirements * Throw error if a non-existent cloud is requested * Properly parse the keypair response and workaround issues * Have resource CRUD return self to simplify the proxies * The fixed ip format on the port may actually be an array * Add resource CRUD to connection class * Add an example for the connection class * move examples to code use preference docs * Move examples to code session docs * Move examples service filter to code * Move transport examples to code * Support boot from volume * Make get\_image work on name or id * Create a method to handle the set\_\* method logic * Fixed a number of typos * Updated from global requirements * High level interface * Fix a missed argument from a previous refactor * Add some additional server meta munging * identity v3 docs * class linke for v2 * resource autodocs * update :class references * identity v2 docs * auth plugin identity base docs * base 
auth plugin docs * Move the examples docs into the code * remove pointless test * Map CloudConfig attributes to CloudConfig.config * service filter docs * fix identity service comment * User preference docs * Add connection documentation * Convert transport docs to autodoc * Convert the session object to autodoc * Change configuration for sphinx autodoc * Reverse order of tests to avoid incompatibility * Support injecting mount-point meta info * Add ability to extract a list of versions * Allow user to select different visibilities * Add user preference and example CLI to build it * Fix the v2 auth plugin to handle empty token * Move ironic node create/delete logic into shade * Refactor ironic commands into OperatorCloud class * fix typo in create\_server * Don't die if we didn't grab a floating ip * Process flavor and image names * Stop prefixing values with slugify * Don't access object members on a None * Make all of the compute logic work * Handle booleans that are strings in APIs * Add delete and get server name * Fixed up a bunch of flake8 warnings * Add in server metadata routines * Introduce the connection class * Add support for argparse Namespace objects * Add support for command line argument processing * Plumb through a small name change for args * Updated from global requirements * Consume project\_name from os-client-config * Handle lack of username for project\_name defaults * Handle the project/tenant nonesense more cleanly * add Ironic client * Add cache control settings * Handle no vendor clouds config files * Remove unused class method get\_services * Apply id\_attribute to Ceilometer Meters * Remove extraneous vim editor configuration comments 0.1.0.dev20141008 ----------------- * Update README requirements * Updated from global requirements * Use the now graduated oslo.utils * Make user\_id a higher priority for v2 auth * Use stevedore to load authorization plugins * Prepare for auth plugins * Use meter\_name as id for statistics * compute/v2 
limits\_absolute resource * Determines version from auth\_url when not explicit * Updates to use keystone session * Add clouds-public.yaml * Add ability to find an available floating ip * Add support for Samples coming from Ceilometer * Add support for telemetry sample Statistics * Prep for move to stackforge * Handle missing vendor key * Make env vars lowest priority * Handle null region * Discover Trove API version * Update the README file for more completeness * Offload config to the os-client-config library * Get rid of extra complexity with service values * Remove babel and add pyyaml * Port in config reading from shade * Initial Cookiecutter Commit * Floating ip does not have an id * Fix find for resources with id\_attribute * Add find command to the examples * identity/v3 user resource * Updated from global requirements * Add database users for instances * Apply id\_attribute throughout resources * compute/v2 keypairs resource * Add databases to instances * Move cacert/insecure awkwardness examples/common * Updated from global requirements * Add docs environement to testing interface * identity/v3 policy resource * identity/v3 domain resource * identity/v3 project resource * identity/v3 credential resource * identity/v3 group resource * identity/v3 endpoint resource * identity/v3 service resource * Add \_\_init\_\_ files to identity service * Add example code to README * Add volumes and config file parsing * Change example so CLI names match object arguments * Remove unused os-url option * Fix log invocations * Adding database flavor support * Fixing path to generated documentation * Implement the rest of Swift containers and objects * Work toward Python 3.4 support and testing * Sync up with latest keystoneclient changes * Allow headers to be retreived with GET bodies * Remove some extra lines from the README * Add the initial library code * Initial cookiecutter repo * Add domains for v3 authentication * identity/v2 role resource * network/v2 pool\_member 
resource * Fix the example authenticate * compute/v2 limits\_rate resource * Add example update * Add id\_attribute and resource\_name to resource * Adding alarm history to telemetry * Introduces example of running a method on a resource * Add the Alarm resource to telemetry * Updated from global requirements * compute/v2 server\_interface resource * Fix os\_region in example session * orchestration version resource * orchestration/v1 stack resource * Change OS\_REGION to OS\_REGION\_NAME * Change the capabilities to capability * Publicize resource\_id property * Add Meter resource to telemetry * Add the Resource resource to telemetry * Exception has wrong string for list * Server IP resource * Various standard server actions * compute/v2 server resource * identity/v2 tenant resource * identity/v2 user resource * Updated from global requirements * Fixes for telemetry * database/v1.0 instance resource * Add support for Swift containers * compute/v2 image resource * compute/ version resource * compute/v2 flavor resource * Introducing telemetry service * compute/v2 extension resource * network/v2 network resource * Add some factories * network/v2 security\_group\_rule resource * network/v2 quota resource * Add support for interface add/remove to routers * network/v2 metering\_label\_rule resource * network/v2 port resource * network/v2 load balancer healthmonitor resource * network/v2 subnet resource * network/v2 load balancer pool resource * network/v2 loadbalancer resource * network version resource * network/v2 security\_group resource * Have examples handle snake case to camel case * network/v2 load balancer listener resource * network/v2 floatingip resource * network/v2 extension resource * network/v2 metering\_label resource * network/v2 router resource * Separate head restrictions from get * Make logging more efficient in transport * Proper string formatting in exception messages * Minor fixes to examples * Full flavor CRUD * Don't join None values in 
utils.urljoin * Add example script for HEAD requests * Keep id in \_attrs * Add find method to resource * H405 activate * Add parameters to the list for filtering * Have exceptions print something by default * Make these comments H405 compliant * Allow --data option to pass UUID instead of json * Add some comments to the examples * Simple network resource * Add support for HEAD requests of resources * Change transport JSON handling * Fixed a small grammatical mistake in a docstring * Add example get * Add example delete * Example create command * Add common method to find a resource * The resource repr method should print id * Have the service catalog ignore empty urls * Add --data option to debug curl logging * Make version parsing in examples more intelligent * Important changes for service filtering * Very basic image resource * Updated from global requirements * json default for transport and resource \_\_repr\_\_ * Make the session command a little more friendly * Synced from global-requirements * Example session command * Remove a now unused flake8 exclude directory * Example code reorg and auth examples * Removed two flake8 skips * Sync hacking requirement with global requirements * Important auth fixes * Capitalize SDK more reasonably in an exception name * Move MethodNotSupported exception to exceptions * HttpException should be derived from SdkException * Some docs for the session object * Get rid of base\_url from transport * Rearrange session arguments * Clean up transport stuff out of the resource class * Resolve Ed's concerns on README * Fleshed out the README and removed dependency on babel * Removed now unnecesary workaround for PyPy * Comment in middle of glossary messes it up * Authentication from keystoneclient * Add command structure to example code * Update sphinx from global-requirements * Wrap lines at the appropriate length and use native sphinx constructs * Reorganize the index a bit to make the important content be at the top * Fix an 
innacuracy in an example in the docs * Fixed an emberassing typo * Added a makefile for sphinx * Converted the glossary to use native Sphinx markup * Mark openstacksdk as being a universal wheel * Add initial glossary * Add Transport doc * Resource Properties * Update the requirements * Finish transport renaming in the tests * Add some sample scripts * Add base resource class * Session layer with base authenticator * Add .venv to .gitignore * Docs cleanup * Rename session to transport * Add redirection handling to openstack.session.Session * Add requests.Session wrapper class * Fix temporary pypy gate issue with setuptools * Several stylistic fixes for the docs * Switch to oslosphinx * add newlines to end of requirements files * remove api\_strawman * reigster->register typo * Added support in the strawman for re-authentication * Initial new version with demonstration of clean implementation * Added sample directory layout for pystack * Remove locale overrides in tox * Fix misspellings in python openstacksdk * Finished the pystack strawman overview * Initial pystack strawman docs * Made tox -e pep8 passed. Also made git review work * setting up the initial layout; move the api proposals to api\_strawman * Added example code based on pystack * This should be a plural * Consolidate readmes * Initial blob of thoughts from me * Initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/HACKING.rst0000664000175000017500000000361500000000000016032 0ustar00zuulzuul00000000000000openstacksdk Style Commandments =============================== Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ Indentation ----------- PEP-8 allows for 'visual' indentation. **Do not use it**. Visual indentation looks like this: .. code-block:: python return_value = self.some_method(arg1, arg1, arg3, arg4) Visual indentation makes refactoring the code base unnecessarily hard. 
Instead of visual indentation, use this: .. code-block:: python return_value = self.some_method( arg1, arg1, arg3, arg4) That way, if some_method ever needs to be renamed, the only line that needs to be touched is the line with some_method. Additionally, if you need to line break at the top of a block, please indent the continuation line an additional 4 spaces, like this: .. code-block:: python for val in self.some_method( arg1, arg1, arg3, arg4): self.do_something_awesome() Neither of these are 'mandated' by PEP-8. However, they are prevailing styles within this code base. Unit Tests ---------- Unit tests should be virtually instant. If a unit test takes more than 1 second to run, it is a bad unit test. Honestly, 1 second is too slow. All unit test classes should subclass `openstack.tests.unit.base.TestCase`. The base TestCase class takes care of properly creating `Connection` objects in a way that protects against local environment. Test cases should use requests-mock to mock out HTTP interactions rather than using mock to mock out object access. Don't Use setUpClass -------------------- setUpClass looks like it runs once for the class. In parallel test execution environments though, it runs once per execution context. This makes reasoning about when it is going to actually run and what is going to happen extremely difficult and can produce hard to debug test issues. Don't ever use it. It makes baby pandas cry. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/LICENSE0000664000175000017500000002363600000000000015246 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/MANIFEST.in0000664000175000017500000000013500000000000015764 0ustar00zuulzuul00000000000000include AUTHORS include ChangeLog exclude .gitignore exclude .gitreview global-exclude *.pyc././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6254966 openstacksdk-4.0.0/PKG-INFO0000664000175000017500000003473700000000000015342 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: openstacksdk Version: 4.0.0 Summary: An SDK for building applications to work with OpenStack Home-page: https://docs.openstack.org/openstacksdk/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ============ openstacksdk ============ openstacksdk is a client library for building applications to work with OpenStack clouds. The project aims to provide a consistent and complete set of interactions with OpenStack's many services, along with complete documentation, examples, and tools. It also contains an abstraction interface layer. Clouds can do many things, but there are probably only about 10 of them that most people care about with any regularity. If you want to do complicated things, the per-service oriented portions of the SDK are for you. However, if what you want is to be able to write an application that talks to any OpenStack cloud regardless of configuration, then the Cloud Abstraction layer is for you. More information about the history of openstacksdk can be found at https://docs.openstack.org/openstacksdk/latest/contributor/history.html Getting started --------------- .. rubric:: Authentication and connection management openstacksdk aims to talk to any OpenStack cloud. To do this, it requires a configuration file. openstacksdk favours ``clouds.yaml`` files, but can also use environment variables. 
The ``clouds.yaml`` file should be provided by your cloud provider or deployment tooling. An example: .. code-block:: yaml clouds: mordred: region_name: Dallas auth: username: 'mordred' password: XXXXXXX project_name: 'demo' auth_url: 'https://identity.example.com' openstacksdk will look for ``clouds.yaml`` files in the following locations: * If set, the path indicated by the ``OS_CLIENT_CONFIG_FILE`` environment variable * ``.`` (the current directory) * ``$HOME/.config/openstack`` * ``/etc/openstack`` You can create a connection using the ``openstack.connect`` function. The cloud name can be either passed directly to this function or specified using the ``OS_CLOUD`` environment variable. If you don't have a ``clouds.yaml`` file and instead use environment variables for configuration then you can use the special ``envvars`` cloud name to load configuration from the environment. For example: .. code-block:: python import openstack # Initialize connection from a clouds.yaml by passing a cloud name conn_from_cloud_name = openstack.connect(cloud='mordred') # Initialize connection from a clouds.yaml using the OS_CLOUD envvar conn_from_os_cloud = openstack.connect() # Initialize connection from environment variables conn_from_env_vars = openstack.connect(cloud='envvars') .. note:: How this is all achieved is described in more detail `below `__. .. rubric:: The cloud layer openstacksdk consists of four layers which all build on top of each other. The highest level layer is the *cloud* layer. Cloud layer methods are available via the top level ``Connection`` object returned by ``openstack.connect``. For example: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in conn.list_servers(): print(server.to_dict()) The cloud layer is based on logical operations that can potentially touch multiple services. 
The benefit of this layer is mostly seen in more complicated operations that take multiple steps and where the steps vary across providers. For example: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # Upload an image to the cloud image = conn.create_image( 'ubuntu-trusty', filename='ubuntu-trusty.qcow2', wait=True) # Find a flavor with at least 512M of RAM flavor = conn.get_flavor_by_ram(512) # Boot a server, wait for it to boot, and then do whatever is needed # to get a public IP address for it. conn.create_server( 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) .. rubric:: The proxy layer The next layer is the *proxy* layer. Most users will make use of this layer. The proxy layer is service-specific, so methods will be available under service-specific connection attributes of the ``Connection`` object such as ``compute``, ``block_storage``, ``image`` etc. For example: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in conn.compute.servers(): print(server.to_dict()) .. note:: A list of supported services is given `below `__. .. rubric:: The resource layer Below this there is the *resource* layer. This provides support for the basic CRUD operations supported by REST APIs and is the base building block for the other layers. You typically will not need to use this directly but it can be helpful for operations where you already have a ``Resource`` object to hand. For example: .. 
code-block:: python import openstack import openstack.config.loader import openstack.compute.v2.server # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in openstack.compute.v2.server.Server.list(session=conn.compute): print(server.to_dict()) .. rubric:: The raw HTTP layer Finally, there is the *raw HTTP* layer. This exposes raw HTTP semantics and is effectively a wrapper around the `requests`__ API with added smarts to handle stuff like authentication and version management. As such, you can use the ``requests`` API methods you know and love, like ``get``, ``post`` and ``put``, and expect to receive a ``requests.Response`` object in response (unlike the other layers, which mostly all return objects that subclass ``openstack.resource.Resource``). Like the *resource* layer, you will typically not need to use this directly but it can be helpful to interact with APIs that have not or will not be supported by openstacksdk. For example: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List servers for server in openstack.compute.get('/servers').json(): print(server) .. __: https://requests.readthedocs.io/en/latest/ .. _openstack.config: Configuration ------------- openstacksdk uses the ``openstack.config`` module to parse configuration. ``openstack.config`` will find cloud configuration for as few as one cloud and as many as you want to put in a config file. 
It will read environment variables and config files, and it also contains some vendor specific default values so that you don't have to know extra info to use OpenStack * If you have a config file, you will get the clouds listed in it * If you have environment variables, you will get a cloud named `envvars` * If you have neither, you will get a cloud named `defaults` with base defaults You can view the configuration identified by openstacksdk in your current environment by running ``openstack.config.loader``. For example: .. code-block:: bash $ python -m openstack.config.loader More information at https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html .. _supported-services: Supported services ------------------ The following services are currently supported. A full list of all available OpenStack service can be found in the `Project Navigator`__. .. note:: Support here does not guarantee full-support for all APIs. It simply means some aspect of the project is supported. .. 
list-table:: Supported services :widths: 15 25 10 40 :header-rows: 1 * - Service - Description - Cloud Layer - Proxy & Resource Layer * - **Compute** - - - * - Nova - Compute - ✔ - ✔ (``openstack.compute``) * - **Hardware Lifecycle** - - - * - Ironic - Bare metal provisioning - ✔ - ✔ (``openstack.baremetal``, ``openstack.baremetal_introspection``) * - Cyborg - Lifecycle management of accelerators - ✔ - ✔ (``openstack.accelerator``) * - **Storage** - - - * - Cinder - Block storage - ✔ - ✔ (``openstack.block_storage``) * - Swift - Object store - ✔ - ✔ (``openstack.object_store``) * - Cinder - Shared filesystems - ✔ - ✔ (``openstack.shared_file_system``) * - **Networking** - - - * - Neutron - Networking - ✔ - ✔ (``openstack.network``) * - Octavia - Load balancing - ✔ - ✔ (``openstack.load_balancer``) * - Designate - DNS - ✔ - ✔ (``openstack.dns``) * - **Shared services** - - - * - Keystone - Identity - ✔ - ✔ (``openstack.identity``) * - Placement - Placement - ✔ - ✔ (``openstack.placement``) * - Glance - Image storage - ✔ - ✔ (``openstack.image``) * - Barbican - Key management - ✔ - ✔ (``openstack.key_manager``) * - **Workload provisioning** - - - * - Magnum - Container orchestration engine provisioning - ✔ - ✔ (``openstack.container_infrastructure_management``) * - **Orchestration** - - - * - Heat - Orchestration - ✔ - ✔ (``openstack.orchestration``) * - Senlin - Clustering - ✔ - ✔ (``openstack.clustering``) * - Mistral - Workflow - ✔ - ✔ (``openstack.workflow``) * - Zaqar - Messaging - ✔ - ✔ (``openstack.message``) * - **Application lifecycle** - - - * - Masakari - Instances high availability service - ✔ - ✔ (``openstack.instance_ha``) .. 
__: https://www.openstack.org/software/project-navigator/openstack-components#openstack-services Links ----- * `Issue Tracker `_ * `Code Review `_ * `Documentation `_ * `PyPI `_ * `Mailing list `_ * `Release Notes `_ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/README.rst0000664000175000017500000002502100000000000015716 0ustar00zuulzuul00000000000000============ openstacksdk ============ openstacksdk is a client library for building applications to work with OpenStack clouds. The project aims to provide a consistent and complete set of interactions with OpenStack's many services, along with complete documentation, examples, and tools. It also contains an abstraction interface layer. Clouds can do many things, but there are probably only about 10 of them that most people care about with any regularity. If you want to do complicated things, the per-service oriented portions of the SDK are for you. However, if what you want is to be able to write an application that talks to any OpenStack cloud regardless of configuration, then the Cloud Abstraction layer is for you. More information about the history of openstacksdk can be found at https://docs.openstack.org/openstacksdk/latest/contributor/history.html Getting started --------------- .. 
rubric:: Authentication and connection management openstacksdk aims to talk to any OpenStack cloud. To do this, it requires a configuration file. openstacksdk favours ``clouds.yaml`` files, but can also use environment variables. The ``clouds.yaml`` file should be provided by your cloud provider or deployment tooling. An example: .. code-block:: yaml clouds: mordred: region_name: Dallas auth: username: 'mordred' password: XXXXXXX project_name: 'demo' auth_url: 'https://identity.example.com' openstacksdk will look for ``clouds.yaml`` files in the following locations: * If set, the path indicated by the ``OS_CLIENT_CONFIG_FILE`` environment variable * ``.`` (the current directory) * ``$HOME/.config/openstack`` * ``/etc/openstack`` You can create a connection using the ``openstack.connect`` function. The cloud name can be either passed directly to this function or specified using the ``OS_CLOUD`` environment variable. If you don't have a ``clouds.yaml`` file and instead use environment variables for configuration then you can use the special ``envvars`` cloud name to load configuration from the environment. For example: .. code-block:: python import openstack # Initialize connection from a clouds.yaml by passing a cloud name conn_from_cloud_name = openstack.connect(cloud='mordred') # Initialize connection from a clouds.yaml using the OS_CLOUD envvar conn_from_os_cloud = openstack.connect() # Initialize connection from environment variables conn_from_env_vars = openstack.connect(cloud='envvars') .. note:: How this is all achieved is described in more detail `below `__. .. rubric:: The cloud layer openstacksdk consists of four layers which all build on top of each other. The highest level layer is the *cloud* layer. Cloud layer methods are available via the top level ``Connection`` object returned by ``openstack.connect``. For example: .. 
code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in conn.list_servers(): print(server.to_dict()) The cloud layer is based on logical operations that can potentially touch multiple services. The benefit of this layer is mostly seen in more complicated operations that take multiple steps and where the steps vary across providers. For example: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # Upload an image to the cloud image = conn.create_image( 'ubuntu-trusty', filename='ubuntu-trusty.qcow2', wait=True) # Find a flavor with at least 512M of RAM flavor = conn.get_flavor_by_ram(512) # Boot a server, wait for it to boot, and then do whatever is needed # to get a public IP address for it. conn.create_server( 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) .. rubric:: The proxy layer The next layer is the *proxy* layer. Most users will make use of this layer. The proxy layer is service-specific, so methods will be available under service-specific connection attributes of the ``Connection`` object such as ``compute``, ``block_storage``, ``image`` etc. For example: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in conn.compute.servers(): print(server.to_dict()) .. note:: A list of supported services is given `below `__. .. rubric:: The resource layer Below this there is the *resource* layer. This provides support for the basic CRUD operations supported by REST APIs and is the base building block for the other layers. 
You typically will not need to use this directly but it can be helpful for operations where you already have a ``Resource`` object to hand. For example: .. code-block:: python import openstack import openstack.config.loader import openstack.compute.v2.server # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in openstack.compute.v2.server.Server.list(session=conn.compute): print(server.to_dict()) .. rubric:: The raw HTTP layer Finally, there is the *raw HTTP* layer. This exposes raw HTTP semantics and is effectively a wrapper around the `requests`__ API with added smarts to handle stuff like authentication and version management. As such, you can use the ``requests`` API methods you know and love, like ``get``, ``post`` and ``put``, and expect to receive a ``requests.Response`` object in response (unlike the other layers, which mostly all return objects that subclass ``openstack.resource.Resource``). Like the *resource* layer, you will typically not need to use this directly but it can be helpful to interact with APIs that have not or will not be supported by openstacksdk. For example: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List servers for server in openstack.compute.get('/servers').json(): print(server) .. __: https://requests.readthedocs.io/en/latest/ .. _openstack.config: Configuration ------------- openstacksdk uses the ``openstack.config`` module to parse configuration. ``openstack.config`` will find cloud configuration for as few as one cloud and as many as you want to put in a config file. 
It will read environment variables and config files, and it also contains some vendor specific default values so that you don't have to know extra info to use OpenStack * If you have a config file, you will get the clouds listed in it * If you have environment variables, you will get a cloud named `envvars` * If you have neither, you will get a cloud named `defaults` with base defaults You can view the configuration identified by openstacksdk in your current environment by running ``openstack.config.loader``. For example: .. code-block:: bash $ python -m openstack.config.loader More information at https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html .. _supported-services: Supported services ------------------ The following services are currently supported. A full list of all available OpenStack service can be found in the `Project Navigator`__. .. note:: Support here does not guarantee full-support for all APIs. It simply means some aspect of the project is supported. .. 
list-table:: Supported services :widths: 15 25 10 40 :header-rows: 1 * - Service - Description - Cloud Layer - Proxy & Resource Layer * - **Compute** - - - * - Nova - Compute - ✔ - ✔ (``openstack.compute``) * - **Hardware Lifecycle** - - - * - Ironic - Bare metal provisioning - ✔ - ✔ (``openstack.baremetal``, ``openstack.baremetal_introspection``) * - Cyborg - Lifecycle management of accelerators - ✔ - ✔ (``openstack.accelerator``) * - **Storage** - - - * - Cinder - Block storage - ✔ - ✔ (``openstack.block_storage``) * - Swift - Object store - ✔ - ✔ (``openstack.object_store``) * - Cinder - Shared filesystems - ✔ - ✔ (``openstack.shared_file_system``) * - **Networking** - - - * - Neutron - Networking - ✔ - ✔ (``openstack.network``) * - Octavia - Load balancing - ✔ - ✔ (``openstack.load_balancer``) * - Designate - DNS - ✔ - ✔ (``openstack.dns``) * - **Shared services** - - - * - Keystone - Identity - ✔ - ✔ (``openstack.identity``) * - Placement - Placement - ✔ - ✔ (``openstack.placement``) * - Glance - Image storage - ✔ - ✔ (``openstack.image``) * - Barbican - Key management - ✔ - ✔ (``openstack.key_manager``) * - **Workload provisioning** - - - * - Magnum - Container orchestration engine provisioning - ✔ - ✔ (``openstack.container_infrastructure_management``) * - **Orchestration** - - - * - Heat - Orchestration - ✔ - ✔ (``openstack.orchestration``) * - Senlin - Clustering - ✔ - ✔ (``openstack.clustering``) * - Mistral - Workflow - ✔ - ✔ (``openstack.workflow``) * - Zaqar - Messaging - ✔ - ✔ (``openstack.message``) * - **Application lifecycle** - - - * - Masakari - Instances high availability service - ✔ - ✔ (``openstack.instance_ha``) .. 
__: https://www.openstack.org/software/project-navigator/openstack-components#openstack-services Links ----- * `Issue Tracker `_ * `Code Review `_ * `Documentation `_ * `PyPI `_ * `Mailing list `_ * `Release Notes `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/SHADE-MERGE-TODO.rst0000664000175000017500000001521000000000000017204 0ustar00zuulzuul00000000000000Tasks Needed for rationalizing shade and openstacksdk ===================================================== A large portion of the important things have already been done and landed already. For reference, those are: * shade and os-client-config library content have been merged into the tree. * Use official service-type names from Service Types Authority via os-service-types to refer to services and proxies. * Automatically also add properties to the connection for every known alias for each service-type. * Made openstack.proxy.Proxy a subclass of keystoneauth1.adapter.Adapter. Removed local logic that duplicates keystoneauth logic. This means every proxy also has direct REST primitives available. For example: .. code-block:: python connection = connection.Connection() servers = connection.compute.servers() server_response = connection.compute.get('/servers') * Removed the Profile object in favor of openstack.config. * Removed the Session object in favor of using keystoneauth. * Plumbed Proxy use of Adapter through the Adapter subclass from shade that uses the TaskManager to run REST calls. * Finish migrating to Resource2 and Proxy2, rename them to Resource and Proxy. * Merge OpenStackCloud into Connection. This should result in being able to use the connection interact with the cloud using all three interfaces. For instance: .. 
code-block:: python conn = connection.Connection() servers = conn.list_servers() # High-level resource interface from shade servers = conn.compute.servers() # SDK Service/Object Interface response = conn.compute.get('/servers') # REST passthrough * Removed ServiceFilter and the various Service objects in favor of discovery. Next steps ========== * Maybe rename self.session and session parameter in all usage in proxy and resource to self.adapter. They are Adapters not Sessions, but that may not mean anything to people. * Migrate unit tests to requests-mock instead of mocking python calls to session. * Replace _prepare_request with requests.Session.prepare_request. shade integration ----------------- * Invent some terminology that is clear and makes sense to distinguish between the object interface that came originally from openstacksdk and the interface that came from shade. * Shift the shade interface methods to use the Object Interface for their operations. It's possible there may be cases where the REST layer needs to be used instead, but we should try to sort those out. * Investigate options and then make a plan as to whether shade methods should return SDK objects or return dicts/munches as they do today. Should we make Resource objects extend dict/munch so they can be used like the shade ones today? Or should we just have the external shade shim library get objects from the high-level SDK 'shade' interface and call to_dict() on them all? * Add support for shade expressing normalization model/contract into Resource, or for just leveraging what's in Resource for shade-layer normalization. * Make a plan for normalization supporting shade users continuing to get shade normalized resource Munch objects from shade API calls, sdk proxy/resource users getting SDK objects, and both of them being able to opt in to "strict" normalization at Connection constructor time. Perhaps making Resource subclass Munch would allow mixed use? Needs investigation. 
* Investigate auto-generating the bulk of shade's API based on introspection of SDK objects, leaving only the code with extra special logic in the shade layer. Service Proxies --------------- These are all things to think about. * Authenticate at Connection() creation time? Having done that, use the catalog in the token to determine which service proxies to add to the Connection object. * Filter the above service list from the token by has_service() from openstack.config. * Add a has_service method to Connection which will BASICALLY just be hasattr(self, 'service') - but will look nicer. * Consider adding magic to Connection for every service that a given cloud DOESN'T have that will throw an exception on any attribute access that is "cloud doesn't have service blah" rather than simply Attribute Not Found. The SDK has a python api regardless of the services remotely, it would be nice if trimming the existing attribute list wouldn't make it impossible for someone to validate their code correctness. It's also possible that instead of not having services, we always mount proxy objects for every service, but we mount a "NotFound" proxy for each service that isn't there. * Since openstacksdk uses version discovery now, there is always a good path to "the" version of a given service. However, a cloud may have more than one. Attach the discovered service proxy to connection as today under the service type name. Add a property to each service proxy for each version the SDK knows about. For instance: .. code-block:: python connection = openstack.Connection() connection.volume # openstack.volume.v3._proxy connection.volume.v2 # openstack.volume.v2._proxy connection.volume.v3 # openstack.volume.v3._proxy Those versioned proxies should be done as Adapters with min and max version set explicitly. 
This should allow a common pattern for people to write code that just wants to use the discovered or configured service, or who want to attempt to use a specific version of the API if they know what they're doing and at the very least wind up with a properly configured Adapter they can make rest calls on. Because: .. code-block:: python connection = openstack.Connection() connection.dns.v2.get('/zones') should always work on an OpenStack cloud with designate even if the SDK authors don't know anything about Designate and haven't added Resource or Proxy explicitly for it. * Decide what todo about non-OpenStack services. Do we add base Proxy properties to Connection for every service we find in the catalog regardless of official/non-official? If so, do we let someone pass a dict of service-type, Proxy to connection that would let the provide a local service we don't know about? If we do that- we should disallow passing in overrides for services we DO know about to discourage people writing local tools that have different Compute behavior, for instance. Microversions ------------- * keystoneauth.adapter.Adapter knows how to send microversion headers, and get_endpoint_data knows how to fetch supported ranges. As microversion support is added to calls, it needs to be on a per-request basis. This has implications to both Resource and Proxy, as cloud payloads for data mapping can be different on a per-microversion basis. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/babel.cfg0000664000175000017500000000002000000000000015745 0ustar00zuulzuul00000000000000[python: **.py] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/bindep.txt0000664000175000017500000000045300000000000016233 0ustar00zuulzuul00000000000000# This is a cross-platform list tracking distribution packages needed by tests; # see http://docs.openstack.org/infra/bindep/ for additional information. build-essential [platform:dpkg] python3-dev [platform:dpkg] libffi-dev [platform:dpkg] libffi-devel [platform:rpm] openssl-devel [platform:rpm] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1092477 openstacksdk-4.0.0/devstack/0000775000175000017500000000000000000000000016033 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/devstack/plugin.sh0000664000175000017500000000221400000000000017664 0ustar00zuulzuul00000000000000# Install and configure **openstacksdk** library in devstack # # To enable openstacksdk in devstack add an entry to local.conf that looks like # # [[local|localrc]] # enable_plugin openstacksdk https://opendev.org/openstack/openstacksdk function preinstall_openstacksdk { : } function install_openstacksdk { if use_library_from_git "openstacksdk"; then # don't clone, it'll be done by the plugin install setup_dev_lib "openstacksdk" else pip_install "openstacksdk" fi } function configure_openstacksdk { : } function initialize_openstacksdk { : } function unstack_openstacksdk { : } function clean_openstacksdk { : } # This is the main for plugin.sh if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then preinstall_openstacksdk elif [[ "$1" == "stack" && "$2" == "install" ]]; then install_openstacksdk 
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then configure_openstacksdk elif [[ "$1" == "stack" && "$2" == "extra" ]]; then initialize_openstacksdk fi if [[ "$1" == "unstack" ]]; then unstack_openstacksdk fi if [[ "$1" == "clean" ]]; then clean_openstacksdk fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1092477 openstacksdk-4.0.0/doc/0000775000175000017500000000000000000000000014774 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/requirements.txt0000664000175000017500000000030100000000000020252 0ustar00zuulzuul00000000000000docutils>=0.11 # OSI-Approved Open Source, Public Domain openstackdocstheme>=2.2.1 # Apache-2.0 reno>=3.1.0 # Apache-2.0 sphinx>=2.0.0,!=2.1.0 # BSD sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1092477 openstacksdk-4.0.0/doc/source/0000775000175000017500000000000000000000000016274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/conf.py0000664000175000017500000000574100000000000017602 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import sys import warnings sys.path.insert(0, os.path.abspath('../..')) sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'openstackdocstheme', 'sphinxcontrib.rsvgconverter', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/openstacksdk' openstackdocs_pdf_link = True openstackdocs_use_storyboard = False html_theme = 'openstackdocs' # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2017, Various members of the OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' autodoc_member_order = 'bysource' # Include both the class and __init__ docstrings when describing the class autoclass_content = 'both' # Don't document type hints as they're too noisy autodoc_typehints = 'none' # Locations to exclude when looking for source files. exclude_patterns = [] # -- Options for HTML output ---------------------------------------------- # Don't let openstackdocstheme insert TOCs automatically. theme_include_auto_toc = False # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ( 'index', 'doc-openstacksdk.tex', 'OpenStackSDK Documentation', 'OpenStack Foundation', 'manual', ), ] # Allow deeper levels of nesting for \begin...\end stanzas latex_elements = {'maxlistdepth': 10} # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1132495 openstacksdk-4.0.0/doc/source/contributor/0000775000175000017500000000000000000000000020646 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/clouds.yaml0000664000175000017500000000170700000000000023030 0ustar00zuulzuul00000000000000clouds: devstack: auth: auth_url: http://xxx.xxx.xxx.xxx/identity password: password project_domain_id: default project_name: demo user_domain_id: default username: demo identity_api_version: '3' region_name: RegionOne volume_api_version: '3' devstack-admin: auth: auth_url: http://xxx.xxx.xxx.xxx/identity password: password project_domain_id: default project_name: admin user_domain_id: default username: admin identity_api_version: '3' region_name: RegionOne volume_api_version: '3' devstack-alt: auth: auth_url: http://xxx.xxx.xxx.xxx/identity password: password project_domain_id: default project_name: alt_demo user_domain_id: default username: alt_demo identity_api_version: '3' region_name: RegionOne volume_api_version: '3' example: image_name: cirros-0.5.2-x86_64-disk flavor_name: m1.small ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/coding.rst0000664000175000017500000000764500000000000022657 0ustar00zuulzuul00000000000000OpenStack SDK Developer Coding Standards 
======================================== In the beginning, there were no guidelines. And it was good. But that didn't last long. As more and more people added more and more code, we realized that we needed a set of coding standards to make sure that the *openstacksdk* API at least *attempted* to display some form of consistency. Thus, these coding standards/guidelines were developed. Note that not all of *openstacksdk* adheres to these standards just yet. Some older code has not been updated because we need to maintain backward compatibility. Some of it just hasn't been changed yet. But be clear, all new code *must* adhere to these guidelines. Below are the patterns that we expect *openstacksdk* developers to follow. Release Notes ------------- *openstacksdk* uses `reno `_ for managing its release notes. A new release note should be added to your contribution anytime you add new API calls, fix significant bugs, add new functionality or parameters to existing API calls, or make any other significant changes to the code base that we should draw attention to for the user base. It is *not* necessary to add release notes for minor fixes, such as correction of documentation typos, minor code cleanup or reorganization, or any other change that a user would not notice through normal usage. Exceptions ---------- Exceptions should NEVER be wrapped and re-raised inside of a new exception. This removes important debug information from the user. All of the exceptions should be raised correctly the first time. openstack.cloud API Methods --------------------------- The ``openstack.cloud`` layer has some specific rules: - When an API call acts on a resource that has both a unique ID and a name, that API call should accept either identifier with a name_or_id parameter. - All resources should adhere to the get/list/search interface that control retrieval of those resources. E.g., ``get_image()``, ``list_images()``, ``search_images()``. 
- Resources should have ``create_RESOURCE()``, ``delete_RESOURCE()``, ``update_RESOURCE()`` API methods (as it makes sense). - For those methods that should behave differently for omitted or None-valued parameters, use the ``_utils.valid_kwargs`` decorator. This includes all Neutron ``update_*`` functions. - Deleting a resource should return True if the delete succeeded, or False if the resource was not found. Returned Resources ~~~~~~~~~~~~~~~~~~ The ``openstack.cloud`` layer should rely on the proxy layer for the given service. This will ensure complex objects returned to the caller are of ``openstack.resource.Resource`` type. Nova vs. Neutron ~~~~~~~~~~~~~~~~ - Recognize that not all cloud providers support Neutron, so never assume it will be present. If a task can be handled by either Neutron or Nova, code it to be handled by either. - For methods that accept either a Nova pool or Neutron network, the parameter should just refer to the network, but documentation of it should explain about the pool. See: ``create_floating_ip()`` and ``available_floating_ip()`` methods. Tests ----- - New API methods *must* have unit tests! - New unit tests should only mock at the REST layer using ``requests_mock``. Any mocking of *openstacksdk* itself should be considered legacy and to be avoided. Exceptions to this rule can be made when attempting to test the internals of a logical shim where the inputs and output of the method aren't actually impacted by remote content. - Functional tests should be added, when possible. - In functional tests, always use unique names (for resources that have this attribute) and use it for clean up (see next point). - In functional tests, always define cleanup functions to delete data added by your test, should something go wrong. Data removal should be wrapped in a try except block and try to delete as many entries added by the test as possible. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/contributing.rst0000664000175000017500000000004700000000000024110 0ustar00zuulzuul00000000000000.. include:: ../../../CONTRIBUTING.rst ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1132495 openstacksdk-4.0.0/doc/source/contributor/create/0000775000175000017500000000000000000000000022111 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.0652266 openstacksdk-4.0.0/doc/source/contributor/create/examples/0000775000175000017500000000000000000000000023727 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1132495 openstacksdk-4.0.0/doc/source/contributor/create/examples/resource/0000775000175000017500000000000000000000000025556 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/create/examples/resource/fake.py0000664000175000017500000000140600000000000027037 0ustar00zuulzuul00000000000000# Apache 2 header omitted for brevity from openstack import resource class Fake(resource.Resource): resource_key = "resource" resources_key = "resources" base_path = "/fake" allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_head = True #: The transaction date and time. timestamp = resource.Header("x-timestamp") #: The name of this resource. name = resource.Body("name", alternate_id=True) #: The value of the resource. Also available in headers. value = resource.Body("value", alias="x-resource-value") #: Is this resource cool? If so, set it to True. #: This is a multi-line comment about cool stuff. 
cool = resource.Body("cool", type=bool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/create/examples/resource/fake_service.py0000664000175000017500000000043400000000000030557 0ustar00zuulzuul00000000000000# Apache 2 header omitted for brevity from openstack import service_description from openstack.fake.v2 import _proxy as _proxy_v2 class FakeService(service_description.ServiceDescription): """The fake service.""" supported_versions = { '2': _proxy_v2.Proxy, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/create/resource.rst0000664000175000017500000001670100000000000024477 0ustar00zuulzuul00000000000000.. TODO(shade) Update this guide. Creating a New Resource ======================= This guide will walk you through how to add resources for a service. Naming Conventions ------------------ Above all, names across this project conform to Python's naming standards, as laid out in `PEP 8 `_. The relevant details we need to know are as follows: * Module names are lower case, and separated by underscores if more than one word. For example, ``openstack.object_store`` * Class names are capitalized, with no spacing, and each subsequent word is capitalized in a name. For example, ``ServerMetadata``. * Attributes on classes, including methods, are lower case and separated by underscores. For example, ``allow_list`` or ``get_data``. Services ******** Services in the OpenStack SDK are named after their program name, not their code name. For example, the project often known as "Nova" is always called "compute" within this SDK. This guide walks through creating service for an OpenStack program called "Fake". Following our guidelines, the code for its service would live under the ``openstack.fake`` namespace. 
What follows is the creation of a :class:`~openstack.resource.Resource` class for the "Fake" service. Resources ********* Resources are named after the server-side resource, which is set in the ``base_path`` attribute of the resource class. This guide creates a resource class for the ``/fake`` server resource, so the resource module is called ``fake.py`` and the class is called ``Fake``. An Example ---------- ``openstack/fake/fake_service.py`` .. literalinclude:: examples/resource/fake_service.py :language: Python :linenos: ``openstack/fake/v2/fake.py`` .. literalinclude:: examples/resource/fake.py :language: Python :linenos: ``fake.Fake`` Attributes ------------------------ Each service's resources inherit from :class:`~openstack.resource.Resource`, so they can override any of the base attributes to fit the way their particular resource operates. ``resource_key`` and ``resources_key`` ************************************** These attributes are set based on how your resource responds with data. The default values for each of these are ``None``, which works fine when your resource returns a JSON body that can be used directly without a top-level key, such as ``{"name": "Ernie Banks", ...}"``. However, our ``Fake`` resource returns JSON bodies that have the details of the resource one level deeper, such as ``{"resources": {"name": "Ernie Banks", ...}, {...}}``. It does a similar thing with single resources, putting them inside a dictionary keyed on ``"resource"``. By setting ``Fake.resource_key`` on *line 8*, we tell the ``Resource.create``, ``Resource.get``, and ``Resource.update`` methods that we're either sending or receiving a resource that is in a dictionary with that key. By setting ``Fake.resources_key`` on *line 9*, we tell the ``Resource.list`` method that we're expecting to receive multiple resources inside a dictionary with that key. ``base_path`` ************* The ``base_path`` is the URL we're going to use to make requests for this resource. 
In this case, *line 10* sets ``base_path = "/fake"``, which also corresponds to the name of our class, ``Fake``. Most resources follow this basic formula. Some cases are more complex, where the URL to make requests to has to contain some extra data. The volume service has several resources which make either basic requests or detailed requests, so they use ``base_path = "/volumes/%s(detailed)"``. Before a request is made, if ``detailed = True``, they convert it to a string so the URL becomes ``/volumes/detailed``. If it's ``False``, they only send ``/volumes/``. ``service`` *********** *Line 11* is an instance of the service we're implementing. Each resource ties itself to the service through this setting, so that the proper URL can be constructed. In ``fake_service.py``, we specify the valid versions as well as what this service is called in the service catalog. When a request is made for this resource, the Session now knows how to construct the appropriate URL using this ``FakeService`` instance. Supported Operations -------------------- The base :class:`~openstack.resource.Resource` disallows all types of requests by default, requiring each resource to specify which requests they support. On *lines 14-19*, our ``Fake`` resource specifies that it'll work with all of the operations. 
In order to have the following methods work, you must allow the corresponding value by setting it to ``True``: +----------------------------------------------+----------------+ | :class:`~openstack.resource.Resource.create` | allow_create | +----------------------------------------------+----------------+ | :class:`~openstack.resource.Resource.delete` | allow_delete | +----------------------------------------------+----------------+ | :class:`~openstack.resource.Resource.head` | allow_head | +----------------------------------------------+----------------+ | :class:`~openstack.resource.Resource.list` | allow_list | +----------------------------------------------+----------------+ | :class:`~openstack.resource.Resource.fetch` | allow_fetch | +----------------------------------------------+----------------+ | :class:`~openstack.resource.Resource.commit` | allow_commit | +----------------------------------------------+----------------+ An additional attribute to set is ``commit_method``. It defaults to ``PUT``, but some services use ``POST`` or ``PATCH`` to commit changes back to the remote resource. Properties ---------- .. TODO(shade) Especially this section The way resource classes communicate values between the user and the server are :class:`~openstack.resource.prop` objects. These act similarly to Python's built-in property objects, but they share only the name - they're not the same. Properties are set based on the contents of a response body or headers. Based on what your resource returns, you should set ``prop``\s to map those values to ones on your :class:`~openstack.resource.Resource` object. *Line 22* sets a prop for ``timestamp`` , which will cause the ``Fake.timestamp`` attribute to contain the value returned in an ``X-Timestamp`` header, such as from a ``Fake.head`` request. *Line 24* sets a prop for ``name``, which is a value returned in a body, such as from a ``Fake.get`` request. 
Note from *line 12* that ``name`` is specified its ``id`` attribute, so when this resource is populated from a response, ``Fake.name`` and ``Fake.id`` are the same value. *Line 26* sets a prop which contains an alias. ``Fake.value`` will be set when a response body contains a ``value``, or when a header contains ``X-Resource-Value``. *Line 28* specifies a type to be checked before sending the value in a request. In this case, we can only set ``Fake.cool`` to either ``True`` or ``False``, otherwise a TypeError will be raised if the value can't be converted to the expected type. Documentation ------------- We use Sphinx's ``autodoc`` feature in order to build API documentation for each resource we expose. The attributes we override from :class:`~openstack.resource.Resource` don't need to be documented, but any :class:`~openstack.resource.prop` attributes must be. All you need to do is add a comment *above* the line to document, with a colon following the pound-sign. *Lines 21, 23, 25, and 27-28* are comments which will then appear in the API documentation. As shown in *lines 27 & 28*, these comments can span multiple lines. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/history.rst0000664000175000017500000000502200000000000023100 0ustar00zuulzuul00000000000000A Brief History =============== *openstacksdk* started its life as three different libraries: *shade*, *os-client-config* and *python-openstacksdk*. *shade* *shade* started its life as some code inside of OpenStack Infra's `nodepool`_ project, and as some code inside of the `Ansible OpenStack Modules`_. Ansible had a bunch of different OpenStack related modules, and there was a ton of duplicated code. 
Eventually, between refactoring that duplication into an internal library, and adding the logic and features that the OpenStack Infra team had developed to run client applications at scale, it turned out that we'd written nine-tenths of what we'd need to have a standalone library. Because of its background from nodepool, *shade* contained abstractions to work around deployment differences and is resource oriented rather than service oriented. This allows a user to think about Security Groups without having to know whether Security Groups are provided by Nova or Neutron on a given cloud. On the other hand, as an interface that provides an abstraction, it deviates from the published OpenStack REST API and adds its own opinions, which may not get in the way of more advanced users with specific needs. *os-client-config* *os-client-config* was a library for collecting client configuration for using an OpenStack cloud in a consistent and comprehensive manner, which introduced the ``clouds.yaml`` file for expressing named cloud configurations. *python-openstacksdk* *python-openstacksdk* was a library that exposed the OpenStack APIs to developers in a consistent and predictable manner. After a while it became clear that there was value in both the high-level layer that contains additional business logic and the lower-level SDK that exposes services and their resources faithfully and consistently as Python objects. Even with both of those layers, it is still beneficial at times to be able to make direct REST calls and to do so with the same properly configured `Session`_ from `python-requests`_. This led to the merge of the three projects. The original contents of the *shade* library have been moved into ``openstack.cloud`` and *os-client-config* has been moved in to ``openstack.config``. .. _nodepool: https://docs.openstack.org/infra/nodepool/ .. _Ansible OpenStack Modules: http://docs.ansible.com/ansible/latest/list_of_cloud_modules.html#openstack .. 
_Session: http://docs.python-requests.org/en/master/user/advanced/#session-objects .. _python-requests: http://docs.python-requests.org/en/master/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/index.rst0000664000175000017500000000571200000000000022514 0ustar00zuulzuul00000000000000Contributing to the OpenStack SDK ================================= This section of documentation pertains to those who wish to contribute to the development of this SDK. If you're looking for documentation on how to use the SDK to build applications, refer to the `user <../user>`_ section. About the Project ----------------- The OpenStack SDK is a OpenStack project aimed at providing a complete software development kit for the programs which make up the OpenStack community. It is a Python library with corresponding documentation, examples, and tools released under the Apache 2 license. .. toctree:: :maxdepth: 2 history Contribution Mechanics ---------------------- .. toctree:: :maxdepth: 2 contributing Contacting the Developers ------------------------- IRC ~~~ The developers of this project are available in the `#openstack-sdks`__ channel on OFTC IRC. This channel includes conversation on SDKs and tools within the general OpenStack community, including OpenStackClient as well as occasional talk about SDKs created for languages outside of Python. .. __: http://webchat.oftc.net?channels=%23openstack-sdks Email ~~~~~ The `openstack-discuss`__ mailing list fields questions of all types on OpenStack. Using the ``[sdk]`` filter to begin your email subject will ensure that the message gets to SDK developers. .. __: mailto:openstack-discuss@lists.openstack.org?subject=[sdk]%20Question%20about%20openstacksdk Coding Standards ---------------- We are a bit stricter than usual in the coding standards department. It's a good idea to read through the :doc:`coding ` section. .. 
toctree:: :maxdepth: 2 coding Development Environment ----------------------- The first step towards contributing code and documentation is to setup your development environment. We use a pretty standard setup, but it is fully documented in our :doc:`setup ` section. .. toctree:: :maxdepth: 2 setup Testing ------- The project contains two test packages, one for unit tests and one for functional tests. The ``openstack.tests.unit`` package tests the SDK's features in isolation. The ``openstack.tests.functional`` package tests the SDK's features and examples against an OpenStack cloud. .. toctree:: testing Project Layout -------------- The project contains a top-level ``openstack`` package, which houses several modules that form the foundation upon which each service's API is built on. Under the ``openstack`` package are packages for each of those services, such as ``openstack.compute``. .. toctree:: layout Adding Features --------------- Does this SDK not do what you need it to do? Is it missing a service? Are you a developer on another project who wants to add their service? You're in the right place. Below are examples of how to add new features to the OpenStack SDK. .. toctree:: :maxdepth: 2 create/resource .. TODO(briancurtin): document how to create a proxy .. TODO(briancurtin): document how to create auth plugins ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/layout.rst0000664000175000017500000001024100000000000022713 0ustar00zuulzuul00000000000000How the SDK is organized ======================== The following diagram shows how the project is laid out. .. literalinclude:: layout.txt Resource -------- The :class:`openstack.resource.Resource` base class is the building block of any service implementation. 
``Resource`` objects correspond to the resources each service's REST API works with, so the :class:`openstack.compute.v2.server.Server` subclass maps to the compute service's ``https://openstack:1234/v2/servers`` resource. The base ``Resource`` contains methods to support the typical `CRUD `_ operations supported by REST APIs, and handles the construction of URLs and calling the appropriate HTTP verb on the given ``Adapter``. Values sent to or returned from the service are implemented as attributes on the ``Resource`` subclass with type :class:`openstack.resource.prop`. The ``prop`` is created with the exact name of what the API expects, and can optionally include a ``type`` to be validated against on requests. You should choose an attribute name that follows PEP-8, regardless of what the server-side expects, as this ``prop`` becomes a mapping between the two.:: is_public = resource.prop('os-flavor-access:is_public', type=bool) There are six additional attributes which the ``Resource`` class checks before making requests to the REST API. ``allow_create``, ``allow_retreive``, ``allow_commit``, ``allow_delete``, ``allow_head``, and ``allow_list`` are set to ``True`` or ``False``, and are checked before making the corresponding method call. The ``base_path`` attribute should be set to the URL which corresponds to this resource. Many ``base_path``\s are simple, such as ``"/servers"``. For ``base_path``\s which are composed of non-static information, Python's string replacement is used, e.g., ``base_path = "/servers/%(server_id)s/ips"``. ``resource_key`` and ``resources_key`` are attributes to set when a ``Resource`` returns more than one item in a response, or otherwise requires a key to obtain the response value. 
For example, the ``Server`` class sets ``resource_key = "server"`` as an individual ``Server`` is stored in a dictionary keyed with the singular noun, and ``resources_key = "servers"`` as multiple ``Server``\s are stored in a dictionary keyed with the plural noun in the response. Proxy ----- Each service implements a ``Proxy`` class based on :class:`~openstack.proxy.Proxy`, within the ``openstack//vX/_proxy.py`` module. For example, the v2 compute service's ``Proxy`` exists in ``openstack/compute/v2/_proxy.py``. The :class:`~openstack.proxy.Proxy` class is based on :class:`~keystoneauth1.adapter.Adapter`. .. autoclass:: openstack.proxy.Proxy :members: :show-inheritance: Each service's ``Proxy`` provides a higher-level interface for users to work with via a :class:`~openstack.connection.Connection` instance. Rather than requiring users to maintain their own ``Adapter`` and work with lower-level :class:`~openstack.resource.Resource` objects, the ``Proxy`` interface offers a place to make things easier for the caller. Each ``Proxy`` class implements methods which act on the underlying ``Resource`` classes which represent the service. For example:: def list_flavors(self, **params): return flavor.Flavor.list(self.session, **params) This method is operating on the ``openstack.compute.v2.flavor.Flavor.list`` method. For the time being, it simply passes on the ``Adapter`` maintained by the ``Proxy``, and returns what the underlying ``Resource.list`` method does. Cloud ----- .. todo TODO. Connection ---------- The :class:`openstack.connection.Connection` class builds atop a :class:`openstack.config.cloud_region.CloudRegion` object, and provides a higher level interface constructed of ``Proxy`` objects from each of the services. The ``Connection`` class' primary purpose is to act as a high-level interface to this SDK, managing the lower level connection bits and exposing the ``Resource`` objects through their corresponding `Proxy`_ object. 
If you've built proper ``Resource`` objects and implemented methods on the corresponding ``Proxy`` object, the high-level interface to your service should now be exposed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/layout.txt0000664000175000017500000000033000000000000022720 0ustar00zuulzuul00000000000000openstack/ connection.py resource.py compute/ compute_service.py v2/ server.py _proxy.py tests/ compute/ v2/ test_server.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/setup.rst0000664000175000017500000001047300000000000022545 0ustar00zuulzuul00000000000000Creating a Development Environment ================================== Required Tools -------------- Python ~~~~~~ As the OpenStack SDK is developed in Python, you will need at least one version of Python installed. Our continuous integration system runs against several versions, so ultimately we will have the proper test coverage, but having multiple versions locally results in less time spent in code review when changes unexpectedly break other versions. Python can be downloaded from https://www.python.org/downloads. virtualenv ~~~~~~~~~~ In order to isolate our development environment from the system-based Python installation, we use `virtualenv `_. This allows us to install all of our necessary dependencies without interfering with anything else, and preventing others from interfering with us. Virtualenv must be installed on your system in order to use it, and it can be had from PyPI, via pip, as follows. Note that you may need to run this as an administrator in some situations.:: $ apt-get install python3-virtualenv # Debian based platforms $ dnf install python3-virtualenv # Red Hat based platforms $ pip install virtualenv # Mac OS X and other platforms You can create a virtualenv in any location. 
A common usage is to store all of your virtualenvs in the same place, such as under your home directory. To create a virtualenv for the default Python, run the following:: $ virtualenv $HOME/envs/sdk To create an environment for a different version, run the following:: $ virtualenv -p python3 $HOME/envs/sdk3 When you want to enable your environment so that you can develop inside of it, you *activate* it. To activate an environment, run the /bin/activate script inside of it, like the following:: $ source $HOME/envs/sdk3/bin/activate (sdk3)$ Once you are activated, you will see the environment name in front of your command prompt. In order to exit that environment, run the ``deactivate`` command. tox ~~~ We use `tox `_ as our test runner, which allows us to run the same test commands against multiple versions of Python. Inside any of the virtualenvs you use for working on the SDK, run the following to install ``tox`` into it.:: (sdk3)$ pip install tox Git ~~~ The source of the OpenStack SDK is stored in Git. In order to work with our source repository, you must have Git installed on your system. If your system has a package manager, it can likely be had from there. If not, you can find downloads or the source at http://git-scm.com. Getting the Source Code ----------------------- .. TODO(briancurtin): We should try and distill the following document into the minimally necessary parts to include directly in this section. I've talked to several people who are discouraged by that large of a document to go through before even getting into the project they want to work on. I don't want that to happen to us because we have the potential to be more public facing than a lot of other projects. .. note:: Before checking out the code, please read the OpenStack `Developer's Guide `_ for details on how to use the continuous integration and code review systems that we use. 
The canonical Git repository is hosted on opendev.org at http://opendev.org/openstack/openstacksdk/:: (sdk3)$ git clone https://opendev.org/openstack/openstacksdk (sdk3)$ cd openstacksdk Installing Dependencies ----------------------- In order to work with the SDK locally, such as in the interactive interpreter or to run example scripts, you need to install the project's dependencies.:: (sdk3)$ pip install -r requirements.txt After the downloads and installs are complete, you'll have a fully functional environment to use the SDK in. Building the Documentation -------------------------- Our documentation is written in reStructured Text and is built using Sphinx. A ``docs`` command is available in our ``tox.ini``, allowing you to build the documentation like you'd run tests. The ``docs`` command is not evaluated by default.:: (sdk3)$ tox -e docs That command will cause the documentation, which lives in the ``docs`` folder, to be built. HTML output is the most commonly referenced, which is located in ``docs/build/html``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/contributor/testing.rst0000664000175000017500000001024600000000000023060 0ustar00zuulzuul00000000000000Testing ======= The tests are run with `tox `_ and configured in ``tox.ini``. The test results are tracked by `stestr `_ and configured in ``.stestr.conf`` and via command line options passed to the ``stestr`` executable when it's called by ``tox``. Unit Tests ---------- Running tests ~~~~~~~~~~~~~ In order to run the entire unit test suite, simply run the ``tox`` command inside of your source checkout. This will attempt to run every test command listed inside of ``tox.ini``, which includes Python 3.x, and a PEP 8 check. You should run the full test suite on all versions before submitting changes for review in order to avoid unexpected failures in the continuous integration system.:: $ tox ... 
py3: commands succeeded pep8: commands succeeded congratulations :) During development, it may be more convenient to run a subset of the tests to keep test time to a minimum. You can choose to run the tests only on one version. A step further is to run only the tests you are working on.:: # Run run the tests on Python 3.9 $ tox -e py39 # Run only the compute unit tests on Python 3.9 $ tox -e py39 openstack.tests.unit.compute # Run only the tests in a specific file on Python 3.9 $ tox -e py39 -- -n openstack/tests/unit/compute/test_version.py Functional Tests ---------------- The functional tests assume that you have a public or private OpenStack cloud that you can run the tests against. The tests must be able to be run against public clouds but first and foremost they must be run against OpenStack. In practice, this means that the tests should initially be run against a stable branch of `DevStack `_. Configuration ~~~~~~~~~~~~~ To connect the functional tests to an OpenStack cloud we require a ``clouds.yaml`` file, as discussed in :doc:`/user/config/configuration`. You can place this ``clouds.yaml`` file in the root of your source checkout or in one of the other standard locations, ``$HOME/.config/openstack`` or ``/etc/openstack``. There must be at least three clouds configured, or rather three accounts configured for the one cloud. These accounts are: - An admin account, which defaults to ``devstack-admin`` but is configurable via the ``OPENSTACKSDK_OPERATOR_CLOUD`` environment variable, - A user account, which defaults to ``devstack`` but is configurable via the ``OPENSTACKSDK_DEMO_CLOUD`` environment variable, and - An alternate user account, which defaults to ``devstack-demo`` but is configurable via the ``OPENSTACKSDK_DEMO_CLOUD_ALT`` environment variable In addition, you must indicate the names of the flavor and image that should be used for tests. 
These can be configured via ``OPENSTACKSDK_FLAVOR`` and ``OPENSTACKSDK_IMAGE`` environment variables or ``functional.flavor_name`` and ``functional.image_name`` settings in the ``clouds.yaml`` file, respectively. Finally, you can configure the timeout for tests using the ``OPENSTACKSDK_FUNC_TEST_TIMEOUT`` environment variable (defaults to 300 seconds). Some test modules take specific timeout values. For example, all tests in ``openstack.tests.functional.compute`` will check for the ``OPENSTACKSDK_FUNC_TEST_TIMEOUT_COMPUTE`` environment variable before checking for ``OPENSTACKSDK_FUNC_TEST_TIMEOUT``. .. note:: Recent versions of DevStack will configure a suitable ``clouds.yaml`` file for you, which will be placed at ``/etc/openstack/clouds.yaml``. This is an example of a minimal configuration for a ``clouds.yaml`` that connects the functional tests to a DevStack instance. .. literalinclude:: clouds.yaml :language: yaml Replace ``xxx.xxx.xxx.xxx`` with the IP address or FQDN of your DevStack instance. Running tests ~~~~~~~~~~~~~ Functional tests are also run against multiple Python versions. In order to run the entire functional test suite against the default Python 3 version in your environment, run the ``tox -e functional`` command inside of your source checkout. This will attempt to run every tests in the ``openstack/tests/functional`` directory. For example:: $ tox -e functional ... functional: commands succeeded congratulations :) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/glossary.rst0000664000175000017500000000712700000000000020700 0ustar00zuulzuul00000000000000:orphan: Glossary ======== .. glossary:: :sorted: CLI Command-Line Interface; a textual user interface. compute OpenStack Compute (Nova). container One of the :term:`object-store` resources; a container holds :term:`objects ` being stored. endpoint A base URL used in a REST request. 
An `authentication endpoint` is specifically the URL given to a user to identify a cloud. A service endpoint is generally obtained from the service catalog. host A physical computer. Contrast with :term:`node` and :term:`server`. identity OpenStack Identity (Keystone). image OpenStack Image (Glance). Also the attribute name of the disk files stored for use by servers. keypair The attribute name of the SSH public key used in the OpenStack Compute API for server authentication. node A logical system, may refer to a :term:`server` (virtual machine) or a :term:`host`. Generally used to describe an OS instance where a specific process is running, e.g. a 'network node' is where the network processes run, and may be directly on a host or in a server. Contrast with :term:`host` and :term:`server`. object A generic term which normally refers to the a Python ``object``. The OpenStack Object Store service (Swift) also uses `object` as the name of the item being stored within a :term:`container`. object-store OpenStack Object Store (Swift). project The name of the owner of resources in an OpenStack cloud. A `project` can map to a customer, account or organization in different OpenStack deployments. Used instead of the deprecated :term:`tenant`. region The attribute name of a partitioning of cloud resources. resource A Python object representing an OpenStack resource inside the SDK code. Also used to describe the items managed by OpenStack. role A personality that a user assumes when performing a specific set of operations. A `role` includes a set of rights and privileges that a user assuming that role inherits. The OpenStack Identity service includes the set of roles that a user can assume in the :term:`token` that is issued to that user. The individual services determine how the roles are interpreted and access granted to operations or resources. The OpenStack Identity service treats a role as an arbitrary name assigned by the cloud administrator. 
server A virtual machine or a bare-metal host managed by the OpenStack Compute service. Contrast with :term:`host` and :term:`node`. service In OpenStack this refers to a service/endpoint in the :term:`ServiceCatalog `. It could also be a collection of endpoints for different :term:`regions `. A service has a type and a name. service catalog The list of :term:`services ` configured at a given authentication endpoint available to the authenticated user. tenant Deprecated in favor of :term:`project`. token An arbitrary bit of text that is used to access resources. Some tokens are `scoped` to determine what resources are accessible with it. A token may be revoked at any time and is valid for a finite duration. volume OpenStack Volume (Cinder). Also the attribute name of the virtual disks managed by the OpenStack Volume service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/index.rst0000664000175000017500000000136400000000000020141 0ustar00zuulzuul00000000000000openstacksdk ============ This documentation is split into three sections: * An :doc:`installation ` guide * A section for :doc:`users ` looking to build applications which make use of OpenStack * A section for those looking to :doc:`contribute ` to this project Installation ------------ .. toctree:: :maxdepth: 2 install/index For Users --------- .. toctree:: :maxdepth: 2 user/index For Contributors ---------------- .. toctree:: :maxdepth: 2 contributor/index General Information ------------------- General information about the SDK including a glossary and release history. .. 
toctree:: :maxdepth: 1 Glossary of Terms Release Notes ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1132495 openstacksdk-4.0.0/doc/source/install/0000775000175000017500000000000000000000000017742 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/install/index.rst0000664000175000017500000000055000000000000021603 0ustar00zuulzuul00000000000000Installation guide ================== The OpenStack SDK is available on `PyPI`__ under the name **openstacksdk**. To install it, use ``pip``: .. code-block:: bash $ pip install openstacksdk To check the installed version you can call the module with: .. code-block:: bash $ python -m openstack version .. __: https://pypi.org/project/openstacksdk ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/releasenotes.rst0000664000175000017500000000020500000000000021514 0ustar00zuulzuul00000000000000Release Notes ============= Release notes for `openstacksdk` can be found at https://releases.openstack.org/teams/openstacksdk.html ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1172516 openstacksdk-4.0.0/doc/source/user/0000775000175000017500000000000000000000000017252 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1172516 openstacksdk-4.0.0/doc/source/user/config/0000775000175000017500000000000000000000000020517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/config/configuration.rst0000664000175000017500000004036300000000000024126 0ustar00zuulzuul00000000000000.. 
_openstack-config: ====================================== Configuring OpenStack SDK Applications ====================================== .. _config-environment-variables: Environment Variables --------------------- `openstacksdk` honors all of the normal `OS_*` variables. It does not provide backwards compatibility to service-specific variables such as `NOVA_USERNAME`. If you have OpenStack environment variables set, `openstacksdk` will produce a cloud config object named `envvars` containing your values from the environment. If you don't like the name `envvars`, that's ok, you can override it by setting `OS_CLOUD_NAME`. Service specific settings, like the nova service type, are set with the default service type as a prefix. For instance, to set a special service_type for trove set .. code-block:: bash export OS_DATABASE_SERVICE_TYPE=rax:database .. _config-clouds-yaml: Config Files ------------ `openstacksdk` will look for a file called `clouds.yaml` in the following locations: * ``.`` (the current directory) * ``$HOME/.config/openstack`` * ``/etc/openstack`` The first file found wins. You can also set the environment variable `OS_CLIENT_CONFIG_FILE` to an absolute path of a file to look for and that location will be inserted at the front of the file search list. The keys are all of the keys you'd expect from `OS_*` - except lower case and without the OS prefix. So, region name is set with `region_name`. Service specific settings, like the nova service type, are set with the default service type as a prefix. For instance, to set a special service_type for trove (because you're using Rackspace) set: .. code-block:: yaml database_service_type: 'rax:database' Site Specific File Locations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In addition to `~/.config/openstack` and `/etc/openstack` - some platforms have other locations they like to put things. 
`openstacksdk` will also look in an OS specific config dir * `USER_CONFIG_DIR` * `SITE_CONFIG_DIR` `USER_CONFIG_DIR` is different on Linux, OSX and Windows. * Linux: `~/.config/openstack` * OSX: `~/Library/Application Support/openstack` * Windows: `C:\\Users\\USERNAME\\AppData\\Local\\OpenStack\\openstack` `SITE_CONFIG_DIR` is different on Linux, OSX and Windows. * Linux: `/etc/openstack` * OSX: `/Library/Application Support/openstack` * Windows: `C:\\ProgramData\\OpenStack\\openstack` An example config file is probably helpful: .. code-block:: yaml clouds: mtvexx: profile: https://vexxhost.com auth: username: mordred@inaugust.com password: XXXXXXXXX project_name: mordred@inaugust.com region_name: ca-ymq-1 dns_api_version: 1 mordred: region_name: RegionOne auth: username: 'mordred' password: XXXXXXX project_name: 'shade' auth_url: 'https://montytaylor-sjc.openstack.blueboxgrid.com:5001/v2.0' infra: profile: rackspace auth: username: openstackci password: XXXXXXXX project_id: 610275 regions: - DFW - ORD - IAD You may note a few things. First, since `auth_url` settings are silly and embarrassingly ugly, known cloud vendor profile information is included and may be referenced by name or by base URL to the cloud in question if the cloud serves a vendor profile. One of the benefits of that is that `auth_url` isn't the only thing the vendor defaults contain. For instance, since Rackspace lists `rax:database` as the service type for trove, `openstacksdk` knows that so that you don't have to. In case the cloud vendor profile is not available, you can provide one called `clouds-public.yaml`, following the same location rules previously mentioned for the config files. `regions` can be a list of regions. When you call `get_all_clouds`, you'll get a cloud config object for each cloud/region combo. 
As seen with `dns_service_type`, any setting that makes sense to be per-service, like `service_type` or `endpoint` or `api_version` can be set by prefixing the setting with the default service type. That might strike you funny when setting `service_type` and it does me too - but that's just the world we live in. Auth Settings ------------- Keystone has auth plugins - which means it's not possible to know ahead of time which auth settings are needed. `openstacksdk` sets the default plugin type to `password`, which is what things all were before plugins came about. In order to facilitate validation of values, all of the parameters that exist as a result of a chosen plugin need to go into the auth dict. For password auth, this includes `auth_url`, `username` and `password` as well as anything related to domains, projects and trusts. Splitting Secrets ----------------- In some scenarios, such as configuration management controlled environments, it might be easier to have secrets in one file and non-secrets in another. This is fully supported via an optional file `secure.yaml` which follows all the same location rules as `clouds.yaml`. It can contain anything you put in `clouds.yaml` and will take precedence over anything in the `clouds.yaml` file. .. code-block:: yaml # clouds.yaml clouds: internap: profile: internap auth: username: api-55f9a00fb2619 project_name: inap-17037 regions: - ams01 - nyj01 # secure.yaml clouds: internap: auth: password: XXXXXXXXXXXXXXXXX SSL Settings ------------ When the access to a cloud is done via a secure connection, `openstacksdk` will always verify the SSL cert by default. This can be disabled by setting `verify` to `False`. In case the cert is signed by an unknown CA, a specific cacert can be provided via `cacert`. **WARNING:** `verify` will always have precedence over `cacert`, so when setting a CA cert but disabling `verify`, the cloud cert will never be validated. Client certs are also configurable. 
`cert` will be the client cert file location. In case the cert key is not included within the client cert file, its file location needs to be set via `key`. .. code-block:: yaml # clouds.yaml clouds: regular-secure-cloud: auth: auth_url: https://signed.cert.domain:5000 ... unknown-ca-with-client-cert-secure-cloud: auth: auth_url: https://unknown.ca.but.secure.domain:5000 ... key: /home/myhome/client-cert.key cert: /home/myhome/client-cert.crt cacert: /home/myhome/ca.crt self-signed-insecure-cloud: auth: auth_url: https://self.signed.cert.domain:5000 ... verify: False Note for parity with ``openstack`` command-line options the `insecure` boolean is also recognised (with the opposite semantics to `verify`; i.e. `True` ignores certificate failures). This should be considered deprecated for `verify`. Cache Settings -------------- .. versionchanged:: 1.0.0 Previously, caching was managed exclusively in the cloud layer. Starting in openstacksdk 1.0.0, caching is moved to the proxy layer. As the cloud layer depends on the proxy layer in 1.0.0, this means both layers can benefit from the cache. Authenticating and accessing resources on a cloud is often expensive. It is therefore quite common that applications will wish to do some client-side caching of both credentials and cloud resources. To facilitate this, *openstacksdk* supports caching credentials and resources using the system keyring and *dogpile.cache*, respectively. .. tip:: It is important to emphasise that *openstacksdk* does not actually cache anything itself. Rather, it collects and presents the cache information so that your various applications that are connecting to OpenStack can share a cache should you desire. It is important that your cache backend is correctly configured according to the needs of your application. Caching in enabled or disabled globally, rather than on a cloud-by-cloud basis. This is done by setting configuring the``cache`` top-level key. 
Caching of authentication tokens can be configured using the following settings: ``cache.auth`` A boolean indicating whether tokens should be cached in the keyring. When enabled, this allows the consequent connections to the same cloud to skip fetching new token. When the token expires or is invalidated, `openstacksdk` will automatically establish a new connection. Defaults to ``false``. For example, to configure caching of authentication tokens. .. code-block:: yaml cache: auth: true Caching of resources can be configured using the following settings: ``cache.expiration_time`` The expiration time in seconds for a cache entry. This should be an integer. Defaults to ``0``. ``cache.class`` The cache backend to use, which can include any backend supported by *dogpile.cache* natively as well as backend provided by third-part packages. This should be a string. Defaults to ``dogpile.cache.memory``. ``cache.arguments`` A mapping of arbitrary arguments to pass into the cache backend. These are backend specific. Keys should correspond to a configuration option for the configured cache backend. Defaults to ``{}``. ``cache.expirations`` A mapping of resource types to expiration times. The keys should be specified in the same way as the metrics are emitted, by joining meaningful resource URL segments with ``.``. For example, both ``/servers`` and ``/servers/ID`` should be specified as ``servers``, while ``/servers/ID/metadata/KEY`` should be specified as `server.metadata`. Values should be an expiration time in seconds. A value of ``-1`` indicates that the cache should never expire, while a value of ``0`` disables caching for the resource. Defaults to ``{}`` For example, to configure caching with the ``dogpile.cache.memory`` backend with a 1 hour expiration. .. code-block:: yaml cache: expiration_time: 3600 To configure caching with the ``dogpile.cache.memory`` backend with a 1 hour expiration but only for requests to the OpenStack Compute service's ``/servers`` API: .. 
code-block:: yaml cache: expirations: servers: 3600 To configure caching with the ``dogpile.cache.pylibmc`` backend with a 1 hour expiration time and a memcached server running on your localhost. .. code-block:: yaml cache: expiration_time: 3600 arguments: url: - 127.0.0.1 To configure caching with the ``dogpile.cache.pylibmc`` backend with a 1 hour expiration time, a memcached server running on your localhost, and multiple per-resource cache expiration times. .. code-block:: yaml cache: class: dogpile.cache.pylibmc expiration_time: 3600 arguments: url: - 127.0.0.1 expiration: server: 5 flavor: -1 compute.servers: 5 compute.flavors: -1 image.images: 5 Finally, if the ``cache`` key is undefined, a null cache is enabled meaning caching is effectively disabled. .. note:: Non ``GET`` requests cause cache invalidation based on the caching key prefix. This means that, for example, a ``PUT`` request to ``/images/ID`` will invalidate all images cache (list and all individual entries). Moreover it is possible to explicitly pass the ``skip_cache`` parameter to the ``proxy._get`` function to bypass cache and invalidate what is already there. This is happening automatically in the ``wait_for_status`` methods where it is expected that resource will change some of the attributes over the time. Forcing complete cache invalidation can be achieved calling ``conn._cache.invalidate`` MFA Support ----------- MFA support requires a specially prepared configuration file. In this case a combination of two different authorization plugins is used with their individual requirements to the specified parameters. .. code-block:: yaml clouds: mfa: auth_type: "v3multifactor" auth_methods: - v3password - v3totp auth: auth_url: https://identity.cloud.com username: user user_id: uid password: XXXXXXXXX project_name: project user_domain_name: udn project_domain_name: pdn IPv6 ---- IPv6 is the future, and you should always use it if your cloud supports it and if your local network supports it. 
Both of those are easily detectable and all friendly software should do the right thing. However, sometimes a cloud API may return IPv6 information that is not useful to a production deployment. For example, the API may provide an IPv6 address for a server, but not provide that to the host instance via metadata (configdrive) or standard IPv6 autoconfiguration methods (i.e. the host either needs to make a bespoke API call, or otherwise statically configure itself). For such situations, you can set the ``force_ipv4``, or ``OS_FORCE_IPV4`` boolean environment variable. For example: .. code-block:: yaml clouds: mtvexx: profile: vexxhost auth: username: mordred@inaugust.com password: XXXXXXXXX project_name: mordred@inaugust.com region_name: ca-ymq-1 dns_api_version: 1 monty: profile: fooprovider force_ipv4: true auth: username: mordred@inaugust.com password: XXXXXXXXX project_name: mordred@inaugust.com region_name: RegionFoo The above snippet will tell client programs to prefer the IPv4 address and leave the ``public_v6`` field of the `Server` object blank for the ``fooprovider`` cloud . You can also set this with a client flag for all clouds: .. code-block:: yaml client: force_ipv4: true Per-region settings ------------------- Sometimes you have a cloud provider that has config that is common to the cloud, but also with some things you might want to express on a per-region basis. For instance, Internap provides a public and private network specific to the user in each region, and putting the values of those networks into config can make consuming programs more efficient. To support this, the region list can actually be a list of dicts, and any setting that can be set at the cloud level can be overridden for that region. .. 
code-block:: yaml clouds: internap: profile: internap auth: password: XXXXXXXXXXXXXXXXX username: api-55f9a00fb2619 project_name: inap-17037 regions: - name: ams01 values: networks: - name: inap-17037-WAN1654 routes_externally: true - name: inap-17037-LAN6745 - name: nyj01 values: networks: - name: inap-17037-WAN1654 routes_externally: true - name: inap-17037-LAN6745 Setting Precedence ------------------ Some settings are redundant, e.g. ``project-name`` and ``project-id`` both specify the project. In a conflict between redundant settings, the ``_name`` ``clouds.yaml`` option (or equivalent ``-name`` CLI option and ``_NAME`` environment variable) will be used. Some environment variables or commandline flags can override the settings from clouds.yaml. These are: - ``--domain-id`` (``OS_DOMAIN_ID``) - ``--domain-name`` (``OS_DOMAIN_NAME``) - ``--user-domain-id`` (``OS_USER_DOMAIN_ID``) - ``--user-domain-name`` (``OS_USER_DOMAIN_NAME``) - ``--project-domain-id`` (``OS_PROJECT_DOMAIN_ID``) - ``--project-domain-name`` (``OS_PROJECT_DOMAIN_NAME``) - ``--auth-token`` (``OS_AUTH_TOKEN``) - ``--project-id`` (``OS_PROJECT_ID``) - ``--project-name`` (``OS_PROJECT_NAME``) - ``--tenant-id`` (``OS_TENANT_ID``) (deprecated for ``--project-id``) - ``--tenant-name`` (``OS_TENANT_NAME``) (deprecated for ``--project-name``) Similarly, if one of the above settings is specified in ``clouds.yaml`` as part of the ``auth`` section as well as the main section, the ``auth`` settings will be overridden. For example in this config section, note that project is specified multiple times: .. 
code-block:: yaml clouds: mtvexx: profile: https://vexxhost.com auth: username: mordred@inaugust.com password: XXXXXXXXX project_name: mylessfavoriteproject project_id: 0bedab75-898c-4521-a038-0b4b71c41bed region_name: ca-ymq-1 project_name: myfavoriteproject project_id: 2acf9403-25e8-479e-a3c6-d67540c424a4 In the above example, the ``project_id`` configuration values will be ignored in favor of the ``project_name`` configuration values, and the higher-level project will be chosen over the auth-specified project. So the actual project used will be ```myfavoriteproject```. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/config/index.rst0000664000175000017500000000025700000000000022364 0ustar00zuulzuul00000000000000====================== Using os-client-config ====================== .. toctree:: :maxdepth: 2 configuration using vendor-support network-config reference ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/config/network-config.rst0000664000175000017500000000556100000000000024214 0ustar00zuulzuul00000000000000============== Network Config ============== There are several different qualities that networks in OpenStack might have that might not be able to be automatically inferred from the available metadata. To help users navigate more complex setups, `os-client-config` allows configuring a list of network metadata. .. 
code-block:: yaml clouds: amazing: networks: - name: blue routes_externally: true - name: purple routes_externally: true default_interface: true - name: green routes_externally: false - name: yellow routes_externally: false nat_destination: true - name: chartreuse routes_externally: false routes_ipv6_externally: true - name: aubergine routes_ipv4_externally: false routes_ipv6_externally: true Every entry must have a name field, which can hold either the name or the id of the network. `routes_externally` is a boolean field that labels the network as handling north/south traffic off of the cloud. In a public cloud this might be thought of as the "public" network, but in private clouds it's possible it might be an RFC1918 address. In either case, it's provides IPs to servers that things not on the cloud can use. This value defaults to `false`, which indicates only servers on the same network can talk to it. `routes_ipv4_externally` and `routes_ipv6_externally` are boolean fields to help handle `routes_externally` in the case where a network has a split stack with different values for IPv4 and IPv6. Either entry, if not given, defaults to the value of `routes_externally`. `default_interface` is a boolean field that indicates that the network is the one that programs should use. It defaults to false. An example of needing to use this value is a cloud with two private networks, and where a user is running ansible in one of the servers to talk to other servers on the private network. Because both networks are private, there would otherwise be no way to determine which one should be used for the traffic. There can only be one `default_interface` per cloud. `nat_destination` is a boolean field that indicates which network floating ips should be attached to. It defaults to false. Normally this can be inferred by looking for a network that has subnets that have a gateway_ip. 
But it's possible to have more than one network that satisfies that condition, so the user might want to tell programs which one to pick. There can be only one `nat_destination` per cloud. `nat_source` is a boolean field that indicates which network floating ips should be requested from. It defaults to false. Normally this can be inferred by looking for a network that is attached to a router. But it's possible to have more than one network that satisfies that condition, so the user might want to tell programs which one to pick. There can be only one `nat_source` per cloud. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/config/reference.rst0000664000175000017500000000045000000000000023206 0ustar00zuulzuul00000000000000============= API Reference ============= .. module:: openstack.config :synopsis: OpenStack client configuration .. autoclass:: openstack.config.OpenStackConfig :members: :inherited-members: .. autoclass:: openstack.config.cloud_region.CloudRegion :members: :inherited-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/config/using.rst0000664000175000017500000000244600000000000022404 0ustar00zuulzuul00000000000000======================================== Using openstack.config in an Application ======================================== Usage ----- The simplest and least useful thing you can do is: .. code-block:: python python -m openstack.config.loader Which will print out whatever if finds for your config. If you want to use it from python, which is much more likely what you want to do, things like: Get a named cloud. .. code-block:: python import openstack.config cloud_region = openstack.config.OpenStackConfig().get_one( 'internap', region_name='ams01') print(cloud_region.name, cloud_region.region, cloud_region.config) Or, get all of the clouds. .. 
code-block:: python import openstack.config cloud_regions = openstack.config.OpenStackConfig().get_all() for cloud_region in cloud_regions: print(cloud_region.name, cloud_region.region, cloud_region.config) argparse -------- If you're using `openstack.config` from a program that wants to process command line options, there is a registration function to register the arguments that both `openstack.config` and keystoneauth know how to deal with - as well as a consumption argument. .. code-block:: python import argparse import openstack parser = argparse.ArgumentParser() cloud = openstack.connect(options=parser) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/config/vendor-support.rst0000664000175000017500000002040300000000000024257 0ustar00zuulzuul00000000000000============== Vendor Support ============== OpenStack presents deployers with many options, some of which can expose differences to end users. `os-client-config` tries its best to collect information about various things a user would need to know. The following is a text representation of the vendor related defaults `os-client-config` knows about. Default Values -------------- These are the default behaviors unless a cloud is configured differently. 
* Identity uses `password` authentication * Identity API Version is 2 * Image API Version is 2 * Volume API Version is 2 * Compute API Version is 2.1 * Images must be in `qcow2` format * Images are uploaded using PUT interface * Public IPv4 is directly routable via DHCP from Neutron * IPv6 is not provided * Floating IPs are not required * Floating IPs are provided by Neutron * Security groups are provided by Neutron * Vendor specific agents are not used AURO ---- https://api.auro.io:5000/v2.0 ============== ================ Region Name Location ============== ================ van1 Vancouver, BC ============== ================ * Public IPv4 is provided via NAT with Neutron Floating IP Betacloud --------- https://api-1.betacloud.de:5000 ============== ================== Region Name Location ============== ================== betacloud-1 Karlsruhe, Germany ============== ================== * Identity API Version is 3 * Images must be in `raw` format * Public IPv4 is provided via NAT with Neutron Floating IP * Volume API Version is 3 Binero ------ https://auth.binero.cloud:5000/v3 ============== ================== Region Name Location ============== ================== europe-se-1 Stockholm, SE ============== ================== * Identity API Version is 3 * Volume API Version is 3 * Public IPv4 is directly routable via DHCP from Neutron * Public IPv4 is provided via NAT with Neutron Floating IP Catalyst -------- https://api.cloud.catalyst.net.nz:5000/v2.0 ============== ================ Region Name Location ============== ================ nz-por-1 Porirua, NZ nz_wlg_2 Wellington, NZ ============== ================ * Identity API Version is 3 * Compute API Version is 2 * Images must be in `raw` format * Volume API Version is 3 City Cloud ---------- https://%(region_name)s.citycloud.com:5000/v3/ ============== ================ Region Name Location ============== ================ Buf1 Buffalo, NY dx1 Dubai, UAE Fra1 Frankfurt, DE Kna1 Karlskrona, SE Lon1 London, UK Sto2 
Stockholm, SE tky1 Tokyo, JP ============== ================ * Identity API Version is 3 * Public IPv4 is provided via NAT with Neutron Floating IP * Volume API Version is 1 ConoHa ------ https://identity.%(region_name)s.conoha.io ============== ================ Region Name Location ============== ================ tyo1 Tokyo, JP sin1 Singapore sjc1 San Jose, CA ============== ================ * Image upload is not supported DreamCompute ------------ https://iad2.dream.io:5000 ============== ================ Region Name Location ============== ================ RegionOne Ashburn, VA ============== ================ * Identity API Version is 3 * Images must be in `raw` format * IPv6 is provided to every server Open Telekom Cloud ------------------ https://iam.%(region_name)s.otc.t-systems.com/v3 ============== =================== Region Name Location ============== =================== eu-de Biere/Magdeburg, DE eu-nl Amsterdam, NL ============== =================== * Identity API Version is 3 * Public IPv4 is provided via NAT with Neutron Floating IP ELASTX ------ https://ops.elastx.cloud:5000/v3 ============== ================ Region Name Location ============== ================ se-sto Stockholm, SE ============== ================ * Identity API Version is 3 * Public IPv4 is provided via NAT with Neutron Floating IP Enter Cloud Suite ----------------- https://api.entercloudsuite.com/v2.0 ============== ================ Region Name Location ============== ================ nl-ams1 Amsterdam, NL it-mil1 Milan, IT de-fra1 Frankfurt, DE ============== ================ * Compute API Version is 2 Fuga ---- https://identity.api.fuga.io:5000 ============== ================ Region Name Location ============== ================ cystack Netherlands ============== ================ * Identity API Version is 3 * Volume API Version is 3 Internap -------- https://identity.api.cloud.inap.com/v2.0 ============== ================ Region Name Location ============== ================ ams01 
Amsterdam, NL da01 Dallas, TX nyj01 New York, NY sin01 Singapore sjc01 San Jose, CA ============== ================ * Floating IPs are not supported Limestone Networks ------------------ https://auth.cloud.lstn.net:5000/v3 ============== ================== Region Name Location ============== ================== us-dfw-1 Dallas, TX us-slc Salt Lake City, UT ============== ================== * Identity API Version is 3 * Images must be in `raw` format * IPv6 is provided to every server connected to the `Public Internet` network OVH --- https://auth.cloud.ovh.net/v3 ============== ================ Region Name Location ============== ================ BHS1 Beauharnois, QC SBG1 Strassbourg, FR GRA1 Gravelines, FR ============== ================ * Images may be in `raw` format. The `qcow2` default is also supported * Floating IPs are not supported Rackspace --------- https://identity.api.rackspacecloud.com/v2.0/ ============== ================ Region Name Location ============== ================ DFW Dallas, TX HKG Hong Kong IAD Washington, D.C. LON London, UK ORD Chicago, IL SYD Sydney, NSW ============== ================ * Database Service Type is `rax:database` * Compute Service Name is `cloudServersOpenStack` * Images must be in `vhd` format * Images must be uploaded using the Glance Task Interface * Floating IPs are not supported * Public IPv4 is directly routable via static config by Nova * IPv6 is provided to every server * Security groups are not supported * Uploaded Images need properties to not use vendor agent:: :vm_mode: hvm :xenapi_use_agent: False * Block Storage API Version is 2 * The Block Storage API supports version 2 but only version 1 is in the catalog. The Block Storage endpoint is https://{region_name}.blockstorage.api.rackspacecloud.com/v2/{project_id} * While passwords are recommended for use, API keys do work as well. 
The `rackspaceauth` python package must be installed, and then the following can be added to clouds.yaml:: auth: username: myusername api_key: myapikey auth_type: rackspace_apikey SWITCHengines ------------- https://keystone.cloud.switch.ch:5000/v3 ============== ================ Region Name Location ============== ================ LS Lausanne, CH ZH Zurich, CH ============== ================ * Identity API Version is 3 * Compute API Version is 2 * Images must be in `raw` format * Volume API Version is 3 Ultimum ------- https://console.ultimum-cloud.com:5000/v2.0 ============== ================ Region Name Location ============== ================ RegionOne Prague, CZ ============== ================ * Volume API Version is 1 UnitedStack ----------- https://identity.api.ustack.com/v3 ============== ================ Region Name Location ============== ================ bj1 Beijing, CN gd1 Guangdong, CN ============== ================ * Identity API Version is 3 * Images must be in `raw` format * Volume API Version is 1 VEXXHOST -------- http://auth.vexxhost.net ============== ================ Region Name Location ============== ================ ca-ymq-1 Montreal, QC sjc1 Santa Clara, CA ============== ================ * DNS API Version is 1 * Identity API Version is 3 * Volume API Version is 3 Zetta ----- https://identity.api.zetta.io/v3 ============== ================ Region Name Location ============== ================ no-osl1 Oslo, NO ============== ================ * DNS API Version is 2 * Identity API Version is 3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/connection.rst0000664000175000017500000000101000000000000022133 0ustar00zuulzuul00000000000000Connection ========== .. automodule:: openstack.connection from_config ----------- .. autofunction:: openstack.connection.from_config Connection Object ----------------- .. 
autoclass:: openstack.connection.Connection :members: :inherited-members: Transitioning from Profile -------------------------- Support exists for users coming from older releases of OpenStack SDK who have been using the :class:`~openstack.profile.Profile` interface. .. toctree:: :maxdepth: 1 transition_from_profile ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/exceptions.rst0000664000175000017500000000065700000000000022175 0ustar00zuulzuul00000000000000Exceptions ========== openstacksdk provides a number of `exceptions`__ for commonly encountered issues, such as missing API endpoints, various HTTP error codes, timeouts and so forth. It is the responsibility of the calling application to handle these exceptions appropriately. Available exceptions -------------------- .. automodule:: openstack.exceptions :members: .. __: https://docs.python.org/3/library/exceptions.html ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1212535 openstacksdk-4.0.0/doc/source/user/guides/0000775000175000017500000000000000000000000020532 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/baremetal.rst0000664000175000017500000000407300000000000023224 0ustar00zuulzuul00000000000000Using OpenStack Baremetal ========================= Before working with the Bare Metal service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. contents:: Table of Contents :local: The primary resource of the Bare Metal service is the **node**. Below are a few usage examples. For a reference to all the available methods, see :doc:`/user/proxies/baremetal`. 
CRUD operations ~~~~~~~~~~~~~~~ List Nodes ---------- A **node** is a bare metal machine. .. literalinclude:: ../examples/baremetal/list.py :pyobject: list_nodes Full example: `baremetal resource list`_ Provisioning operations ~~~~~~~~~~~~~~~~~~~~~~~ Provisioning actions are the main way to manipulate the nodes. See `Bare Metal service states documentation`_ for details. Manage and inspect Node ----------------------- *Managing* a node in the ``enroll`` provision state validates the management (IPMI, Redfish, etc) credentials and moves the node to the ``manageable`` state. *Managing* a node in the ``available`` state moves it to the ``manageable`` state. In this state additional actions, such as configuring RAID or inspecting, are available. *Inspecting* a node detects its properties by either talking to its BMC or by booting a special ramdisk. .. literalinclude:: ../examples/baremetal/provisioning.py :pyobject: manage_and_inspect_node Full example: `baremetal provisioning`_ Provide Node ------------ *Providing* a node in the ``manageable`` provision state makes it available for deployment. .. literalinclude:: ../examples/baremetal/provisioning.py :pyobject: provide_node Full example: `baremetal provisioning`_ .. _baremetal resource list: http://opendev.org/openstack/openstacksdk/src/branch/master/examples/baremetal/list.py .. _baremetal provisioning: http://opendev.org/openstack/openstacksdk/src/branch/master/examples/baremetal/provisioning.py .. 
_Bare Metal service states documentation: https://docs.openstack.org/ironic/latest/contributor/states.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/block_storage.rst0000664000175000017500000000050600000000000024103 0ustar00zuulzuul00000000000000Using OpenStack Block Storage ============================= Before working with the Block Storage service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. TODO(thowe): Implement this guide ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1252553 openstacksdk-4.0.0/doc/source/user/guides/clustering/0000775000175000017500000000000000000000000022711 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/clustering/action.rst0000664000175000017500000000267200000000000024727 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================== Working with Actions ==================== An action is an abstraction of some logic that can be executed by a worker thread. 
Most of the operations supported by Senlin are executed asynchronously, which means they are queued into database and then picked up by certain worker thread for execution. List Actions ~~~~~~~~~~~~ To examine the list of actions: .. literalinclude:: ../../examples/clustering/action.py :pyobject: list_actions When listing actions, you can specify the sorting option using the ``sort`` parameter and you can do pagination using the ``limit`` and ``marker`` parameters. Full example: `manage action`_ Get Action ~~~~~~~~~~ To get a action based on its name or ID: .. literalinclude:: ../../examples/clustering/action.py :pyobject: get_action .. _manage action: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/action.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/clustering/cluster.rst0000664000175000017500000001112300000000000025122 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Managing Clusters ================= Clusters are first-class citizens in Senlin service design. A cluster is defined as a collection of homogeneous objects. The "homogeneous" here means that the objects managed (aka. Nodes) have to be instantiated from the same "profile type". List Clusters ~~~~~~~~~~~~~ To examine the list of receivers: .. 
literalinclude:: ../../examples/clustering/cluster.py :pyobject: list_cluster When listing clusters, you can specify the sorting option using the ``sort`` parameter and you can do pagination using the ``limit`` and ``marker`` parameters. Full example: `manage cluster`_ Create Cluster ~~~~~~~~~~~~~~ When creating a cluster, you will provide a dictionary with keys and values according to the cluster type referenced. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: create_cluster Optionally, you can specify a ``metadata`` keyword argument that contains some key-value pairs to be associated with the cluster. Full example: `manage cluster`_ Get Cluster ~~~~~~~~~~~ To get a cluster based on its name or ID: .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: get_cluster Full example: `manage cluster`_ Find Cluster ~~~~~~~~~~~~ To find a cluster based on its name or ID: .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: find_cluster Full example: `manage cluster`_ Update Cluster ~~~~~~~~~~~~~~ After a cluster is created, most of its properties are immutable. Still, you can update a cluster's ``name`` and/or ``params``. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: update_cluster Full example: `manage cluster`_ Delete Cluster ~~~~~~~~~~~~~~ A cluster can be deleted after creation, When there are nodes in the cluster, the Senlin engine will launch a process to delete all nodes from the cluster and destroy them before deleting the cluster object itself. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: delete_cluster Add Nodes to Cluster ~~~~~~~~~~~~~~~~~~~~ Add some existing nodes into the specified cluster. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: add_nodes_to_cluster Remove Nodes from Cluster ~~~~~~~~~~~~~~~~~~~~~~~~~ Remove nodes from specified cluster. .. 
literalinclude:: ../../examples/clustering/cluster.py :pyobject: remove_nodes_from_cluster Replace Nodes in Cluster ~~~~~~~~~~~~~~~~~~~~~~~~ Replace some existing nodes in the specified cluster. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: replace_nodes_in_cluster Cluster Scale Out ~~~~~~~~~~~~~~~~~ Inflate the size of a cluster. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: scale_out_cluster Cluster Scale In ~~~~~~~~~~~~~~~~ Shrink the size of a cluster. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: scale_out_cluster Cluster Resize ~~~~~~~~~~~~~~ Resize of cluster. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: resize_cluster Attach Policy to Cluster ~~~~~~~~~~~~~~~~~~~~~~~~ Once a policy is attached (bound) to a cluster, it will be enforced when related actions are performed on that cluster, unless the policy is (temporarily) disabled on the cluster .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: attach_policy_to_cluster Detach Policy from Cluster ~~~~~~~~~~~~~~~~~~~~~~~~~~ Once a policy is attached to a cluster, it can be detached from the cluster at user's request. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: detach_policy_from_cluster Cluster Check ~~~~~~~~~~~~~ Check cluster health status, Cluster members can be check. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: check_cluster Cluster Recover ~~~~~~~~~~~~~~~ To restore a specified cluster, members in the cluster will be checked. .. literalinclude:: ../../examples/clustering/cluster.py :pyobject: recover_cluster .. 
_manage cluster: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/cluster.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/clustering/event.rst0000664000175000017500000000261700000000000024572 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================== Working with Events =================== An event is a record generated during engine execution. Such an event captures what has happened inside the senlin-engine. The senlin-engine service generates event records when it is performing some actions or checking policies. List Events ~~~~~~~~~~~ To examine the list of events: .. literalinclude:: ../../examples/clustering/event.py :pyobject: list_events When listing events, you can specify the sorting option using the ``sort`` parameter and you can do pagination using the ``limit`` and ``marker`` parameters. Full example: `manage event`_ Get Event ~~~~~~~~~ To get a event based on its name or ID: .. literalinclude:: ../../examples/clustering/event.py :pyobject: get_event .. _manage event: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/event.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/clustering/node.rst0000664000175000017500000000550000000000000024370 0ustar00zuulzuul00000000000000.. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============== Managing Nodes ============== Node is a logical object managed by the Senlin service. A node can be a member of at most one cluster at any time. A node can be an orphan node which means it doesn't belong to any clusters. List Nodes ~~~~~~~~~~ To examine the list of Nodes: .. literalinclude:: ../../examples/clustering/node.py :pyobject: list_nodes When listing nodes, you can specify the sorting option using the ``sort`` parameter and you can do pagination using the ``limit`` and ``marker`` parameters. Full example: `manage node`_ Create Node ~~~~~~~~~~~ When creating a node, you will provide a dictionary with keys and values according to the node type referenced. .. literalinclude:: ../../examples/clustering/node.py :pyobject: create_node Optionally, you can specify a ``metadata`` keyword argument that contains some key-value pairs to be associated with the node. Full example: `manage node`_ Get Node ~~~~~~~~ To get a node based on its name or ID: .. literalinclude:: ../../examples/clustering/node.py :pyobject: get_node Full example: `manage node`_ Find Node ~~~~~~~~~ To find a node based on its name or ID: .. literalinclude:: ../../examples/clustering/node.py :pyobject: find_node Full example: `manage node`_ Update Node ~~~~~~~~~~~ After a node is created, most of its properties are immutable. Still, you can update a node's ``name`` and/or ``params``. .. 
literalinclude:: ../../examples/clustering/node.py :pyobject: update_node Full example: `manage node`_ Delete Node ~~~~~~~~~~~ A node can be deleted after creation, provided that it is not referenced by any active clusters. If you attempt to delete a node that is still in use, you will get an error message. .. literalinclude:: ../../examples/clustering/node.py :pyobject: delete_node Full example: `manage node`_ Check Node ~~~~~~~~~~ If the underlying physical resource is not healthy, the node will be set to ERROR status. .. literalinclude:: ../../examples/clustering/node.py :pyobject: check_node Full example: `manage node`_ Recover Node ~~~~~~~~~~~~ To restore a specified node. .. literalinclude:: ../../examples/clustering/node.py :pyobject: recover_node .. _manage node: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/node.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/clustering/policy.rst0000664000175000017500000000532000000000000024742 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Managing Policies ================= A **policy type** can be treated as the meta-type of a `Policy` object. A registry of policy types is built when the Cluster service starts. When creating a `Policy` object, you will indicate the policy type used in its `spec` property. List Policies ~~~~~~~~~~~~~ To examine the list of policies: .. 
literalinclude:: ../../examples/clustering/policy.py :pyobject: list_policies When listing policies, you can specify the sorting option using the ``sort`` parameter and you can do pagination using the ``limit`` and ``marker`` parameters. Full example: `manage policy`_ Create Policy ~~~~~~~~~~~~~ When creating a policy, you will provide a dictionary with keys and values according to the policy type referenced. .. literalinclude:: ../../examples/clustering/policy.py :pyobject: create_policy Optionally, you can specify a ``metadata`` keyword argument that contains some key-value pairs to be associated with the policy. Full example: `manage policy`_ Find Policy ~~~~~~~~~~~ To find a policy based on its name or ID: .. literalinclude:: ../../examples/clustering/policy.py :pyobject: find_policy Full example: `manage policy`_ Get Policy ~~~~~~~~~~ To get a policy based on its name or ID: .. literalinclude:: ../../examples/clustering/policy.py :pyobject: get_policy Full example: `manage policy`_ Update Policy ~~~~~~~~~~~~~ After a policy is created, most of its properties are immutable. Still, you can update a policy's ``name`` and/or ``metadata``. .. literalinclude:: ../../examples/clustering/policy.py :pyobject: update_policy The Cluster service doesn't allow updating the ``spec`` of a policy. The only way to achieve that is to create a new policy. Full example: `manage policy`_ Delete Policy ~~~~~~~~~~~~~ A policy can be deleted after creation, provided that it is not referenced by any active clusters or nodes. If you attempt to delete a policy that is still in use, you will get an error message. .. literalinclude:: ../../examples/clustering/policy.py :pyobject: delete_policy .. 
_manage policy: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/policy.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/clustering/policy_type.rst0000664000175000017500000000261700000000000026011 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================= Working with Policy Types ========================= A **policy** is a template that encodes the information needed for specifying the rules that are checked/enforced before/after certain actions are performed on a cluster. The rules are encoded in a property named ``spec``. List Policy Types ~~~~~~~~~~~~~~~~~ To examine the known policy types: .. literalinclude:: ../../examples/clustering/policy_type.py :pyobject: list_policy_types Full example: `manage policy type`_ Get Policy Type ~~~~~~~~~~~~~~~ To retrieve the details about a policy type, you need to provide the name of it. .. literalinclude:: ../../examples/clustering/policy_type.py :pyobject: get_policy_type Full example: `manage policy type`_ .. _manage policy type: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/policy_type.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/clustering/profile.rst0000664000175000017500000000560600000000000025112 0ustar00zuulzuul00000000000000.. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Managing Profiles ================= A **profile type** can be treated as the meta-type of a `Profile` object. A registry of profile types is built when the Cluster service starts. When creating a `Profile` object, you will indicate the profile type used in its `spec` property. List Profiles ~~~~~~~~~~~~~ To examine the list of profiles: .. literalinclude:: ../../examples/clustering/profile.py :pyobject: list_profiles When listing profiles, you can specify the sorting option using the ``sort`` parameter and you can do pagination using the ``limit`` and ``marker`` parameters. Full example: `manage profile`_ Create Profile ~~~~~~~~~~~~~~ When creating a profile, you will provide a dictionary with keys and values specified according to the profile type referenced. .. literalinclude:: ../../examples/clustering/profile.py :pyobject: create_profile Optionally, you can specify a ``metadata`` keyword argument that contains some key-value pairs to be associated with the profile. Full example: `manage profile`_ Find Profile ~~~~~~~~~~~~ To find a profile based on its name or ID: .. literalinclude:: ../../examples/clustering/profile.py :pyobject: find_profile The Cluster service doesn't allow updating the ``spec`` of a profile. The only way to achieve that is to create a new profile. Full example: `manage profile`_ Get Profile ~~~~~~~~~~~ To get a profile based on its name or ID: .. 
literalinclude:: ../../examples/clustering/profile.py :pyobject: get_profile Full example: `manage profile`_ Update Profile ~~~~~~~~~~~~~~ After a profile is created, most of its properties are immutable. Still, you can update a profile's ``name`` and/or ``metadata``. .. literalinclude:: ../../examples/clustering/profile.py :pyobject: update_profile The Cluster service doesn't allow updating the ``spec`` of a profile. The only way to achieve that is to create a new profile. Full example: `manage profile`_ Delete Profile ~~~~~~~~~~~~~~ A profile can be deleted after creation, provided that it is not referenced by any active clusters or nodes. If you attempt to delete a profile that is still in use, you will get an error message. .. literalinclude:: ../../examples/clustering/profile.py :pyobject: delete_profile .. _manage profile: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/profile.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/clustering/profile_type.rst0000664000175000017500000000260400000000000026146 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================== Working with Profile Types ========================== A **profile** is a template used to create and manage nodes, i.e. objects exposed by other OpenStack services. A profile encodes the information needed for node creation in a property named ``spec``. 
List Profile Types ~~~~~~~~~~~~~~~~~~ To examine the known profile types: .. literalinclude:: ../../examples/clustering/profile_type.py :pyobject: list_profile_types Full example: `manage profile type`_ Get Profile Type ~~~~~~~~~~~~~~~~ To get the details about a profile type, you need to provide the name of it. .. literalinclude:: ../../examples/clustering/profile_type.py :pyobject: get_profile_type Full example: `manage profile type`_ .. _manage profile type: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/profile_type.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/clustering/receiver.rst0000664000175000017500000000536400000000000025257 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================== Managing Receivers ================== Receivers are the event sinks associated to senlin clusters. When certain events (or alarms) are seen by a monitoring software, the software can notify the senlin clusters of those events (or alarms). When senlin receives those notifications, it can automatically trigger some predefined operations with preset parameter values. List Receivers ~~~~~~~~~~~~~~ To examine the list of receivers: .. 
literalinclude:: ../../examples/clustering/receiver.py :pyobject: list_receivers When listing receivers, you can specify the sorting option using the ``sort`` parameter and you can do pagination using the ``limit`` and ``marker`` parameters. Full example: `manage receiver`_ Create Receiver ~~~~~~~~~~~~~~~ When creating a receiver, you will provide a dictionary with keys and values according to the receiver type referenced. .. literalinclude:: ../../examples/clustering/receiver.py :pyobject: create_receiver Optionally, you can specify a ``metadata`` keyword argument that contains some key-value pairs to be associated with the receiver. Full example: `manage receiver`_ Get Receiver ~~~~~~~~~~~~ To get a receiver based on its name or ID: .. literalinclude:: ../../examples/clustering/receiver.py :pyobject: get_receiver Full example: `manage receiver`_ Find Receiver ~~~~~~~~~~~~~ To find a receiver based on its name or ID: .. literalinclude:: ../../examples/clustering/receiver.py :pyobject: find_receiver Full example: `manage receiver`_ Update Receiver ~~~~~~~~~~~~~~~ After a receiver is created, most of its properties are immutable. Still, you can update a receiver's ``name`` and/or ``params``. .. literalinclude:: ../../examples/clustering/receiver.py :pyobject: update_receiver Full example: `manage receiver`_ Delete Receiver ~~~~~~~~~~~~~~~ A receiver can be deleted after creation, provided that it is not referenced by any active clusters. If you attempt to delete a receiver that is still in use, you will get an error message. .. literalinclude:: ../../examples/clustering/receiver.py :pyobject: delete_receiver .. _manage receiver: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/receiver.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/clustering.rst0000664000175000017500000000235100000000000023444 0ustar00zuulzuul00000000000000.. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================== Using OpenStack Clustering ========================== Before working with the Clustering service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used by all examples in this guide. The primary abstractions/resources of the Clustering service are: .. toctree:: :maxdepth: 1 Profile Type Profile Cluster Node Policy Type Policy Receiver Action Event ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/compute.rst0000664000175000017500000000513500000000000022744 0ustar00zuulzuul00000000000000Using OpenStack Compute ======================= Before working with the Compute service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. contents:: Table of Contents :local: The primary resource of the Compute service is the server. List Servers ------------ A **server** is a virtual machine that provides access to a compute instance being run by your cloud provider. .. literalinclude:: ../examples/compute/list.py :pyobject: list_servers Full example: `compute resource list`_ List Images ----------- An **image** is the operating system you want to use for your server. .. 
literalinclude:: ../examples/compute/list.py :pyobject: list_images Full example: `compute resource list`_ List Flavors ------------ A **flavor** is the resource configuration for a server. Each flavor is a unique combination of disk, memory, vCPUs, and network bandwidth. .. literalinclude:: ../examples/compute/list.py :pyobject: list_flavors Full example: `compute resource list`_ List Networks ------------- A **network** provides connectivity to servers. .. literalinclude:: ../examples/network/list.py :pyobject: list_networks Full example: `network resource list`_ Create Key Pair --------------- A **key pair** is the public key and private key of `public–key cryptography`_. They are used to encrypt and decrypt login information when connecting to your server. .. literalinclude:: ../examples/compute/create.py :pyobject: create_keypair Full example: `compute resource create`_ Create Server ------------- At minimum, a server requires a name, an image, a flavor, and a network on creation. You can discover the names and IDs of these attributes by listing them as above and then using the find methods to get the appropriate resources. Ideally you'll also create a server using a keypair so you can login to that server with the private key. Servers take time to boot so we call ``wait_for_server`` to wait for it to become active. .. literalinclude:: ../examples/compute/create.py :pyobject: create_server Full example: `compute resource create`_ .. _compute resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/compute/list.py .. _network resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/network/list.py .. _compute resource create: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/compute/create.py .. 
_public–key cryptography: https://en.wikipedia.org/wiki/Public-key_cryptography ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/connect.rst0000664000175000017500000000216300000000000022717 0ustar00zuulzuul00000000000000Connect ======= In order to work with an OpenStack cloud you first need to create a :class:`~openstack.connection.Connection` to it using your credentials. A :class:`~openstack.connection.Connection` can be created in 3 ways, using the class itself, :ref:`config-clouds-yaml`, or :ref:`config-environment-variables`. It is recommended to always use :ref:`config-clouds-yaml` as the same config can be used across tools and languages. Create Connection ----------------- To create a :class:`~openstack.connection.Connection` instance, use the :func:`~openstack.connect` factory function. .. literalinclude:: ../examples/connect.py :pyobject: create_connection Full example at `connect.py `_ .. note:: To enable logging, see the :doc:`logging` user guide. Next ---- Now that you can create a connection, continue with the :ref:`user_guides` to work with an OpenStack service. .. TODO(shade) Update the text here and consolidate with the old os-client-config docs so that we have a single and consistent explanation of the envvars cloud, etc. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/connect_from_config.rst0000664000175000017500000000431700000000000025272 0ustar00zuulzuul00000000000000Connect From Config =================== In order to work with an OpenStack cloud you first need to create a :class:`~openstack.connection.Connection` to it using your credentials. A :class:`~openstack.connection.Connection` can be created in 3 ways, using the class itself (see :doc:`connect`), a file, or environment variables as illustrated below. 
The SDK uses `os-client-config `_ to handle the configuration. Create Connection From A File ----------------------------- Default Location **************** To create a connection from a file you need a YAML file to contain the configuration. .. literalinclude:: ../../contributor/clouds.yaml :language: yaml To use a configuration file called ``clouds.yaml`` in one of the default locations: * Current Directory * ~/.config/openstack * /etc/openstack call :py:func:`~openstack.connection.from_config`. The ``from_config`` function takes three optional arguments: * **cloud_name** allows you to specify a cloud from your ``clouds.yaml`` file. * **cloud_config** allows you to pass in an existing ``openstack.config.loader.OpenStackConfig``` object. * **options** allows you to specify a namespace object with options to be added to the cloud config. .. literalinclude:: ../examples/connect.py :pyobject: Opts .. literalinclude:: ../examples/connect.py :pyobject: create_connection_from_config .. literalinclude:: ../examples/connect.py :pyobject: create_connection_from_args .. note:: To enable logging, set ``debug=True`` in the ``options`` object. User Defined Location ********************* To use a configuration file in a user defined location set the environment variable ``OS_CLIENT_CONFIG_FILE`` to the absolute path of a file.:: export OS_CLIENT_CONFIG_FILE=/path/to/my/config/my-clouds.yaml and call :py:func:`~openstack.connection.from_config` with the **cloud_name** of the cloud configuration to use, . .. Create Connection From Environment Variables -------------------------------------------- TODO(etoews): Document when https://storyboard.openstack.org/#!/story/1489617 is fixed. Next ---- Now that you can create a connection, continue with the :ref:`user_guides` for an OpenStack service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/database.rst0000664000175000017500000000046700000000000023037 0ustar00zuulzuul00000000000000Using OpenStack Database ======================== Before working with the Database service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. TODO(thowe): Implement this guide ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/dns.rst0000664000175000017500000000102300000000000022044 0ustar00zuulzuul00000000000000Using OpenStack DNS =================== Before working with the DNS service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. TODO(gtema): Implement this guide List Zones ---------- .. literalinclude:: ../examples/dns/list.py :pyobject: list_zones Full example: `dns resource list`_ .. _dns resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/dns/list.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/identity.rst0000664000175000017500000000777100000000000023131 0ustar00zuulzuul00000000000000Using OpenStack Identity ======================== Before working with the Identity service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. The OpenStack Identity service is the default identity management system for OpenStack. 
The Identity service authentication process confirms the identity of a user and an incoming request by validating a set of credentials that the user supplies. Initially, these credentials are a user name and password or a user name and API key. When the Identity service validates user credentials, it issues an authentication token that the user provides in subsequent requests. An authentication token is an alpha-numeric text string that enables access to OpenStack APIs and resources. A token may be revoked at any time and is valid for a finite duration. List Users ---------- A **user** is a digital representation of a person, system, or service that uses OpenStack cloud services. The Identity service validates that incoming requests are made by the user who claims to be making the call. Users have a login and can access resources by using assigned tokens. Users can be directly assigned to a particular project and behave as if they are contained in that project. .. literalinclude:: ../examples/identity/list.py :pyobject: list_users Full example: `identity resource list`_ List Credentials ---------------- **Credentials** are data that confirms the identity of the user. For example, user name and password, user name and API key, or an authentication token that the Identity service provides. .. literalinclude:: ../examples/identity/list.py :pyobject: list_credentials Full example: `identity resource list`_ List Projects ------------- A **project** is a container that groups or isolates resources or identity objects. .. literalinclude:: ../examples/identity/list.py :pyobject: list_projects Full example: `identity resource list`_ List Domains ------------ A **domain** is an Identity service API v3 entity and represents a collection of projects and users that defines administrative boundaries for the management of Identity entities. Users can be granted the administrator role for a domain. 
A domain administrator can create projects, users, and groups in a domain and assign roles to users and groups in a domain. .. literalinclude:: ../examples/identity/list.py :pyobject: list_domains Full example: `identity resource list`_ List Groups ----------- A **group** is an Identity service API v3 entity and represents a collection of users that are owned by a domain. A group role granted to a domain or project applies to all users in the group. Adding users to, or removing users from, a group respectively grants, or revokes, their role and authentication to the associated domain or project. .. literalinclude:: ../examples/identity/list.py :pyobject: list_groups Full example: `identity resource list`_ List Services ------------- A **service** is an OpenStack service, such as Compute, Object Storage, or Image service, that provides one or more endpoints through which users can access resources and perform operations. .. literalinclude:: ../examples/identity/list.py :pyobject: list_services Full example: `identity resource list`_ List Endpoints -------------- An **endpoint** is a network-accessible address, usually a URL, through which you can access a service. .. literalinclude:: ../examples/identity/list.py :pyobject: list_endpoints Full example: `identity resource list`_ List Regions ------------ A **region** is an Identity service API v3 entity and represents a general division in an OpenStack deployment. You can associate zero or more sub-regions with a region to make a tree-like structured hierarchy. .. literalinclude:: ../examples/identity/list.py :pyobject: list_regions Full example: `identity resource list`_ .. 
_identity resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/identity/list.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/image.rst0000664000175000017500000000702300000000000022350 0ustar00zuulzuul00000000000000Using OpenStack Image ===================== Before working with the Image service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. The primary resource of the Image service is the image. List Images ----------- An **image** is a collection of files for a specific operating system that you use to create or rebuild a server. OpenStack provides `pre-built images `_. You can also create custom images, or snapshots, from servers that you have launched. Images come in different formats and are sometimes called virtual machine images. .. literalinclude:: ../examples/image/list.py :pyobject: list_images Full example: `image resource list`_ Create Image ------------ Create an image by uploading its data and setting its attributes. .. literalinclude:: ../examples/image/create.py :pyobject: upload_image Full example: `image resource create`_ Create Image via interoperable image import process --------------------------------------------------- Create an image then use interoperable image import process to download data from a web URL. For more information about the image import process, please check `interoperable image import`_ .. literalinclude:: ../examples/image/import.py :pyobject: import_image Full example: `image resource import`_ .. _download_image-stream-true: Downloading an Image with stream=True ------------------------------------- As images are often very large pieces of data, storing their entire contents in the memory of your application can be less than desirable. 
A more efficient method may be to iterate over a stream of the response data. By choosing to stream the response content, you determine the ``chunk_size`` that is appropriate for your needs, meaning only that many bytes of data are read for each iteration of the loop until all data has been consumed. See :meth:`requests.Response.iter_content` for more information. When you choose to stream an image download, openstacksdk is no longer able to compute the checksum of the response data for you. This example shows how you might do that yourself, in a very similar manner to how the library calculates checksums for non-streamed responses. .. literalinclude:: ../examples/image/download.py :pyobject: download_image_stream Downloading an Image with stream=False -------------------------------------- If you wish to download an image's contents all at once and to memory, simply set ``stream=False``, which is the default. .. literalinclude:: ../examples/image/download.py :pyobject: download_image Full example: `image resource download`_ Delete Image ------------ Delete an image. .. literalinclude:: ../examples/image/delete.py :pyobject: delete_image Full example: `image resource delete`_ .. _image resource create: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/image/create.py .. _image resource import: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/image/import.py .. _image resource delete: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/image/delete.py .. _image resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/image/list.py .. _image resource download: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/image/download.py .. 
_interoperable image import: https://docs.openstack.org/glance/latest/admin/interoperable-image-import.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/intro.rst0000664000175000017500000000571200000000000022424 0ustar00zuulzuul00000000000000=============== Getting started =============== openstacksdk aims to talk to any OpenStack cloud. To do this, it requires a configuration file. openstacksdk favours ``clouds.yaml`` files, but can also use environment variables. The ``clouds.yaml`` file should be provided by your cloud provider or deployment tooling. An example: .. code-block:: yaml clouds: mordred: region_name: Dallas auth: username: 'mordred' password: XXXXXXX project_name: 'demo' auth_url: 'https://identity.example.com' More information on configuring openstacksdk can be found in :doc:`/user/config/configuration`. Given sufficient configuration, you can use openstacksdk to interact with your cloud. openstacksdk consists of three layers. Most users will make use of the *proxy* layer. Using the above ``clouds.yaml``, consider listing servers: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in conn.compute.servers(): print(server.to_dict()) openstacksdk also contains a higher-level *cloud* layer based on logical operations: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in conn.list_servers(): print(server.to_dict()) The benefit of this layer is mostly seen in more complicated operations that take multiple steps and where the steps vary across providers. For example: .. 
code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # Upload an image to the cloud image = conn.create_image( 'ubuntu-trusty', filename='ubuntu-trusty.qcow2', wait=True) # Find a flavor with at least 512M of RAM flavor = conn.get_flavor_by_ram(512) # Boot a server, wait for it to boot, and then do whatever is needed # to get a public IP address for it. conn.create_server( 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) Finally, there is the low-level *resource* layer. This provides support for the basic CRUD operations supported by REST APIs and is the base building block for the other layers. You typically will not need to use this directly: .. code-block:: python import openstack import openstack.config.loader import openstack.compute.v2.server # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in openstack.compute.v2.server.Server.list(session=conn.compute): print(server.to_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/key_manager.rst0000664000175000017500000000377000000000000023555 0ustar00zuulzuul00000000000000Using OpenStack Key Manager =========================== Before working with the Key Manager service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. contents:: Table of Contents :local: .. note:: Some interactions with the Key Manager service differ from that of other services in that resources do not have a proper ``id`` parameter, which is necessary to make some calls. 
Instead, resources have a separately named id attribute, e.g., the Secret resource has ``secret_id``. The examples below outline when to pass in those id values. Create a Secret --------------- The Key Manager service allows you to create new secrets by passing the attributes of the :class:`~openstack.key_manager.v1.secret.Secret` to the :meth:`~openstack.key_manager.v1._proxy.Proxy.create_secret` method. .. literalinclude:: ../examples/key_manager/create.py :pyobject: create_secret List Secrets ------------ Once you have stored some secrets, they are available for you to list via the :meth:`~openstack.key_manager.v1._proxy.Proxy.secrets` method. This method returns a generator, which yields each :class:`~openstack.key_manager.v1.secret.Secret`. .. literalinclude:: ../examples/key_manager/list.py :pyobject: list_secrets The :meth:`~openstack.key_manager.v1._proxy.Proxy.secrets` method can also make more advanced queries to limit the secrets that are returned. .. literalinclude:: ../examples/key_manager/list.py :pyobject: list_secrets_query Get Secret Payload ------------------ Once you have received a :class:`~openstack.key_manager.v1.secret.Secret`, you can obtain the payload for it by passing the secret's id value to the :meth:`~openstack.key_manager.v1._proxy.Proxy.secrets` method. Use the :data:`~openstack.key_manager.v1.secret.Secret.secret_id` attribute when making this request. .. literalinclude:: ../examples/key_manager/get.py :pyobject: get_secret_payload ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/logging.rst0000664000175000017500000000716400000000000022722 0ustar00zuulzuul00000000000000======= Logging ======= .. note:: TODO(shade) This document is written from a shade POV. It needs to be combined with the existing logging guide, but also the logging systems need to be rationalized. `openstacksdk` uses `Python Logging`_. 
As `openstacksdk` is a library, it does not configure logging handlers automatically, expecting instead for that to be the purview of the consuming application. Simple Usage ------------ For consumers who just want to get a basic logging setup without thinking about it too deeply, there is a helper method. If used, it should be called before any other openstacksdk functionality. .. autofunction:: openstack.enable_logging .. code-block:: python import openstack openstack.enable_logging() The ``stream`` parameter controls the stream where log message are written to. It defaults to `sys.stdout` which will result in log messages being written to STDOUT. It can be set to another output stream, or to ``None`` to disable logging to the console. The ``path`` parameter sets up logging to log to a file. By default, if ``path`` is given and ``stream`` is not, logging will only go to ``path``. You can combine the ``path`` and ``stream`` parameters to log to both places simultaneously. To log messages to a file called ``openstack.log`` and the console on ``stdout``: .. code-block:: python import sys import openstack openstack.enable_logging( debug=True, path='openstack.log', stream=sys.stdout) `openstack.enable_logging` also sets up a few other loggers and squelches some warnings or log messages that are otherwise uninteresting or unactionable by an openstacksdk user. Advanced Usage -------------- `openstacksdk` logs to a set of different named loggers. Most of the logging is set up to log to the root ``openstack`` logger. There are additional sub-loggers that are used at times, primarily so that a user can decide to turn on or off a specific type of logging. They are listed below. openstack.config Issues pertaining to configuration are logged to the ``openstack.config`` logger. openstack.iterate_timeout When `openstacksdk` needs to poll a resource, it does so in a loop that waits between iterations and ultimately times out. 
The ``openstack.iterate_timeout`` logger emits messages for each iteration indicating it is waiting and for how long. These can be useful to see for long running tasks so that one can know things are not stuck, but can also be noisy. openstack.fnmatch `openstacksdk` will try to use `fnmatch`_ on given `name_or_id` arguments. It's a best effort attempt, so pattern misses are logged to ``openstack.fnmatch``. A user may not be intending to use an fnmatch pattern - such as if they are trying to find an image named ``Fedora 24 [official]``, so these messages are logged separately. .. _fnmatch: https://pymotw.com/2/fnmatch/ HTTP Tracing ------------ HTTP Interactions are handled by `keystoneauth`_. If you want to enable HTTP tracing while using openstacksdk and are not using `openstack.enable_logging`, set the log level of the ``keystoneauth`` logger to ``DEBUG``. For more information see https://docs.openstack.org/keystoneauth/latest/using-sessions.html#logging .. _keystoneauth: https://docs.openstack.org/keystoneauth/latest/ Python Logging -------------- Python logging is a standard feature of Python and is documented fully in the Python Documentation, which varies by version of Python. For more information on Python Logging for Python v2, see https://docs.python.org/2/library/logging.html. For more information on Python Logging for Python v3, see https://docs.python.org/3/library/logging.html. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/message.rst0000664000175000017500000000047200000000000022713 0ustar00zuulzuul00000000000000Using OpenStack Message ======================= Before working with the Message service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. 
TODO(briancurtin): Implement this guide ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/network.rst0000664000175000017500000001101500000000000022753 0ustar00zuulzuul00000000000000Using OpenStack Network ======================= Before working with the Network service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. contents:: Table of Contents :local: The primary resource of the Network service is the network. List Networks ------------- A **network** is an isolated `Layer 2 `_ networking segment. There are two types of networks, project and provider networks. Project networks are fully isolated and are not shared with other projects. Provider networks map to existing physical networks in the data center and provide external network access for servers. Only an OpenStack administrator can create provider networks. Networks can be connected via routers. .. literalinclude:: ../examples/network/list.py :pyobject: list_networks Full example: `network resource list`_ List Subnets ------------ A **subnet** is a block of IP addresses and associated configuration state. Subnets are used to allocate IP addresses when new ports are created on a network. .. literalinclude:: ../examples/network/list.py :pyobject: list_subnets Full example: `network resource list`_ List Ports ---------- A **port** is a connection point for attaching a single device, such as the `NIC `_ of a server, to a network. The port also describes the associated network configuration, such as the `MAC `_ and IP addresses to be used on that port. .. literalinclude:: ../examples/network/list.py :pyobject: list_ports Full example: `network resource list`_ List Security Groups -------------------- A **security group** acts as a virtual firewall for servers. 
It is a container for security group rules which specify the type of network traffic and direction that is allowed to pass through a port. .. literalinclude:: ../examples/network/list.py :pyobject: list_security_groups Full example: `network resource list`_ List Routers ------------ A **router** is a logical component that forwards data packets between networks. It also provides `Layer 3 `_ and `NAT `_ forwarding to provide external network access for servers on project networks. .. literalinclude:: ../examples/network/list.py :pyobject: list_routers Full example: `network resource list`_ List Network Agents ------------------- A **network agent** is a plugin that handles various tasks used to implement virtual networks. These agents include neutron-dhcp-agent, neutron-l3-agent, neutron-metering-agent, and neutron-lbaas-agent, among others. .. literalinclude:: ../examples/network/list.py :pyobject: list_network_agents Full example: `network resource list`_ Create Network -------------- Create a project network and subnet. This network can be used when creating a server and allows the server to communicate with others servers on the same project network. .. literalinclude:: ../examples/network/create.py :pyobject: create_network Full example: `network resource create`_ Open a Port ----------- When creating a security group for a network, you will need to open certain ports to allow communication via them. For example, you may need to enable HTTPS access on port 443. .. literalinclude:: ../examples/network/security_group_rules.py :pyobject: open_port Full example: `network security group create`_ Accept Pings ------------ In order to ping a machine on your network within a security group, you will need to create a rule to allow inbound ICMP packets. .. literalinclude:: ../examples/network/security_group_rules.py :pyobject: allow_ping Full example: `network security group create`_ Delete Network -------------- Delete a project network and its subnets. .. 
literalinclude:: ../examples/network/delete.py :pyobject: delete_network Full example: `network resource delete`_ .. _network resource create: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/network/create.py .. _network resource delete: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/network/delete.py .. _network resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/network/list.py .. _network security group create: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/network/security_group_rules.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/object_store.rst0000664000175000017500000002042200000000000023746 0ustar00zuulzuul00000000000000Using OpenStack Object Store ============================ Before working with the Object Store service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. contents:: Table of Contents :local: The primary resources of the Object Store service are containers and objects. Working with Containers ----------------------- Listing Containers ****************** To list existing containers, use the :meth:`~openstack.object_store.v1._proxy.Proxy.containers` method. :: >>> for cont in conn.object_store.containers(): ... print cont ... openstack.object_store.v1.container.Container: {u'count': 5, u'bytes': 500, u'name': u'my container'} openstack.object_store.v1.container.Container: {u'count': 0, u'bytes': 0, u'name': u'empty container'} openstack.object_store.v1.container.Container: {u'count': 100, u'bytes': 1000000, u'name': u'another container'} The ``containers`` method returns a generator which yields :class:`~openstack.object_store.v1.container.Container` objects. 
It handles pagination for you, which can be adjusted via the ``limit`` argument. By default, the ``containers`` method will yield as many containers as the service will return, and it will continue requesting until it receives no more. :: >>> for cont in conn.object_store.containers(limit=500): ... print(cont) ... <500 Containers> ... another request transparently made to the Object Store service <500 more Containers> ... Creating Containers ******************* To create a container, use the :meth:`~openstack.object_store.v1._proxy.Proxy.create_container` method. :: >>> cont = conn.object_store.create_container(name="new container") >>> cont openstack.object_store.v1.container.Container: {'name': u'new container'} Working with Container Metadata ******************************* To get the metadata for a container, use the :meth:`~openstack.object_store.v1._proxy.Proxy.get_container_metadata` method. This method either takes the name of a container, or a :class:`~openstack.object_store.v1.container.Container` object, and it returns a `Container` object with all of its metadata attributes set. :: >>> cont = conn.object_store.get_container_metadata("new container") openstack.object_store.v1.container.Container: {'content-length': '0', 'x-container-object-count': '0', 'name': u'new container', 'accept-ranges': 'bytes', 'x-trans-id': 'tx22c5de63466e4c05bb104-0054740c39', 'date': 'Tue, 25 Nov 2014 04:57:29 GMT', 'x-timestamp': '1416889793.23520', 'x-container-read': '.r:mysite.com', 'x-container-bytes-used': '0', 'content-type': 'text/plain; charset=utf-8'} To set the metadata for a container, use the :meth:`~openstack.object_store.v1._proxy.Proxy.set_container_metadata` method. This method takes a :class:`~openstack.object_store.v1.container.Container` object. For example, to grant another user write access to this container, you can set the :attr:`~openstack.object_store.v1.container.Container.write_ACL` on a resource and pass it to `set_container_metadata`. 
:: >>> cont.write_ACL = "big_project:another_user" >>> conn.object_store.set_container_metadata(cont) openstack.object_store.v1.container.Container: {'content-length': '0', 'x-container-object-count': '0', 'name': u'my new container', 'accept-ranges': 'bytes', 'x-trans-id': 'txc3ee751f971d41de9e9f4-0054740ec1', 'date': 'Tue, 25 Nov 2014 05:08:17 GMT', 'x-timestamp': '1416889793.23520', 'x-container-read': '.r:mysite.com', 'x-container-bytes-used': '0', 'content-type': 'text/plain; charset=utf-8', 'x-container-write': 'big_project:another_user'} Working with Objects -------------------- Objects are held in containers. From an API standpoint, you work with them using similarly named methods, typically with an additional argument to specify their container. Listing Objects *************** To list the objects that exist in a container, use the :meth:`~openstack.object_store.v1._proxy.Proxy.objects` method. If you have a :class:`~openstack.object_store.v1.container.Container` object, you can pass it to ``objects``. :: >>> print cont.name pictures >>> for obj in conn.object_store.objects(cont): ... print obj ... openstack.object_store.v1.container.Object: {u'hash': u'0522d4ccdf9956badcb15c4087a0c4cb', u'name': u'pictures/selfie.jpg', u'bytes': 15744, 'last-modified': u'2014-10-31T06:33:36.618640', u'last_modified': u'2014-10-31T06:33:36.618640', u'content_type': u'image/jpeg', 'container': u'pictures', 'content-type': u'image/jpeg'} ... Similar to the :meth:`~openstack.object_store.v1._proxy.Proxy.containers` method, ``objects`` returns a generator which yields :class:`~openstack.object_store.v1.obj.Object` objects stored in the container. It also handles pagination for you, which you can adjust with the ``limit`` parameter, otherwise making each request for the maximum that your Object Store will return. If you have the name of a container instead of an object, you can also pass that to the ``objects`` method. 
:: >>> for obj in conn.object_store.objects("pictures".decode("utf8"), limit=100): ... print obj ... <100 Objects> ... another request transparently made to the Object Store service <100 more Objects> Getting Object Data ******************* Once you have an :class:`~openstack.object_store.v1.obj.Object`, you get the data stored inside of it with the :meth:`~openstack.object_store.v1._proxy.Proxy.get_object_data` method. :: >>> print ob.name message.txt >>> data = conn.object_store.get_object_data(ob) >>> print data Hello, world! Additionally, if you want to save the object to disk, the :meth:`~openstack.object_store.v1._proxy.Proxy.download_object` convenience method takes an :class:`~openstack.object_store.v1.obj.Object` and a ``path`` to write the contents to. :: >>> conn.object_store.download_object(ob, "the_message.txt") Uploading Objects ***************** Once you have data you'd like to store in the Object Store service, you use the :meth:`~openstack.object_store.v1._proxy.Proxy.upload_object` method. This method takes the ``data`` to be stored, along with at least an object ``name`` and the ``container`` it is to be stored in. :: >>> hello = conn.object_store.upload_object(container="messages", name="helloworld.txt", data="Hello, world!") >>> print hello openstack.object_store.v1.container.Object: {'content-length': '0', 'container': u'messages', 'name': u'helloworld.txt', 'last-modified': 'Tue, 25 Nov 2014 17:39:29 GMT', 'etag': '5eb63bbbe01eeed093cb22bb8f5acdc3', 'x-trans-id': 'tx3035d41b03334aeaaf3dd-005474bed0', 'date': 'Tue, 25 Nov 2014 17:39:28 GMT', 'content-type': 'text/html; charset=UTF-8'} Working with Object Metadata **************************** Working with metadata on objects is identical to how it's done with containers. You use the :meth:`~openstack.object_store.v1._proxy.Proxy.get_object_metadata` and :meth:`~openstack.object_store.v1._proxy.Proxy.set_object_metadata` methods. 
The metadata attributes to be set can be found on the :class:`~openstack.object_store.v1.obj.Object` object. :: >>> secret.delete_after = 300 >>> secret = conn.object_store.set_object_metadata(secret) We set the :attr:`~openstack.object_store.obj.Object.delete_after` value to 500 seconds, causing the object to be deleted in 300 seconds, or five minutes. That attribute corresponds to the ``X-Delete-After`` header value, which you can see is returned when we retrieve the updated metadata. :: >>> conn.object_store.get_object_metadata(ob) openstack.object_store.v1.container.Object: {'content-length': '11', 'container': u'Secret Container', 'name': u'selfdestruct.txt', 'x-delete-after': 300, 'accept-ranges': 'bytes', 'last-modified': 'Tue, 25 Nov 2014 17:50:45 GMT', 'etag': '5eb63bbbe01eeed093cb22bb8f5acdc3', 'x-timestamp': '1416937844.36805', 'x-trans-id': 'tx5c3fd94adf7c4e1b8f334-005474c17b', 'date': 'Tue, 25 Nov 2014 17:50:51 GMT', 'content-type': 'text/plain'} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/orchestration.rst0000664000175000017500000000050600000000000024151 0ustar00zuulzuul00000000000000Using OpenStack Orchestration ============================= Before working with the Orchestration service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. 
TODO(thowe): Implement this guide ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/shared_file_system.rst0000664000175000017500000001164100000000000025140 0ustar00zuulzuul00000000000000Using OpenStack Shared File Systems =================================== Before working with the Shared File System service, you'll need to create a connection to your OpenStack cloud by following the :doc:`connect` user guide. This will provide you with the ``conn`` variable used in the examples below. .. contents:: Table of Contents :local: List Availability Zones ----------------------- A Shared File System service **availability zone** is a failure domain for your shared file systems. You may create a shared file system (referred to simply as **shares**) in a given availability zone, and create replicas of the share in other availability zones. .. literalinclude:: ../examples/shared_file_system/availability_zones.py :pyobject: list_availability_zones Share Instances --------------- Administrators can list, show information for, explicitly set the state of, and force-delete share instances. .. literalinclude:: ../examples/shared_file_system/share_instances.py :pyobject: share_instances Get Share Instance ------------------ Shows details for a single share instance. .. literalinclude:: ../examples/shared_file_system/share_instances.py :pyobject: get_share_instance Reset Share Instance Status --------------------------- Explicitly updates the state of a share instance. .. literalinclude:: ../examples/shared_file_system/share_instances.py :pyobject: reset_share_instance_status Delete Share Instance --------------------- Force-deletes a share instance. .. literalinclude:: ../examples/shared_file_system/share_instances.py :pyobject: delete_share_instance Resize Share ------------ Shared File System shares can be resized (extended or shrunk) to a given size. 
For details on resizing shares, refer to the `Manila docs `_. .. literalinclude:: ../examples/shared_file_system/shares.py :pyobject: resize_share .. literalinclude:: ../examples/shared_file_system/shares.py :pyobject: resize_shares_without_shrink List Share Group Snapshots -------------------------- A share group snapshot is a point-in-time, read-only copy of the data that is contained in a share group. You can list all share group snapshots .. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py :pyobject: list_share_group_snapshots Get Share Group Snapshot ------------------------ Show share group snapshot details .. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py :pyobject: get_share_group_snapshot List Share Group Snapshot Members --------------------------------- Lists all share group snapshots members. .. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py :pyobject: share_group_snapshot_members Create Share Group Snapshot --------------------------- Creates a snapshot from a share group. .. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py :pyobject: create_share_group_snapshot Reset Share Group Snapshot --------------------------- Reset share group snapshot state. .. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py :pyobject: reset_share_group_snapshot_status Update Share Group Snapshot --------------------------- Updates a share group snapshot. .. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py :pyobject: update_share_group_snapshot Delete Share Group Snapshot --------------------------- Deletes a share group snapshot. .. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py :pyobject: delete_share_group_snapshot List Share Metadata -------------------- Lists all metadata for a given share. .. 
literalinclude:: ../examples/shared_file_system/share_metadata.py :pyobject: list_share_metadata Get Share Metadata Item ----------------------- Retrieves a specific metadata item from a shares metadata by its key. .. literalinclude:: ../examples/shared_file_system/share_metadata.py :pyobject: get_share_metadata_item Create Share Metadata ---------------------- Creates share metadata. .. literalinclude:: ../examples/shared_file_system/share_metadata.py :pyobject: create_share_metadata Update Share Metadata ---------------------- Updates metadata of a given share. .. literalinclude:: ../examples/shared_file_system/share_metadata.py :pyobject: update_share_metadata Delete Share Metadata ---------------------- Deletes a specific metadata item from a shares metadata by its key. Can specify multiple keys to be deleted. .. literalinclude:: ../examples/shared_file_system/share_metadata.py :pyobject: delete_share_metadata Manage Share ------------ Manage a share with Manila. .. literalinclude:: ../examples/shared_file_system/shares.py :pyobject: manage_share Unmanage Share -------------- Unmanage a share from Manila. .. literalinclude:: ../examples/shared_file_system/shares.py :pyobject: unmanage_share ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/guides/stats.rst0000664000175000017500000000467700000000000022440 0ustar00zuulzuul00000000000000==================== Statistics reporting ==================== `openstacksdk` can report statistics on individual API requests/responses in several different formats. Note that metrics will be reported only when corresponding client libraries (`statsd` for 'statsd' reporting, `influxdb` for influxdb, etc.). If libraries are not available reporting will be silently ignored. statsd ------ `statsd` can be configured via configuration entries or environment variables. A global `metrics` entry defines defaults for all clouds. 
Each cloud can specify a `metrics` section to override variables; this may be useful to separate results reported for each cloud. .. code-block:: yaml metrics: statsd: host: __statsd_server_host__ port: __statsd_server_port__ prefix: __statsd_prefix__ (default 'openstack.api') clouds: a-cloud: auth: ... metrics: statsd: prefix: 'openstack.api.a-cloud' If the `STATSD_HOST` or `STATSD_PORT` environment variables are set, they will be taken as the default values (and enable `statsd` reporting if no other configuration is specified). InfluxDB -------- `InfluxDB `__ is supported via configuration in the `metrics` field. Similar to `statsd`, each cloud can provide it's own `metrics` section to override any global defaults. .. code-block:: yaml metrics: influxdb: host: __influxdb_server_host__ port: __influxdb_server_port__ use_udp: __True|False__ username: __influxdb_auth_username__ password: __influxdb_auth_password__ database: __influxdb_db_name__ measurement: __influxdb_measurement_name__ timeout: __infludb_requests_timeout__ clouds: .. InfluxDB reporting allows setting additional tags into the metrics based on the selected cloud. .. code-block:: yaml clouds: my_cloud: profile: some_profile ... additional_metric_tags: environment: production prometheus ---------- .. NOTE(ianw) 2021-04-19 : examples here would be great; this is just terse description taken from https://review.opendev.org/c/openstack/openstacksdk/+/614834 The prometheus support does not read from config, and does not run an http service since OpenstackSDK is a library. It is expected that an application that uses OpenstackSDK and wants request stats be collected will pass a `prometheus_client.CollectorRegistry` to `collector_registry`. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/index.rst0000664000175000017500000001370000000000000021114 0ustar00zuulzuul00000000000000Using the OpenStack SDK ======================= This section of documentation pertains to those who wish to use this SDK in their own application. If you're looking for documentation on how to contribute to or extend the SDK, refer to the `contributor <../contributor>`_ section. For a listing of terms used throughout the SDK, including the names of projects and services supported by it, see the :doc:`glossary <../glossary>`. .. _user_guides: User Guides ----------- These guides walk you through how to make use of the libraries we provide to work with each OpenStack service. If you're looking for a cookbook approach, this is where you'll want to begin. .. toctree:: :maxdepth: 1 Introduction Configuration Connect to an OpenStack Cloud Connect to an OpenStack Cloud Using a Config File Logging Statistics reporting Microversions Baremetal Block Storage Clustering Compute Database DNS Identity Image Key Manager Message Network Object Store Orchestration Shared File System Testing ------- The SDK provides a number of utilities to help you test your applications. .. toctree:: :maxdepth: 1 testing/index API Documentation ----------------- Service APIs are exposed through a two-layered approach. The classes exposed through our *Connection* interface are the place to start if you're an application developer consuming an OpenStack cloud. The *Resource* interface is the layer upon which the *Connection* is built, with *Connection* methods accepting and returning *Resource* objects. The Cloud Abstraction layer has a data model. .. 
toctree:: :maxdepth: 1 model Connection Interface ~~~~~~~~~~~~~~~~~~~~ A *Connection* instance maintains your cloud config, session and authentication information providing you with a set of higher-level interfaces to work with OpenStack services. .. toctree:: :maxdepth: 1 connection Once you have a *Connection* instance, services are accessed through instances of :class:`~openstack.proxy.Proxy` or subclasses of it that exist as attributes on the :class:`~openstack.connection.Connection`. .. _service-proxies: Service Proxies ~~~~~~~~~~~~~~~ The following service proxies exist on the :class:`~openstack.connection.Connection`. The service proxies are all always present on the :class:`~openstack.connection.Connection` object, but the combination of your ``CloudRegion`` and the catalog of the cloud in question control which services can be used. .. toctree:: :maxdepth: 1 Accelerator Baremetal Baremetal Introspection Block Storage v2 Block Storage v3 Clustering Compute Container Infrastructure Management Database DNS Identity v2 Identity v3 Image v1 Image v2 Key Manager Load Balancer Message v2 Network Object Store Orchestration Placement Shared File System Workflow Resource Interface ~~~~~~~~~~~~~~~~~~ The *Resource* layer is a lower-level interface to communicate with OpenStack services. While the classes exposed by the *Connection* build a convenience layer on top of this, *Resources* can be used directly. However, the most common usage of this layer is in receiving an object from a class in the *Connection* layer, modifying it, and sending it back into the *Connection* layer, such as to update a resource on the server. The following services have exposed *Resource* classes. .. 
toctree:: :maxdepth: 1 Accelerator Baremetal Baremetal Introspection Block Storage Clustering Compute Container Infrastructure Management Database DNS Identity Image Key Management Load Balancer Network Orchestration Object Store Placement Shared File System Workflow Low-Level Classes ~~~~~~~~~~~~~~~~~ The following classes are not commonly used by application developers, but are used to construct applications to talk to OpenStack APIs. Typically these parts are managed through the `Connection Interface`_, but their use can be customized. .. toctree:: :maxdepth: 1 resource service_description utils Errors and warnings ~~~~~~~~~~~~~~~~~~~ The SDK attempts to provide detailed errors and warnings for things like failed requests, deprecated APIs, and invalid configurations. Application developers are responsible for handling these errors and can opt into warnings to ensure their applications stay up-to-date. .. toctree:: :maxdepth: 1 exceptions warnings Presentations ------------- .. toctree:: :maxdepth: 1 multi-cloud-demo ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/microversions.rst0000664000175000017500000001240200000000000022705 0ustar00zuulzuul00000000000000Microversions ============= As openstacksdk rolls out support for consuming microversions, it will do so on a call by call basis as needed. Just like with major versions, openstacksdk should have logic to handle each microversion for a given REST call it makes, with the following rules in mind: * If an activity openstack performs can be done differently or more efficiently with a new microversion, the support should be added to openstack.cloud and to the appropriate Proxy class. * openstacksdk should always attempt to use the latest microversion it is aware of for a given call, unless a microversion removes important data. 
* Microversion selection should under no circumstances be exposed to the user in python API calls in the Resource layer or the openstack.cloud layer. * Microversion selection is exposed to the user in the REST layer via the ``microversion`` argument to each REST call. * A user of the REST layer may set the default microversion by setting ``{service_type}_default_microversion`` in clouds.yaml or ``OS_{service_type|upper}_DEFAULT_MICROVERSION`` environment variable. .. note:: Setting the default microversion in any circumstance other than when using the REST layer is highly discouraged. Both of the higher layers in openstacksdk provide data normalization as well as logic about which REST call to make. Setting the default microversion could change the behavior of the service in question in such a way that openstacksdk does not understand. If there is a feature of a service that needs a microversion and it is not already transparently exposed in openstacksdk, please file a bug. * If a feature is only exposed for a given microversion and cannot be simulated for older clouds without that microversion, it is ok to add it, but a clear error message should be given to the user that the given feature is not available on their cloud. (A message such as "This cloud supports a maximum microversion of XXX for service YYY and this feature only exists on clouds with microversion ZZZ. Please contact your cloud provider for information about when this feature might be available") * When adding a feature that only exists behind a new microversion, every effort should be made to figure out how to provide the same functionality if at all possible, even if doing so is inefficient. If an inefficient workaround is employed, a warning should be provided to the user. (the user's workaround to skip the inefficient behavior would be to stop using that openstacksdk API call) An example of this is the nova "get me a network" feature. 
The logic of "get me a network" can be done client-side, albeit less efficiently. Adding support for the "get me a network" feature via nova microversion should also add support for doing the client-side workaround. * If openstacksdk is aware of logic for more than one microversion, it should always attempt to use the latest version available for the service for that call. * Objects returned from openstacksdk should always go through normalization and thus should always conform to openstacksdk's documented data model. The objects should never look different to the user regardless of the microversion used for the REST call. * If a microversion adds new fields to an object, those fields should be added to openstacksdk's data model contract for that object and the data should either be filled in by performing additional REST calls if the data is available that way, or the field should have a default value of None which the user can be expected to test for when attempting to use the new value. * If a microversion removes fields from an object that are part of the existing data model contract, care should be taken to not use the new microversion for that call unless forced to by lack of availablity of the old microversion on the cloud in question. In the case where an old microversion is no longer available, care must be taken to either find the data from another source and fill it in, or to put a value of None into the field and document for the user that on some clouds the value may not exist. * If a microversion removes a field and the outcome is particularly intractable and impossible to work around without fundamentally breaking users, an issue should be raised with the service team in question. Hopefully a resolution can be found during the period while clouds still have the old microversion. * As new calls or objects are added, it is important to check in with the service team in question on the expected stability of the object. 
If there are known changes expected in the future, even if they may be a few years off, openstacksdk should take care to not add committments to its data model for those fields/features. It is ok for openstacksdk to not have something. .. note:: openstacksdk does not currently have any sort of "experimental" opt-in API that would allow exposing things to a user that may not be supportable under the normal compatibility contract. If a conflict arises in the future where there is a strong desire for a feature but also a lack of certainty about its stability over time, an experimental API may want to be explored ... but concrete use cases should arise before such a thing is started. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/model.rst0000664000175000017500000000566600000000000021121 0ustar00zuulzuul00000000000000Data Model ========== *openstacksdk* has a very strict policy on not breaking backwards compatibility ever. However, with the data structures returned from OpenStack, there are places where the resource structures from OpenStack are returned to the user somewhat directly, leaving an openstacksdk user open to changes/differences in result content. To combat that, openstacksdk 'normalizes' the return structure from OpenStack in many places, and the results of that normalization are listed below. Where openstacksdk performs normalization, a user can count on any fields declared in the docs as being completely safe to use - they are as much a part of openstacksdk's API contract as any other Python method. Some OpenStack objects allow for arbitrary attributes at the root of the object. openstacksdk will pass those through so as not to break anyone who may be counting on them, but as they are arbitrary openstacksdk can make no guarantees as to their existence. 
As part of normalization, openstacksdk will put any attribute from an OpenStack resource that is not in its data model contract into an attribute called 'properties'. The contents of properties are defined to be an arbitrary collection of key value pairs with no promises as to any particular key ever existing. If a user passes ``strict=True`` to the openstacksdk constructor, openstacksdk will not pass through arbitrary objects to the root of the resource, and will instead only put them in the properties dict. If a user is worried about accidentally writing code that depends on an attribute that is not part of the API contract, this can be a useful tool. Keep in mind all data can still be accessed via the properties dict, but any code touching anything in the properties dict should be aware that the keys found there are highly user/cloud specific. Any key that is transformed as part of the openstacksdk data model contract will not wind up with an entry in properties - only keys that are unknown. The ``location`` field ---------------------- A Location defines where a resource lives. It includes a cloud name and a region name, an availability zone as well as information about the project that owns the resource. The project information may contain a project ID, or a combination of one or more of a project name with a domain name or ID. If a project ID is present, it should be considered correct. Some resources do not carry ownership information with them. For those, the project information will be filled in from the project the user currently has a token for. Some resources do not have information about availability zones, or may exist region wide. Those resources will have None as their availability zone. .. 
code-block:: python Location = dict( cloud=str(), region_name=str(), zone=str() or None, project=dict( id=str() or None, name=str() or None, domain_id=str() or None, domain_name=str() or None, ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/multi-cloud-demo.rst0000664000175000017500000005172200000000000023173 0ustar00zuulzuul00000000000000Multi-Cloud Demo ================ This document contains a presentation in `presentty`_ format. If you want to walk through it like a presentation, install `presentty` and run: .. code:: bash presentty doc/source/user/multi-cloud-demo.rst The content is hopefully helpful even if it's not being narrated, so it's being included in the openstacksdk docs. .. _presentty: https://pypi.org/project/presentty Who am I? --------- Monty Taylor * OpenStack Infra Core * irc: mordred * twitter: @e_monty What are we going to talk about? -------------------------------- `OpenStackSDK` * a task and end-user oriented Python library * abstracts deployment differences * designed for multi-cloud * simple to use * massive scale * optional advanced features to handle 20k servers a day * Initial logic/design extracted from nodepool * Librified to re-use in Ansible OpenStackSDK is Free Software ----------------------------- * https://opendev.org/openstack/openstacksdk * openstack-discuss@lists.openstack.org * #openstack-sdks on oftc This talk is Free Software, too ------------------------------- * Written for presentty (https://pypi.org/project/presentty) * doc/source/user/multi-cloud-demo.rst * examples in examples/cloud * Paths subject to change - this is the first presentation in tree! Complete Example ---------------- .. 
code:: python from openstack import cloud as openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) for cloud_name, region_name in [ ('my-vexxhost', 'ca-ymq-1'), ('my-citycloud', 'Buf1'), ('my-internap', 'ams01'), ]: # Initialize cloud cloud = openstack.connect(cloud=cloud_name, region_name=region_name) # Upload an image to the cloud image = cloud.create_image( 'devuan-jessie', filename='devuan-jessie.qcow2', wait=True, ) # Find a flavor with at least 512M of RAM flavor = cloud.get_flavor_by_ram(512) # Boot a server, wait for it to boot, and then do whatever is needed # to get a public ip for it. cloud.create_server( 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True, ) Let's Take a Few Steps Back --------------------------- Multi-cloud is easy, but you need to know a few things. * Terminology * Config * OpenStackSDK API Cloud Terminology ----------------- Let's define a few terms, so that we can use them with ease: * `cloud` - logically related collection of services * `region` - completely independent subset of a given cloud * `patron` - human who has an account * `user` - account on a cloud * `project` - logical collection of cloud resources * `domain` - collection of users and projects Cloud Terminology Relationships ------------------------------- * A `cloud` has one or more `regions` * A `patron` has one or more `users` * A `patron` has one or more `projects` * A `cloud` has one or more `domains` * In a `cloud` with one `domain` it is named "default" * Each `patron` may have their own `domain` * Each `user` is in one `domain` * Each `project` is in one `domain` * A `user` has one or more `roles` on one or more `projects` HTTP Sessions ------------- * HTTP interactions are authenticated via keystone * Authenticating returns a `token` * An authenticated HTTP Session is shared across a `region` Cloud Regions ------------- A `cloud region` is the basic unit of REST interaction. 
* A `cloud` has a `service catalog` * The `service catalog` is returned in the `token` * The `service catalog` lists `endpoint` for each `service` in each `region` * A `region` is completely autonomous Users, Projects and Domains --------------------------- In clouds with multiple domains, project and user names are only unique within a region. * Names require `domain` information for uniqueness. IDs do not. * Providing `domain` information when not needed is fine. * `project_name` requires `project_domain_name` or `project_domain_id` * `project_id` does not * `username` requires `user_domain_name` or `user_domain_id` * `user_id` does not Confused Yet? ------------- Don't worry - you don't have to deal with most of that. Auth per cloud, select per region --------------------------------- In general, the thing you need to know is: * Configure authentication per `cloud` * Select config to use by `cloud` and `region` clouds.yaml ----------- Information about the clouds you want to connect to is stored in a file called `clouds.yaml`. `clouds.yaml` can be in your homedir: `~/.config/openstack/clouds.yaml` or system-wide: `/etc/openstack/clouds.yaml`. Information in your homedir, if it exists, takes precedence. Full docs on `clouds.yaml` are at https://docs.openstack.org/os-client-config/latest/ What about Mac and Windows? --------------------------- `USER_CONFIG_DIR` is different on Linux, OSX and Windows. * Linux: `~/.config/openstack` * OSX: `~/Library/Application Support/openstack` * Windows: `C:\\Users\\USERNAME\\AppData\\Local\\OpenStack\\openstack` `SITE_CONFIG_DIR` is different on Linux, OSX and Windows. * Linux: `/etc/openstack` * OSX: `/Library/Application Support/openstack` * Windows: `C:\\ProgramData\\OpenStack\\openstack` Config Terminology ------------------ For multi-cloud, think of two types: * `profile` - Facts about the `cloud` that are true for everyone * `cloud` - Information specific to a given `user` Apologies for the use of `cloud` twice. 
Environment Variables and Simple Usage -------------------------------------- * Environment variables starting with `OS_` go into a cloud called `envvars` * If you only have one cloud, you don't have to specify it * `OS_CLOUD` and `OS_REGION_NAME` are default values for `cloud` and `region_name` TOO MUCH TALKING - NOT ENOUGH CODE ---------------------------------- basic clouds.yaml for the example code -------------------------------------- Simple example of a clouds.yaml * Config for a named `cloud` "my-citycloud" * Reference a well-known "named" profile: `citycloud` * `os-client-config` has a built-in list of profiles at https://docs.openstack.org/openstacksdk/latest/user/config/vendor-support.html * Vendor profiles contain various advanced config * `cloud` name can match `profile` name (using different names for clarity) .. code:: yaml clouds: my-citycloud: profile: citycloud auth: username: mordred project_id: 65222a4d09ea4c68934fa1028c77f394 user_domain_id: d0919bd5e8d74e49adf0e145807ffc38 project_domain_id: d0919bd5e8d74e49adf0e145807ffc38 Where's the password? secure.yaml ----------- * Optional additional file just like `clouds.yaml` * Values overlaid on `clouds.yaml` * Useful if you want to protect secrets more stringently Example secure.yaml ------------------- * No, my password isn't XXXXXXXX * `cloud` name should match `clouds.yaml` * Optional - I actually keep mine in my `clouds.yaml` .. code:: yaml clouds: my-citycloud: auth: password: XXXXXXXX more clouds.yaml ---------------- More information can be provided. * Use v3 of the `identity` API - even if others are present * Use `https://image-ca-ymq-1.vexxhost.net/v2` for `image` API instead of what's in the catalog .. 
code:: yaml my-vexxhost: identity_api_version: 3 image_endpoint_override: https://image-ca-ymq-1.vexxhost.net/v2 profile: vexxhost auth: user_domain_id: default project_domain_id: default project_name: d8af8a8f-a573-48e6-898a-af333b970a2d username: 0b8c435b-cc4d-4e05-8a47-a2ada0539af1 Much more complex clouds.yaml example ------------------------------------- * Not using a profile - all settings included * In the `ams01` `region` there are two networks with undiscoverable qualities * Each one are labeled here so choices can be made * Any of the settings can be specific to a `region` if needed * `region` settings override `cloud` settings * `cloud` does not support `floating-ips` .. code:: yaml my-internap: auth: auth_url: https://identity.api.cloud.inap.com username: api-55f9a00fb2619 project_name: inap-17037 identity_api_version: 3 floating_ip_source: None regions: - name: ams01 values: networks: - name: inap-17037-WAN1654 routes_externally: true default_interface: true - name: inap-17037-LAN3631 routes_externally: false Complete Example Again ---------------------- .. code:: python from openstack import cloud as openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) for cloud_name, region_name in [ ('my-vexxhost', 'ca-ymq-1'), ('my-citycloud', 'Buf1'), ('my-internap', 'ams01')]: # Initialize cloud cloud = openstack.connect(cloud=cloud_name, region_name=region_name) # Upload an image to the cloud image = cloud.create_image( 'devuan-jessie', filename='devuan-jessie.qcow2', wait=True) # Find a flavor with at least 512M of RAM flavor = cloud.get_flavor_by_ram(512) # Boot a server, wait for it to boot, and then do whatever is needed # to get a public ip for it. cloud.create_server( 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) Step By Step ------------ Import the library ------------------ .. 
code:: python from openstack import cloud as openstack Logging ------- * `openstacksdk` uses standard python logging * ``openstack.enable_logging`` does easy defaults * Squelches some meaningless warnings * `debug` * Logs openstacksdk loggers at debug level * `http_debug` Implies `debug`, turns on HTTP tracing .. code:: python # Initialize and turn on debug logging openstack.enable_logging(debug=True) Example with Debug Logging -------------------------- * examples/cloud/debug-logging.py .. code:: python from openstack import cloud as openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='my-vexxhost', region_name='ca-ymq-1') cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]') Example with HTTP Debug Logging ------------------------------- * examples/cloud/http-debug-logging.py .. code:: python from openstack import cloud as openstack openstack.enable_logging(http_debug=True) cloud = openstack.connect( cloud='my-vexxhost', region_name='ca-ymq-1') cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]') Cloud Regions ------------- * `cloud` constructor needs `cloud` and `region_name` * `openstack.connect` is a helper factory function .. code:: python for cloud_name, region_name in [ ('my-vexxhost', 'ca-ymq-1'), ('my-citycloud', 'Buf1'), ('my-internap', 'ams01') ]: # Initialize cloud cloud = openstack.connect(cloud=cloud_name, region_name=region_name) Upload an Image --------------- * Picks the correct upload mechanism * **SUGGESTION** Always upload your own base images .. code:: python # Upload an image to the cloud image = cloud.create_image( 'devuan-jessie', filename='devuan-jessie.qcow2', wait=True, ) Always Upload an Image ---------------------- Ok. You don't have to. But, for multi-cloud... 
* Images with same content are named different on different clouds * Images with same name on different clouds can have different content * Upload your own to all clouds, both problems go away * Download from OS vendor or build with `diskimage-builder` Find a flavor ------------- * Flavors are all named differently on clouds * Flavors can be found via RAM * `get_flavor_by_ram` finds the smallest matching flavor .. code:: python # Find a flavor with at least 512M of RAM flavor = cloud.get_flavor_by_ram(512) Create a server --------------- * my-vexxhost * Boot server * Wait for `status==ACTIVE` * my-internap * Boot server on network `inap-17037-WAN1654` * Wait for `status==ACTIVE` * my-citycloud * Boot server * Wait for `status==ACTIVE` * Find the `port` for the `fixed_ip` for `server` * Create `floating-ip` on that `port` * Wait for `floating-ip` to attach .. code:: python # Boot a server, wait for it to boot, and then do whatever is needed # to get a public ip for it. cloud.create_server( 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) Wow. We didn't even deploy Wordpress! ------------------------------------- Image and Flavor by Name or ID ------------------------------ * Pass string to image/flavor * Image/Flavor will be found by name or ID * Common pattern * examples/cloud/create-server-name-or-id.py .. code:: python from openstack import cloud as openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) for cloud_name, region_name, image, flavor in [ ('my-vexxhost', 'ca-ymq-1', 'Ubuntu 16.04.1 LTS [2017-03-03]', 'v1-standard-4'), ('my-citycloud', 'Buf1', 'Ubuntu 16.04 Xenial Xerus', '4C-4GB-100GB'), ('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4')]: # Initialize cloud cloud = openstack.connect(cloud=cloud_name, region_name=region_name) # Boot a server, wait for it to boot, and then do whatever is needed # to get a public ip for it. 
server = cloud.create_server( 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) print(server.name) print(server['name']) cloud.pprint(server) # Delete it - this is a demo cloud.delete_server(server, wait=True, delete_ips=True) Delete Servers -------------- * `delete_ips` Delete any `floating_ips` the server may have .. code:: python cloud.delete_server('my-server', wait=True, delete_ips=True) Image and Flavor by Dict ------------------------ * Pass dict to image/flavor * If you know if the value is Name or ID * Common pattern * examples/cloud/create-server-dict.py .. code:: python from openstack import cloud as openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) for cloud_name, region_name, image, flavor_id in [ ('my-vexxhost', 'ca-ymq-1', 'Ubuntu 16.04.1 LTS [2017-03-03]', '5cf64088-893b-46b5-9bb1-ee020277635d'), ('my-citycloud', 'Buf1', 'Ubuntu 16.04 Xenial Xerus', '0dab10b5-42a2-438e-be7b-505741a7ffcc'), ('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4')]: # Initialize cloud cloud = openstack.connect(cloud=cloud_name, region_name=region_name) # Boot a server, wait for it to boot, and then do whatever is needed # to get a public ip for it. server = cloud.create_server( 'my-server', image=image, flavor=dict(id=flavor_id), wait=True, auto_ip=True) # Delete it - this is a demo cloud.delete_server(server, wait=True, delete_ips=True) Munch Objects ------------- * Behave like a dict and an object * examples/cloud/munch-dict-object.py .. 
code:: python from openstack import cloud as openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='zetta', region_name='no-osl1') image = cloud.get_image('Ubuntu 14.04 (AMD64) [Local Storage]') print(image.name) print(image['name']) API Organized by Logical Resource --------------------------------- * list_servers * search_servers * get_server * create_server * delete_server * update_server For other things, it's still {verb}_{noun} * attach_volume * wait_for_server * add_auto_ip Cleanup Script -------------- * Sometimes my examples had bugs * examples/cloud/cleanup-servers.py .. code:: python from openstack import cloud as openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) for cloud_name, region_name in [ ('my-vexxhost', 'ca-ymq-1'), ('my-citycloud', 'Buf1'), ('my-internap', 'ams01')]: # Initialize cloud cloud = openstack.connect(cloud=cloud_name, region_name=region_name) for server in cloud.search_servers('my-server'): cloud.delete_server(server, wait=True, delete_ips=True) Normalization ------------- * examples/cloud/normalization.py .. code:: python from openstack import cloud as openstack openstack.enable_logging() cloud = openstack.connect(cloud='fuga', region_name='cystack') image = cloud.get_image( 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image') cloud.pprint(image) Strict Normalized Results ------------------------- * Return only the declared model * examples/cloud/strict-mode.py .. code:: python from openstack import cloud as openstack openstack.enable_logging() cloud = openstack.connect( cloud='fuga', region_name='cystack', strict=True) image = cloud.get_image( 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image') cloud.pprint(image) How Did I Find the Image Name for the Last Example? --------------------------------------------------- * I often make stupid little utility scripts * examples/cloud/find-an-image.py .. 
code:: python from openstack import cloud as openstack openstack.enable_logging() cloud = openstack.connect(cloud='fuga', region_name='cystack') cloud.pprint([ image for image in cloud.list_images() if 'ubuntu' in image.name.lower()]) Added / Modified Information ---------------------------- * Servers need more extra help * Fetch addresses dict from neutron * Figure out which IPs are good * `detailed` - defaults to True, add everything * `bare` - no extra calls - don't even fix broken things * `bare` is still normalized * examples/cloud/server-information.py .. code:: python from openstack import cloud as openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='my-citycloud', region_name='Buf1') try: server = cloud.create_server( 'my-server', image='Ubuntu 16.04 Xenial Xerus', flavor=dict(id='0dab10b5-42a2-438e-be7b-505741a7ffcc'), wait=True, auto_ip=True) print("\n\nFull Server\n\n") cloud.pprint(server) print("\n\nTurn Detailed Off\n\n") cloud.pprint(cloud.get_server('my-server', detailed=False)) print("\n\nBare Server\n\n") cloud.pprint(cloud.get_server('my-server', bare=True)) finally: # Delete it - this is a demo cloud.delete_server(server, wait=True, delete_ips=True) Exceptions ---------- * All openstacksdk exceptions are subclasses of `OpenStackCloudException` * Direct REST calls throw `OpenStackCloudHTTPError` * `OpenStackCloudHTTPError` subclasses `OpenStackCloudException` and `requests.exceptions.HTTPError` * `OpenStackCloudURINotFound` for 404 * `OpenStackCloudBadRequest` for 400 User Agent Info --------------- * Set `app_name` and `app_version` for User Agents * (ssh ... `region_name` is optional if the cloud has one region) * examples/cloud/user-agent.py .. 
code:: python from openstack import cloud as openstack openstack.enable_logging(http_debug=True) cloud = openstack.connect( cloud='datacentred', app_name='AmazingApp', app_version='1.0', ) cloud.list_networks() Uploading Large Objects ----------------------- * swift has a maximum object size * Large Objects are uploaded specially * openstacksdk figures this out and does it * multi-threaded * examples/cloud/upload-object.py .. code:: python from openstack import cloud as openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='ovh', region_name='SBG1') cloud.create_object( container='my-container', name='my-object', filename='/home/mordred/briarcliff.sh3d', ) cloud.delete_object('my-container', 'my-object') cloud.delete_container('my-container') Uploading Large Objects ----------------------- * Default max_file_size is 5G * This is a conference demo * Let's force a segment_size * One MILLION bytes * examples/cloud/upload-object.py .. code:: python from openstack import cloud as openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='ovh', region_name='SBG1') cloud.create_object( container='my-container', name='my-object', filename='/home/mordred/briarcliff.sh3d', segment_size=1000000, ) cloud.delete_object('my-container', 'my-object') cloud.delete_container('my-container') Service Conditionals -------------------- .. code:: python from openstack import cloud as openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='kiss', region_name='region1') print(cloud.has_service('network')) print(cloud.has_service('container-orchestration')) Service Conditional Overrides ----------------------------- * Sometimes clouds are weird and figuring that out won't work .. code:: python from openstack import cloud as openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='rax', region_name='DFW') print(cloud.has_service('network')) .. 
code:: yaml clouds: rax: profile: rackspace auth: username: mordred project_id: 245018 # This is already in profile: rackspace has_network: false FIN --- ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1292572 openstacksdk-4.0.0/doc/source/user/proxies/0000775000175000017500000000000000000000000020743 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/accelerator.rst0000664000175000017500000000221700000000000023763 0ustar00zuulzuul00000000000000Accelerator API =============== .. automodule:: openstack.accelerator.v2._proxy The Accelerator Class --------------------- The accelerator high-level interface is available through the ``accelerator`` member of a :class:`~openstack.connection.Connection` object. The ``accelerator`` member will only be added if the service is detected. Device Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.accelerator.v2._proxy.Proxy :noindex: :members: devices, get_device Deployable Operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.accelerator.v2._proxy.Proxy :noindex: :members: deployables, get_deployable, update_deployable Device Profile Operations ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.accelerator.v2._proxy.Proxy :noindex: :members: device_profiles, get_device_profile, create_device_profile, delete_device_profile Accelerator Request Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.accelerator.v2._proxy.Proxy :noindex: :members: accelerator_requests, get_accelerator_request, create_accelerator_request, delete_accelerator_request, update_accelerator_request ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/baremetal.rst0000664000175000017500000000674000000000000023440 0ustar00zuulzuul00000000000000Baremetal API ============= For details on how to use baremetal, see :doc:`/user/guides/baremetal` .. automodule:: openstack.baremetal.v1._proxy The Baremetal Class ------------------- The baremetal high-level interface is available through the ``baremetal`` member of a :class:`~openstack.connection.Connection` object. The ``baremetal`` member will only be added if the service is detected. Node Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: nodes, create_node, find_node, get_node, update_node, patch_node, delete_node, set_node_provision_state, get_node_boot_device, set_node_boot_device, get_node_supported_boot_devices, set_node_boot_mode, set_node_secure_boot, inject_nmi_to_node, wait_for_nodes_provision_state, set_node_power_state, wait_for_node_power_state, wait_for_node_reservation, validate_node, set_node_maintenance, unset_node_maintenance, delete_node, list_node_vendor_passthru, get_node_console, enable_node_console, disable_node_console Node Trait Operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: add_node_trait, remove_node_trait, set_node_traits Port Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: ports, find_port, get_port, create_port, update_port, delete_port, patch_port Port Group Operations ^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: port_groups, find_port_group, get_port_group, create_port_group, update_port_group, delete_port_group, patch_port_group Driver Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: drivers, get_driver Chassis Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: chassis, find_chassis, get_chassis, create_chassis, update_chassis, patch_chassis, delete_chassis VIF Operations ^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: list_node_vifs, attach_vif_to_node, detach_vif_from_node Allocation Operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: allocations, get_allocation, create_allocation, update_allocation, patch_allocation, delete_allocation, wait_for_allocation Volume Connector Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: volume_connectors, find_volume_connector, get_volume_connector, create_volume_connector, update_volume_connector, patch_volume_connector, delete_volume_connector Volume Target Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: volume_targets, find_volume_target, get_volume_target, create_volume_target, update_volume_target, patch_volume_target, delete_volume_target Deploy Template Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal.v1._proxy.Proxy :noindex: :members: deploy_templates, get_deploy_template, create_deploy_template, update_deploy_template, patch_deploy_template, delete_deploy_template Utilities --------- Building config drives ^^^^^^^^^^^^^^^^^^^^^^ .. 
automodule:: openstack.baremetal.configdrive :noindex: :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/baremetal_introspection.rst0000664000175000017500000000133200000000000026410 0ustar00zuulzuul00000000000000Baremetal Introspection API =========================== .. automodule:: openstack.baremetal_introspection.v1._proxy The Baremetal Introspection Proxy --------------------------------- The baremetal introspection high-level interface is available through the ``baremetal_introspection`` member of a :class:`~openstack.connection.Connection` object. The ``baremetal_introspection`` member will only be added if the service is detected. Introspection Process Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.baremetal_introspection.v1._proxy.Proxy :noindex: :members: introspections, get_introspection, get_introspection_data, start_introspection, wait_for_introspection, abort_introspection ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/block_storage_v2.rst0000664000175000017500000000455500000000000024733 0ustar00zuulzuul00000000000000Block Storage API ================= For details on how to use block_storage, see :doc:`/user/guides/block_storage` .. automodule:: openstack.block_storage.v2._proxy The BlockStorage Class ---------------------- The block_storage high-level interface is available through the ``block_storage`` member of a :class:`~openstack.connection.Connection` object. The ``block_storage`` member will only be added if the service is detected. Volume Operations ^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.block_storage.v2._proxy.Proxy :noindex: :members: create_volume, delete_volume, get_volume, find_volume, volumes, get_volume_metadata, set_volume_metadata, delete_volume_metadata, extend_volume, retype_volume, set_volume_bootable_status, reset_volume_status, set_volume_image_metadata, delete_volume_image_metadata, attach_volume, detach_volume, unmanage_volume, migrate_volume, complete_volume_migration Backup Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v2._proxy.Proxy :noindex: :members: create_backup, delete_backup, get_backup, backups, restore_backup Capabilities Operations ^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v2._proxy.Proxy :noindex: :members: get_capabilities Limits Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v2._proxy.Proxy :noindex: :members: get_limits Type Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v2._proxy.Proxy :noindex: :members: create_type, delete_type, get_type, types Snapshot Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v2._proxy.Proxy :noindex: :members: create_snapshot, delete_snapshot, get_snapshot, snapshots Stats Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v2._proxy.Proxy :noindex: :members: backend_pools QuotaClassSet Operations ^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v2._proxy.Proxy :noindex: :members: get_quota_class_set, update_quota_class_set QuotaSet Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v2._proxy.Proxy :noindex: :members: get_quota_set, get_quota_set_defaults, revert_quota_set, update_quota_set Helpers ^^^^^^^ .. 
autoclass:: openstack.block_storage.v2._proxy.Proxy :noindex: :members: wait_for_status, wait_for_delete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/block_storage_v3.rst0000664000175000017500000001233600000000000024730 0ustar00zuulzuul00000000000000Block Storage API ================= For details on how to use block_storage, see :doc:`/user/guides/block_storage` .. automodule:: openstack.block_storage.v3._proxy The BlockStorage Class ---------------------- The block_storage high-level interface is available through the ``block_storage`` member of a :class:`~openstack.connection.Connection` object. The ``block_storage`` member will only be added if the service is detected. Volume Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: create_volume, delete_volume, update_volume, get_volume, find_volume, volumes, get_volume_metadata, set_volume_metadata, delete_volume_metadata, extend_volume, set_volume_readonly, retype_volume, set_volume_bootable_status, reset_volume_status, set_volume_image_metadata, delete_volume_image_metadata, revert_volume_to_snapshot, attach_volume, detach_volume, unmanage_volume, migrate_volume, complete_volume_migration, upload_volume_to_image, reserve_volume, unreserve_volume, begin_volume_detaching, abort_volume_detaching, init_volume_attachment, terminate_volume_attachment, manage_volume, Backend Pools Operations ^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: backend_pools Backup Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: create_backup, delete_backup, get_backup, find_backup, backups, restore_backup, reset_backup, Availability Zone Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: availability_zones Limits Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: get_limits Capabilities Operations ^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: get_capabilities Group Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: create_group, create_group_from_source, delete_group, update_group, get_group, find_group, groups, reset_group_state Group Snapshot Operations ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: create_group_snapshot, delete_group_snapshot, get_group_snapshot, find_group_snapshot, group_snapshots, reset_group_snapshot_state Group Type Operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: create_group_type, delete_group_type, update_group_type, get_group_type, find_group_type, group_types, fetch_group_type_group_specs, create_group_type_group_specs, get_group_type_group_specs_property, update_group_type_group_specs_property, delete_group_type_group_specs_property Service Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: find_service, services, enable_service, disable_service, thaw_service, freeze_service, failover_service Type Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: create_type, delete_type, update_type, get_type, find_type, types, update_type_extra_specs, delete_type_extra_specs, get_type_access, add_type_access, remove_type_access, get_type_encryption, create_type_encryption, delete_type_encryption, update_type_encryption Snapshot Operations ^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: create_snapshot, delete_snapshot, update_snapshot, get_snapshot, find_snapshot, snapshots, get_snapshot_metadata, set_snapshot_metadata, delete_snapshot_metadata, reset_snapshot, set_snapshot_status, manage_snapshot, unmanage_snapshot Stats Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: backend_pools QuotaClassSet Operations ^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: get_quota_class_set, update_quota_class_set QuotaSet Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: get_quota_set, get_quota_set_defaults, revert_quota_set, update_quota_set BlockStorageSummary Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: summary Attachments ^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: create_attachment, get_attachment, attachments, delete_attachment, update_attachment, complete_attachment Transfer Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: create_transfer, delete_transfer, find_transfer, get_transfer, transfers, accept_transfer Helpers ^^^^^^^ .. autoclass:: openstack.block_storage.v3._proxy.Proxy :noindex: :members: wait_for_status, wait_for_delete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/clustering.rst0000664000175000017500000000543600000000000023664 0ustar00zuulzuul00000000000000Cluster API =========== .. automodule:: openstack.clustering.v1._proxy The Cluster Class ----------------- The cluster high-level interface is available through the ``cluster`` member of a :class:`~openstack.connection.Connection` object. 
The ``cluster`` member will only be added if the service is detected. Build Info Operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: get_build_info Profile Type Operations ^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: profile_types, get_profile_type Profile Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: create_profile, update_profile, delete_profile, get_profile, find_profile, profiles, validate_profile Policy Type Operations ^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: policy_types, get_policy_type Policy Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: create_policy, update_policy, delete_policy, get_policy, find_policy, policies validate_policy Cluster Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: create_cluster, update_cluster, delete_cluster, get_cluster, find_cluster, clusters, check_cluster, recover_cluster, resize_cluster, scale_in_cluster, scale_out_cluster, collect_cluster_attrs, perform_operation_on_cluster, add_nodes_to_cluster, remove_nodes_from_cluster, replace_nodes_in_cluster, attach_policy_to_cluster, update_cluster_policy, detach_policy_from_cluster, get_cluster_policy, cluster_policies Node Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: create_node, update_node, delete_node, get_node, find_node, nodes, check_node, recover_node, perform_operation_on_node, adopt_node Receiver Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: create_receiver, update_receiver, delete_receiver, get_receiver, find_receiver, receivers Action Operations ^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: get_action, actions Event Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: get_event, events Helper Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: wait_for_delete, wait_for_status Service Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.clustering.v1._proxy.Proxy :noindex: :members: services ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/compute.rst0000664000175000017500000001247600000000000023163 0ustar00zuulzuul00000000000000Compute API =========== For details on how to use compute, see :doc:`/user/guides/compute` .. automodule:: openstack.compute.v2._proxy The Compute Class ----------------- The compute high-level interface is available through the ``compute`` member of a :class:`~openstack.connection.Connection` object. The ``compute`` member will only be added if the service is detected. Server Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: create_server, update_server, delete_server, get_server, find_server, servers, get_server_metadata, set_server_metadata, delete_server_metadata, wait_for_server, create_server_image, backup_server Network Actions *************** .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: add_fixed_ip_to_server, remove_fixed_ip_from_server, add_floating_ip_to_server, remove_floating_ip_from_server, fetch_server_security_groups, add_security_group_to_server, remove_security_group_from_server Starting, Stopping, etc. ************************ .. 
autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: start_server, stop_server, suspend_server, resume_server, reboot_server, restore_server, shelve_server, unshelve_server, lock_server, unlock_server, pause_server, unpause_server, rescue_server, unrescue_server, evacuate_server, migrate_server, get_server_console_output, live_migrate_server Modifying a Server ****************** .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: resize_server, confirm_server_resize, revert_server_resize, rebuild_server, reset_server_state, change_server_password, get_server_password Image Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: images, get_image, find_image, delete_image, get_image_metadata, set_image_metadata, delete_image_metadata Flavor Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: create_flavor, delete_flavor, get_flavor, find_flavor, flavors, flavor_add_tenant_access, flavor_remove_tenant_access, get_flavor_access, fetch_flavor_extra_specs, create_flavor_extra_specs, get_flavor_extra_specs_property, update_flavor_extra_specs_property, delete_flavor_extra_specs_property Service Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: services, enable_service, disable_service, update_service_forced_down, delete_service, update_service, find_service Volume Attachment Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: create_volume_attachment, update_volume_attachment, delete_volume_attachment, get_volume_attachment, volume_attachments Keypair Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: create_keypair, delete_keypair, get_keypair, find_keypair, keypairs Server IPs ^^^^^^^^^^ .. 
autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: server_ips Server Group Operations ^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: create_server_group, delete_server_group, get_server_group, find_server_group, server_groups Server Interface Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: create_server_interface, delete_server_interface, get_server_interface, server_interfaces, Server Tag Operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: add_tag_to_server, remove_tag_from_server, remove_tags_from_server Availability Zone Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: availability_zones Limits Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: get_limits Hypervisor Operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: get_hypervisor, find_hypervisor, hypervisors, get_hypervisor_uptime Extension Operations ^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: find_extension, extensions QuotaClassSet Operations ^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: get_quota_class_set, update_quota_class_set QuotaSet Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: get_quota_set, get_quota_set_defaults, revert_quota_set, update_quota_set Server Migration Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: abort_server_migration, force_complete_server_migration, get_server_migration, server_migrations Migration Operations ^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: migrations Helpers ^^^^^^^ .. 
autoclass:: openstack.compute.v2._proxy.Proxy :noindex: :members: wait_for_delete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/container_infrastructure_management.rst0000664000175000017500000000205100000000000031011 0ustar00zuulzuul00000000000000Container Infrastructure Management =================================== .. automodule:: openstack.container_infrastructure_management.v1._proxy Cluster Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.container_infrastructure_management.v1._proxy.Proxy :noindex: :members: create_cluster, delete_cluster, update_cluster, get_cluster, find_cluster, clusters Cluster Certificates Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.container_infrastructure_management.v1._proxy.Proxy :noindex: :members: create_cluster_certificate, get_cluster_certificate Cluster Templates Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.container_infrastructure_management.v1._proxy.Proxy :noindex: :members: create_cluster_template, delete_cluster_template, find_cluster_template, get_cluster_template, cluster_templates, update_cluster_template Service Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.container_infrastructure_management.v1._proxy.Proxy :noindex: :members: services ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/database.rst0000664000175000017500000000213100000000000023236 0ustar00zuulzuul00000000000000Database API ============ For details on how to use database, see :doc:`/user/guides/database` .. automodule:: openstack.database.v1._proxy The Database Class ------------------ The database high-level interface is available through the ``database`` member of a :class:`~openstack.connection.Connection` object. The ``database`` member will only be added if the service is detected. 
Database Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.database.v1._proxy.Proxy :noindex: :members: create_database, delete_database, get_database, find_database, databases Flavor Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.database.v1._proxy.Proxy :noindex: :members: get_flavor, find_flavor, flavors Instance Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.database.v1._proxy.Proxy :noindex: :members: create_instance, update_instance, delete_instance, get_instance, find_instance, instances User Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.database.v1._proxy.Proxy :noindex: :members: create_user, delete_user, get_user, find_user, users ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/dns.rst0000664000175000017500000000357100000000000022267 0ustar00zuulzuul00000000000000DNS API ======= For details on how to use dns, see :doc:`/user/guides/dns` .. automodule:: openstack.dns.v2._proxy The DNS Class ------------- The dns high-level interface is available through the ``dns`` member of a :class:`~openstack.connection.Connection` object. The ``dns`` member will only be added if the service is detected. DNS Zone Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.dns.v2._proxy.Proxy :noindex: :members: create_zone, delete_zone, get_zone, find_zone, zones, abandon_zone, xfr_zone Recordset Operations ^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.dns.v2._proxy.Proxy :noindex: :members: create_recordset, update_recordset, get_recordset, delete_recordset, recordsets Zone Import Operations ^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.dns.v2._proxy.Proxy :noindex: :members: zone_imports, create_zone_import, get_zone_import, delete_zone_import Zone Export Operations ^^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.dns.v2._proxy.Proxy :noindex: :members: zone_exports, create_zone_export, get_zone_export, get_zone_export_text, delete_zone_export FloatingIP Operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.dns.v2._proxy.Proxy :noindex: :members: floating_ips, get_floating_ip, update_floating_ip Zone Transfer Operations ^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.dns.v2._proxy.Proxy :noindex: :members: zone_transfer_requests, get_zone_transfer_request, create_zone_transfer_request, update_zone_transfer_request, delete_zone_transfer_request, zone_transfer_accepts, get_zone_transfer_accept, create_zone_transfer_accept Zone Share Operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.dns.v2._proxy.Proxy :noindex: :members: create_zone_share, delete_zone_share, get_zone_share, find_zone_share, zone_shares ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/identity_v2.rst0000664000175000017500000000210600000000000023734 0ustar00zuulzuul00000000000000Identity API v2 =============== For details on how to use identity, see :doc:`/user/guides/identity` .. automodule:: openstack.identity.v2._proxy The Identity v2 Class --------------------- The identity high-level interface is available through the ``identity`` member of a :class:`~openstack.connection.Connection` object. The ``identity`` member will only be added if the service is detected. Extension Operations ^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v2._proxy.Proxy :noindex: :members: get_extension, extensions User Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v2._proxy.Proxy :noindex: :members: create_user, update_user, delete_user, get_user, find_user, users Role Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v2._proxy.Proxy :noindex: :members: create_role, update_role, delete_role, get_role, find_role, roles Tenant Operations ^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.identity.v2._proxy.Proxy :noindex: :members: create_tenant, update_tenant, delete_tenant, get_tenant, find_tenant, tenants ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/identity_v3.rst0000664000175000017500000001323600000000000023743 0ustar00zuulzuul00000000000000Identity API v3 =============== For details on how to use identity, see :doc:`/user/guides/identity` .. automodule:: openstack.identity.v3._proxy The Identity v3 Class --------------------- The identity high-level interface is available through the ``identity`` member of a :class:`~openstack.connection.Connection` object. The ``identity`` member will only be added if the service is detected. Credential Operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_credential, update_credential, delete_credential, get_credential, find_credential, credentials Domain Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_domain, update_domain, delete_domain, get_domain, find_domain, domains Domain Config Operations ^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_domain_config, delete_domain_config, get_domain_config, update_domain_config Endpoint Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_endpoint, update_endpoint, delete_endpoint, get_endpoint, find_endpoint, endpoints Group Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_group, update_group, delete_group, get_group, find_group, groups, add_user_to_group, remove_user_from_group, check_user_in_group, group_users Policy Operations ^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_policy, update_policy, delete_policy, get_policy, find_policy, policies Project Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_project, update_project, delete_project, get_project, find_project, projects, user_projects Service Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_service, update_service, delete_service, get_service, find_service, services User Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_user, update_user, delete_user, get_user, find_user, users, Trust Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_trust, delete_trust, get_trust, find_trust, trusts Region Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_region, update_region, delete_region, get_region, find_region, regions Role Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_role, update_role, delete_role, get_role, find_role, roles Role Assignment Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: role_assignments, role_assignments_filter, assign_project_role_to_user, unassign_project_role_from_user, validate_user_has_project_role, assign_project_role_to_group, unassign_project_role_from_group, validate_group_has_project_role, assign_domain_role_to_user, unassign_domain_role_from_user, validate_user_has_domain_role, assign_domain_role_to_group, unassign_domain_role_from_group, validate_group_has_domain_role, assign_system_role_to_user, unassign_system_role_from_user, validate_user_has_system_role, assign_system_role_to_group, unassign_system_role_from_group, validate_group_has_system_role Registered Limit Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: registered_limits, get_registered_limit, create_registered_limit, update_registered_limit, delete_registered_limit Limit Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: limits, get_limit, create_limit, update_limit, delete_limit Application Credential Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: application_credentials, get_application_credential, create_application_credential, find_application_credential, delete_application_credential Federation Protocol Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_federation_protocol, delete_federation_protocol, find_federation_protocol, get_federation_protocol, federation_protocols, update_federation_protocol Mapping Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_mapping, delete_mapping, find_mapping, get_mapping, mappings, update_mapping Identity Provider Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_identity_provider, delete_identity_provider, find_identity_provider, get_identity_provider, identity_providers, update_identity_provider Access Rule Operations ^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: access_rules, access_rules, delete_access_rule Service Provider Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.identity.v3._proxy.Proxy :noindex: :members: create_service_provider, delete_service_provider, find_service_provider, get_service_provider, service_providers, update_service_provider ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/image_v1.rst0000664000175000017500000000101600000000000023163 0ustar00zuulzuul00000000000000Image API v1 ============ For details on how to use image, see :doc:`/user/guides/image` .. automodule:: openstack.image.v1._proxy The Image v1 Class ------------------ The image high-level interface is available through the ``image`` member of a :class:`~openstack.connection.Connection` object. The ``image`` member will only be added if the service is detected. .. autoclass:: openstack.image.v1._proxy.Proxy :noindex: :members: upload_image, update_image, delete_image, get_image, find_image, images ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/image_v2.rst0000664000175000017500000000606500000000000023175 0ustar00zuulzuul00000000000000Image API v2 ============ For details on how to use image, see :doc:`/user/guides/image` .. automodule:: openstack.image.v2._proxy The Image v2 Class ------------------ The image high-level interface is available through the ``image`` member of a :class:`~openstack.connection.Connection` object. The ``image`` member will only be added if the service is detected. 
Image Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: create_image, import_image, upload_image, download_image, update_image, delete_image, get_image, find_image, images, deactivate_image, reactivate_image, stage_image, add_tag, remove_tag Member Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: add_member, remove_member, update_member, get_member, find_member, members Task Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: tasks, create_task, get_task, wait_for_task Schema Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: get_images_schema, get_image_schema, get_members_schema, get_member_schema, get_tasks_schema, get_task_schema, get_metadef_namespace_schema, get_metadef_namespaces_schema, get_metadef_resource_type_schema, get_metadef_resource_types_schema, get_metadef_object_schema, get_metadef_objects_schema, get_metadef_property_schema, get_metadef_properties_schema, get_metadef_tag_schema, get_metadef_tags_schema Service Info Discovery Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: stores, get_import_info Metadef Namespace Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: create_metadef_namespace, delete_metadef_namespace, get_metadef_namespace, metadef_namespaces, update_metadef_namespace Metadef Object Operations ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: create_metadef_object, delete_metadef_object, get_metadef_object, metadef_objects, update_metadef_object Metadef Resource Type Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: metadef_resource_types, metadef_resource_type_associations, create_metadef_resource_type_association, delete_metadef_resource_type_association Metadef Property Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: create_metadef_property, update_metadef_property, delete_metadef_property, get_metadef_property Helpers ^^^^^^^ .. autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: wait_for_delete Cache Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.image.v2._proxy.Proxy :noindex: :members: cache_delete_image, queue_image, get_image_cache, clear_cache ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/key_manager.rst0000664000175000017500000000203300000000000023755 0ustar00zuulzuul00000000000000KeyManager API ============== For details on how to use key_management, see :doc:`/user/guides/key_manager` .. automodule:: openstack.key_manager.v1._proxy The KeyManager Class -------------------- The key_management high-level interface is available through the ``key_manager`` member of a :class:`~openstack.connection.Connection` object. The ``key_manager`` member will only be added if the service is detected. Secret Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.key_manager.v1._proxy.Proxy :noindex: :members: create_secret, update_secret, delete_secret, get_secret, find_secret, secrets Container Operations ^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.key_manager.v1._proxy.Proxy :noindex: :members: create_container, update_container, delete_container, get_container, find_container, containers Order Operations ^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.key_manager.v1._proxy.Proxy :noindex: :members: create_order, update_order, delete_order, get_order, find_order, orders ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/load_balancer_v2.rst0000664000175000017500000000726700000000000024666 0ustar00zuulzuul00000000000000Load Balancer v2 API ==================== .. automodule:: openstack.load_balancer.v2._proxy The LoadBalancer Class ---------------------- The load_balancer high-level interface is available through the ``load_balancer`` member of a :class:`~openstack.connection.Connection` object. The ``load_balancer`` member will only be added if the service is detected. Load Balancer Operations ^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_load_balancer, delete_load_balancer, find_load_balancer, get_load_balancer, get_load_balancer_statistics, load_balancers, update_load_balancer, failover_load_balancer, wait_for_load_balancer Listener Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_listener, delete_listener, find_listener, get_listener, get_listener_statistics, listeners, update_listener Pool Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_pool, delete_pool, find_pool, get_pool, pools, update_pool Member Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_member, delete_member, find_member, get_member, members, update_member Health Monitor Operations ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_health_monitor, delete_health_monitor, find_health_monitor, get_health_monitor, health_monitors, update_health_monitor L7 Policy Operations ^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_l7_policy, delete_l7_policy, find_l7_policy, get_l7_policy, l7_policies, update_l7_policy L7 Rule Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_l7_rule, delete_l7_rule, find_l7_rule, get_l7_rule, l7_rules, update_l7_rule Provider Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: providers, provider_flavor_capabilities Flavor Profile Operations ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_flavor_profile, get_flavor_profile, flavor_profiles, delete_flavor_profile, find_flavor_profile, update_flavor_profile Flavor Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_flavor, get_flavor, flavors, delete_flavor, find_flavor, update_flavor Quota Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: update_quota, delete_quota, quotas, get_quota, get_quota_default Amphora Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: amphorae, get_amphora, find_amphora, configure_amphora, failover_amphora Availability Zone Profile Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_availability_zone_profile, get_availability_zone_profile, availability_zone_profiles, delete_availability_zone_profile, find_availability_zone_profile, update_availability_zone_profile Availability Zone Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.load_balancer.v2._proxy.Proxy :noindex: :members: create_availability_zone, get_availability_zone, availability_zones, delete_availability_zone, find_availability_zone, update_availability_zone ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/message_v2.rst0000664000175000017500000000207000000000000023527 0ustar00zuulzuul00000000000000Message API v2 ============== For details on how to use message, see :doc:`/user/guides/message` .. automodule:: openstack.message.v2._proxy The Message v2 Class -------------------- The message high-level interface is available through the ``message`` member of a :class:`~openstack.connection.Connection` object. The ``message`` member will only be added if the service is detected. Message Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.message.v2._proxy.Proxy :noindex: :members: post_message, delete_message, get_message, messages Queue Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.message.v2._proxy.Proxy :noindex: :members: create_queue, delete_queue, get_queue, queues Claim Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.message.v2._proxy.Proxy :noindex: :members: create_claim, update_claim, delete_claim, get_claim Subscription Operations ^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.message.v2._proxy.Proxy :noindex: :members: create_subscription, delete_subscription, get_subscription, subscriptions ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/network.rst0000664000175000017500000003147000000000000023173 0ustar00zuulzuul00000000000000Network API =========== For details on how to use network, see :doc:`/user/guides/network` .. 
automodule:: openstack.network.v2._proxy The Network Class ----------------- The network high-level interface is available through the ``network`` member of a :class:`~openstack.connection.Connection` object. The ``network`` member will only be added if the service is detected. Network Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_network, update_network, delete_network, get_network, find_network, networks, get_network_ip_availability, find_network_ip_availability, network_ip_availabilities, add_dhcp_agent_to_network, remove_dhcp_agent_from_network, dhcp_agent_hosting_networks, Port Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_port, create_ports, update_port, delete_port, get_port, find_port, ports, add_ip_to_port, remove_ip_from_port Router Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_router, update_router, delete_router, get_router, find_router, routers, add_gateway_to_router, remove_gateway_from_router, add_external_gateways, update_external_gateways, remove_external_gateways, add_interface_to_router, remove_interface_from_router, add_extra_routes_to_router, remove_extra_routes_from_router, create_conntrack_helper, update_conntrack_helper, delete_conntrack_helper, get_conntrack_helper, conntrack_helpers Floating IP Operations ^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_ip, update_ip, delete_ip, get_ip, find_ip, find_available_ip, ips Pool Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_pool, update_pool, delete_pool, get_pool, find_pool, pools, create_pool_member, update_pool_member, delete_pool_member, get_pool_member, find_pool_member, pool_members Auto Allocated Topology Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: delete_auto_allocated_topology, get_auto_allocated_topology, validate_auto_allocated_topology Default Security Group Rules Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_default_security_group_rule, find_default_security_group_rule, get_default_security_group_rule, delete_default_security_group_rule, default_security_group_rules Security Group Operations ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_security_group, update_security_group, delete_security_group, get_security_group, get_security_group_rule, find_security_group, find_security_group_rule, security_group_rules, security_groups, create_security_group_rule, create_security_group_rules, delete_security_group_rule Address Group Operations ^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_address_group, delete_address_group, find_address_group, get_address_group, address_groups, update_address_group, add_addresses_to_address_group, remove_addresses_from_address_group Availability Zone Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: availability_zones Address Scope Operations ^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_address_scope, update_address_scope, delete_address_scope, get_address_scope, find_address_scope, address_scopes Quota Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: update_quota, delete_quota, get_quota, get_quota_default, quotas QoS Operations ^^^^^^^^^^^^^^ .. 
autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_qos_policy, update_qos_policy, delete_qos_policy, get_qos_policy, find_qos_policy, qos_policies, get_qos_rule_type, find_qos_rule_type, qos_rule_types, create_qos_minimum_bandwidth_rule, update_qos_minimum_bandwidth_rule, delete_qos_minimum_bandwidth_rule, get_qos_minimum_bandwidth_rule, find_qos_minimum_bandwidth_rule, qos_minimum_bandwidth_rules, create_qos_minimum_packet_rate_rule, update_qos_minimum_packet_rate_rule, delete_qos_minimum_packet_rate_rule, get_qos_minimum_packet_rate_rule, find_qos_minimum_packet_rate_rule, qos_minimum_packet_rate_rules, create_qos_bandwidth_limit_rule, update_qos_bandwidth_limit_rule, delete_qos_bandwidth_limit_rule, get_qos_bandwidth_limit_rule, find_qos_bandwidth_limit_rule, qos_bandwidth_limit_rules, create_qos_dscp_marking_rule, update_qos_dscp_marking_rule, delete_qos_dscp_marking_rule, get_qos_dscp_marking_rule, find_qos_dscp_marking_rule, qos_dscp_marking_rules Agent Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: delete_agent, update_agent, get_agent, agents, agent_hosted_routers, routers_hosting_l3_agents, network_hosting_dhcp_agents, add_router_to_agent, remove_router_from_agent RBAC Operations ^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_rbac_policy, update_rbac_policy, delete_rbac_policy, get_rbac_policy, find_rbac_policy, rbac_policies Listener Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_listener, update_listener, delete_listener, get_listener, find_listener, listeners Subnet Operations ^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_subnet, update_subnet, delete_subnet, get_subnet, get_subnet_ports, find_subnet, subnets, create_subnet_pool, update_subnet_pool, delete_subnet_pool, get_subnet_pool, find_subnet_pool, subnet_pools Load Balancer Operations ^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_load_balancer, update_load_balancer, delete_load_balancer, get_load_balancer, find_load_balancer, load_balancers Health Monitor Operations ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_health_monitor, update_health_monitor, delete_health_monitor, get_health_monitor, find_health_monitor, health_monitors Metering Label Operations ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_metering_label, update_metering_label, delete_metering_label, get_metering_label, find_metering_label, metering_labels, create_metering_label_rule, update_metering_label_rule, delete_metering_label_rule, get_metering_label_rule, find_metering_label_rule, metering_label_rules Segment Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_segment, update_segment, delete_segment, get_segment, find_segment, segments Flavor Operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_flavor, update_flavor, delete_flavor, get_flavor, find_flavor, flavors Service Profile Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_service_profile, update_service_profile, delete_service_profile, get_service_profile, find_service_profile, service_profiles, associate_flavor_with_service_profile, disassociate_flavor_from_service_profile Tag Operations ^^^^^^^^^^^^^^ .. 
autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: set_tags VPNaaS Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_vpn_service, update_vpn_service, delete_vpn_service, get_vpn_service, find_vpn_service, vpn_services, create_vpn_endpoint_group, update_vpn_endpoint_group, delete_vpn_endpoint_group, get_vpn_endpoint_group, find_vpn_endpoint_group, vpn_endpoint_groups, create_vpn_ipsec_site_connection, update_vpn_ipsec_site_connection, delete_vpn_ipsec_site_connection, get_vpn_ipsec_site_connection, find_vpn_ipsec_site_connection, vpn_ipsec_site_connections, create_vpn_ike_policy, update_vpn_ike_policy, delete_vpn_ike_policy, get_vpn_ike_policy, find_vpn_ike_policy, vpn_ike_policies, create_vpn_ipsec_policy, update_vpn_ipsec_policy, delete_vpn_ipsec_policy, get_vpn_ipsec_policy, find_vpn_ipsec_policy, vpn_ipsec_policies Extension Operations ^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: find_extension, extensions Service Provider Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: service_providers Local IP Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_local_ip, delete_local_ip, find_local_ip, get_local_ip, local_ips, update_local_ip, create_local_ip_association, delete_local_ip_association, find_local_ip_association, get_local_ip_association, local_ip_associations Ndp Proxy Operations ^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_ndp_proxy, get_ndp_proxy, find_ndp_proxy, delete_ndp_proxy, ndp_proxies, update_ndp_proxy BGP Operations ^^^^^^^^^^^^^^ .. 
autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_bgp_peer, delete_bgp_peer, find_bgp_peer, get_bgp_peer, update_bgp_peer, bgp_peers, create_bgp_speaker, delete_bgp_speaker, find_bgp_speaker, get_bgp_speaker, update_bgp_speaker, bgp_speakers, add_bgp_peer_to_speaker, remove_bgp_peer_from_speaker, add_gateway_network_to_speaker, remove_gateway_network_from_speaker, get_advertised_routes_of_speaker, get_bgp_dragents_hosting_speaker, add_bgp_speaker_to_dragent, get_bgp_speakers_hosted_by_dragent, remove_bgp_speaker_from_dragent Tap As A Service Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_tap_flow, delete_tap_flow, find_tap_flow, get_tap_flow, update_tap_flow, tap_flows, create_tap_service, delete_tap_service, find_tap_service, update_tap_service, tap_services BGPVPN operations ^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_bgpvpn, delete_bgpvpn, find_bgpvpn, get_bgpvpn, update_bgpvpn, bgpvpns, create_bgpvpn_network_association, delete_bgpvpn_network_association, get_bgpvpn_network_association, bgpvpn_network_associations, create_bgpvpn_port_association, delete_bgpvpn_port_association, find_bgpvpn_port_association, get_bgpvpn_port_association, update_bgpvpn_port_association, bgpvpn_port_associations, create_bgpvpn_router_association, delete_bgpvpn_router_association, get_bgpvpn_router_association, update_bgpvpn_router_association, bgpvpn_router_associations SFC operations ^^^^^^^^^^^^^^ .. 
autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_sfc_flow_classifier, delete_sfc_flow_classifier, find_sfc_flow_classifier, get_sfc_flow_classifier, update_sfc_flow_classifier, create_sfc_port_chain, delete_sfc_port_chain, find_sfc_port_chain, get_sfc_port_chain, update_sfc_port_chain, create_sfc_port_pair, delete_sfc_port_pair, find_sfc_port_pair, get_sfc_port_pair, update_sfc_port_pair, create_sfc_port_pair_group, delete_sfc_port_pair_group, find_sfc_port_pair_group, get_sfc_port_pair_group, update_sfc_port_pair_group, create_sfc_service_graph, delete_sfc_service_graph, find_sfc_service_graph, get_sfc_service_graph, update_sfc_service_graph Tap Mirror operations ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.network.v2._proxy.Proxy :noindex: :members: create_tap_mirror, delete_tap_mirror, find_tap_mirror, get_tap_mirror, tap_mirrors, update_tap_mirror ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/object_store.rst0000664000175000017500000000207000000000000024156 0ustar00zuulzuul00000000000000Object Store API ================ For details on how to use this API, see :doc:`/user/guides/object_store` .. automodule:: openstack.object_store.v1._proxy The Object Store Class ---------------------- The Object Store high-level interface is exposed as the ``object_store`` object on :class:`~openstack.connection.Connection` objects. Account Operations ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.object_store.v1._proxy.Proxy :noindex: :members: get_account_metadata, set_account_metadata, delete_account_metadata Container Operations ^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.object_store.v1._proxy.Proxy :noindex: :members: create_container, delete_container, containers, get_container_metadata, set_container_metadata, delete_container_metadata Object Operations ^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.object_store.v1._proxy.Proxy :noindex: :members: upload_object, download_object, copy_object, delete_object, get_object, objects, get_object_metadata, set_object_metadata, delete_object_metadata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/orchestration.rst0000664000175000017500000000347500000000000024372 0ustar00zuulzuul00000000000000Orchestration API ================= For details on how to use orchestration, see :doc:`/user/guides/orchestration` .. automodule:: openstack.orchestration.v1._proxy The Orchestration Class ----------------------- The orchestration high-level interface is available through the ``orchestration`` member of a :class:`~openstack.connection.Connection` object. The ``orchestration`` member will only be added if the service is detected. Stack Operations ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.orchestration.v1._proxy.Proxy :noindex: :members: create_stack, stacks,find_stack, update_stack, delete_stack, get_stack, export_stack, get_stack_template, get_stack_environment Stack Resource Operations ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.orchestration.v1._proxy.Proxy :noindex: :members: resources Stack Action Operations ^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.orchestration.v1._proxy.Proxy :noindex: :members: suspend_stack, resume_stack, check_stack Stack Event Operations ^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.orchestration.v1._proxy.Proxy :noindex: :members: stack_events Stack Template Operations ^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.orchestration.v1._proxy.Proxy :noindex: :members: validate_template Software Configuration Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: openstack.orchestration.v1._proxy.Proxy :noindex: :members: create_software_config, delete_software_config, get_software_config, software_configs Software Deployment Operations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.orchestration.v1._proxy.Proxy :noindex: :members: create_software_deployment, update_software_deployment, delete_software_deployment, get_software_deployment, software_deployments ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/placement.rst0000664000175000017500000000265400000000000023454 0ustar00zuulzuul00000000000000Placement API ============= .. automodule:: openstack.placement.v1._proxy The Placement Class ------------------- The placement high-level interface is available through the ``placement`` member of a :class:`~openstack.connection.Connection` object. The ``placement`` member will only be added if the service is detected. Resource Classes ^^^^^^^^^^^^^^^^ .. autoclass:: openstack.placement.v1._proxy.Proxy :noindex: :members: create_resource_class, update_resource_class, delete_resource_class, get_resource_class, resource_classes Resource Providers ^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.placement.v1._proxy.Proxy :noindex: :members: create_resource_provider, update_resource_provider, delete_resource_provider, get_resource_provider, find_resource_provider, resource_providers, get_resource_provider_aggregates, set_resource_provider_aggregates Resource Provider Inventories ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.placement.v1._proxy.Proxy :noindex: :members: create_resource_provider_inventory, update_resource_provider_inventory, delete_resource_provider_inventory, get_resource_provider_inventory, resource_provider_inventories Traits ^^^^^^ .. 
autoclass:: openstack.placement.v1._proxy.Proxy :noindex: :members: create_trait, delete_trait, get_trait, traits ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/shared_file_system.rst0000664000175000017500000001327400000000000025355 0ustar00zuulzuul00000000000000Shared File System API ====================== .. automodule:: openstack.shared_file_system.v2._proxy The Shared File System Class ---------------------------- The high-level interface for accessing the shared file systems service API is available through the ``shared_file_system`` member of a :class:`~openstack .connection.Connection` object. The ``shared_file_system`` member will only be added if the service is detected. ``share`` is an alias of the ``shared_file_system`` member. Shared File System Availability Zones ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Interact with Availability Zones supported by the Shared File Systems service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: availability_zones Shared File System Shares ^^^^^^^^^^^^^^^^^^^^^^^^^ Interact with Shares supported by the Shared File Systems service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: shares, get_share, delete_share, update_share, create_share, revert_share_to_snapshot, resize_share, find_share, manage_share, unmanage_share Shared File System Storage Pools ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Interact with the storage pool statistics exposed by the Shared File Systems Service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: storage_pools Shared File System User Messages ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ View and manipulate asynchronous user messages emitted by the Shared File Systems service. .. 
autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: user_messages, get_user_message, delete_user_message Shared File System Limits ^^^^^^^^^^^^^^^^^^^^^^^^^ Get absolute limits of resources supported by the Shared File Systems service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: limits Shared File System Snapshots ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Interact with Share Snapshots supported by the Shared File Systems service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: share_snapshots, get_share_snapshot, delete_share_snapshot, update_share_snapshot, create_share_snapshot Shared File System Share Snapshot Instances ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Interact with Share Snapshot Instances supported by the Shared File Systems service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: share_snapshot_instances, get_share_snapshot_instance Shared File System Share Networks ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Create and manipulate Share Networks with the Shared File Systems service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: share_networks, get_share_network, delete_share_network, update_share_network, create_share_network Shared File System Share Instances ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Administrators can list, show information for, explicitly set the state of, and force-delete share instances within the Shared File Systems Service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: share_instances, get_share_instance, reset_share_instance_status, delete_share_instance Shared File System Share Network Subnets ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Create and manipulate Share Network Subnets with the Shared File Systems service. .. 
autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: share_network_subnets, get_share_network_subnet, create_share_network_subnet, delete_share_network_subnet Shared File System Share Access Rules ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Create, View, and Delete access rules for shares from the Shared File Systems service. Access rules can also have their deletion and visibility restricted during creation. A lock reason can also be specified. The deletion restriction can be removed during the access removal. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: access_rules, get_access_rule, create_access_rule, delete_access_rule Shared File System Share Groups ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Interact with Share groups supported by the Shared File Systems service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: share_groups, get_share_group, delete_share_group, update_share_group, create_share_group, find_share_group Shared File System Share Group Snapshots ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Interact with Share Group Snapshots by the Shared File Systems service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: share_group_snapshots, get_share_group_snapshot, create_share_group_snapshot, reset_share_group_snapshot_status, update_share_group_snapshot, delete_share_group_snapshot Shared File System Share Metadata ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ List, Get, Create, Update, and Delete metadata for shares from the Shared File Systems service. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: get_share_metadata, get_share_metadata_item, create_share_metadata, update_share_metadata, delete_share_metadata Shared File System Resource Locks ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Create, list, update and delete locks for resources. 
When a resource is locked, it means that it can be deleted only by services, admins or the user that created the lock. .. autoclass:: openstack.shared_file_system.v2._proxy.Proxy :noindex: :members: resource_locks, get_resource_lock, update_resource_lock, delete_resource_lock, create_resource_lock ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/proxies/workflow.rst0000664000175000017500000000171400000000000023352 0ustar00zuulzuul00000000000000Workflow API ============ .. automodule:: openstack.workflow.v2._proxy The Workflow Class ------------------ The workflow high-level interface is available through the ``workflow`` member of a :class:`~openstack.connection.Connection` object. The ``workflow`` member will only be added if the service is detected. Workflow Operations ^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.workflow.v2._proxy.Proxy :noindex: :members: create_workflow, update_workflow, delete_workflow, get_workflow, find_workflow, workflows Execution Operations ^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.workflow.v2._proxy.Proxy :noindex: :members: create_execution, delete_execution, get_execution, find_execution, executions Cron Trigger Operations ^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: openstack.workflow.v2._proxy.Proxy :noindex: :members: create_cron_trigger, delete_cron_trigger, get_cron_trigger, find_cron_trigger, cron_triggers ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resource.rst0000664000175000017500000000054700000000000021641 0ustar00zuulzuul00000000000000Resource ======== .. automodule:: openstack.resource Components ---------- .. autoclass:: openstack.resource.Body :members: .. autoclass:: openstack.resource.Header :members: .. autoclass:: openstack.resource.URI :members: The Resource class ------------------ .. 
autoclass:: openstack.resource.Resource :members: :member-order: bysource ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.0732303 openstacksdk-4.0.0/doc/source/user/resources/0000775000175000017500000000000000000000000021264 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1292572 openstacksdk-4.0.0/doc/source/user/resources/accelerator/0000775000175000017500000000000000000000000023550 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/accelerator/index.rst0000664000175000017500000000023700000000000025413 0ustar00zuulzuul00000000000000Accelerator v2 Resources ======================== .. toctree:: :maxdepth: 1 v2/device v2/deployable v2/device_profile v2/accelerator_request ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1332593 openstacksdk-4.0.0/doc/source/user/resources/accelerator/v2/0000775000175000017500000000000000000000000024077 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/accelerator/v2/accelerator_request.rst0000664000175000017500000000060700000000000030670 0ustar00zuulzuul00000000000000openstack.accelerator.v2.accelerator_request ============================================ .. automodule:: openstack.accelerator.v2.accelerator_request The AcceleratorRequest Class ---------------------------- The ``AcceleratorRequest`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.accelerator.v2.accelerator_request.AcceleratorRequest :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/accelerator/v2/deployable.rst0000664000175000017500000000051500000000000026752 0ustar00zuulzuul00000000000000openstack.accelerator.v2.deployable ============================================ .. automodule:: openstack.accelerator.v2.deployable The Deployable Class -------------------- The ``Deployable`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.accelerator.v2.deployable.Deployable :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/accelerator/v2/device.rst0000664000175000017500000000046500000000000026075 0ustar00zuulzuul00000000000000openstack.accelerator.v2.device ============================================ .. automodule:: openstack.accelerator.v2.device The Device Class -------------------- The ``Device`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.accelerator.v2.device.Device :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/accelerator/v2/device_profile.rst0000664000175000017500000000054500000000000027614 0ustar00zuulzuul00000000000000openstack.accelerator.v2.device_profile ============================================ .. automodule:: openstack.accelerator.v2.device_profile The DeviceProfile Class ----------------------- The ``DeviceProfile`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.accelerator.v2.device_profile.DeviceProfile :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1332593 openstacksdk-4.0.0/doc/source/user/resources/baremetal/0000775000175000017500000000000000000000000023220 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/index.rst0000664000175000017500000000035400000000000025063 0ustar00zuulzuul00000000000000Baremetal Resources =================== .. toctree:: :maxdepth: 1 v1/driver v1/chassis v1/node v1/port v1/port_group v1/allocation v1/volume_connector v1/volume_target v1/deploy_templates v1/conductor ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1332593 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/0000775000175000017500000000000000000000000023546 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/allocation.rst0000664000175000017500000000047300000000000026431 0ustar00zuulzuul00000000000000openstack.baremetal.v1.Allocation ================================= .. automodule:: openstack.baremetal.v1.allocation The Allocation Class -------------------- The ``Allocation`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.baremetal.v1.allocation.Allocation :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/chassis.rst0000664000175000017500000000044300000000000025736 0ustar00zuulzuul00000000000000openstack.baremetal.v1.chassis ============================== .. 
automodule:: openstack.baremetal.v1.chassis The Chassis Class ----------------- The ``Chassis`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.baremetal.v1.chassis.Chassis :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/conductor.rst0000664000175000017500000000046300000000000026303 0ustar00zuulzuul00000000000000openstack.baremetal.v1.conductor ================================ .. automodule:: openstack.baremetal.v1.conductor The Conductor Class ------------------- The ``Conductor`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.baremetal.v1.conductor.Conductor :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/deploy_templates.rst0000664000175000017500000000054400000000000027655 0ustar00zuulzuul00000000000000openstack.baremetal.v1.deploy_templates ======================================= .. automodule:: openstack.baremetal.v1.deploy_templates The DeployTemplate Class ------------------------- The ``DeployTemplate`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.baremetal.v1.deploy_templates.DeployTemplate :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/driver.rst0000664000175000017500000000043300000000000025573 0ustar00zuulzuul00000000000000openstack.baremetal.v1.driver ============================= .. automodule:: openstack.baremetal.v1.driver The Driver Class ---------------- The ``Driver`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.baremetal.v1.driver.Driver :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/node.rst0000664000175000017500000000146600000000000025234 0ustar00zuulzuul00000000000000openstack.baremetal.v1.Node =========================== .. automodule:: openstack.baremetal.v1.node The Node Class -------------- The ``Node`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.baremetal.v1.node.Node :members: The PowerAction Class ^^^^^^^^^^^^^^^^^^^^^ The ``PowerAction`` enumeration represents known power actions. .. autoclass:: openstack.baremetal.v1.node.PowerAction :members: The ValidationResult Class ^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``ValidationResult`` class represents the result of a validation. .. autoclass:: openstack.baremetal.v1.node.ValidationResult :members: The WaitResult Class ^^^^^^^^^^^^^^^^^^^^ The ``WaitResult`` class represents the result of waiting for several nodes. .. autoclass:: openstack.baremetal.v1.node.WaitResult ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/port.rst0000664000175000017500000000041300000000000025262 0ustar00zuulzuul00000000000000openstack.baremetal.v1.port =========================== .. automodule:: openstack.baremetal.v1.port The Port Class -------------- The ``Port`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.baremetal.v1.port.Port :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/port_group.rst0000664000175000017500000000046700000000000026507 0ustar00zuulzuul00000000000000openstack.baremetal.v1.port_group ================================= .. 
automodule:: openstack.baremetal.v1.port_group The PortGroup Class ------------------- The ``PortGroup`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.baremetal.v1.port_group.PortGroup :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/volume_connector.rst0000664000175000017500000000054700000000000027667 0ustar00zuulzuul00000000000000openstack.baremetal.v1.volume_connector ======================================= .. automodule:: openstack.baremetal.v1.volume_connector The VolumeConnector Class ------------------------- The ``VolumeConnector`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.baremetal.v1.volume_connector.VolumeConnector :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal/v1/volume_target.rst0000664000175000017500000000052500000000000027157 0ustar00zuulzuul00000000000000openstack.baremetal.v1.volume_target ======================================= .. automodule:: openstack.baremetal.v1.volume_target The VolumeTarget Class ------------------------- The ``VolumeTarget`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.baremetal.v1.volume_target.VolumeTarget :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1332593 openstacksdk-4.0.0/doc/source/user/resources/baremetal_introspection/0000775000175000017500000000000000000000000026200 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal_introspection/index.rst0000664000175000017500000000022000000000000030033 0ustar00zuulzuul00000000000000Baremetal Introspection Resources ================================= .. toctree:: :maxdepth: 1 v1/introspection v1/introspection_rule ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1372612 openstacksdk-4.0.0/doc/source/user/resources/baremetal_introspection/v1/0000775000175000017500000000000000000000000026526 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal_introspection/v1/introspection.rst0000664000175000017500000000061300000000000032160 0ustar00zuulzuul00000000000000openstack.baremetal_introspection.v1.Introspection ================================================== .. automodule:: openstack.baremetal_introspection.v1.introspection The Introspection Class ----------------------- The ``Introspection`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.baremetal_introspection.v1.introspection.Introspection :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/baremetal_introspection/v1/introspection_rule.rst0000664000175000017500000000066100000000000033212 0ustar00zuulzuul00000000000000openstack.baremetal_introspection.v1.introspection_rule ======================================================== .. automodule:: openstack.baremetal_introspection.v1.introspection_rule The IntrospectionRule Class ---------------------------- The ``IntrospectionRule`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.baremetal_introspection.v1.introspection_rule.IntrospectionRule :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1372612 openstacksdk-4.0.0/doc/source/user/resources/block_storage/0000775000175000017500000000000000000000000024102 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/index.rst0000664000175000017500000000040000000000000025735 0ustar00zuulzuul00000000000000Block Storage Resources ======================= Block Storage v2 Resources -------------------------- .. toctree:: :maxdepth: 1 :glob: v2/* Block Storage v3 Resources -------------------------- .. 
toctree:: :maxdepth: 1 :glob: v3/* ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1372612 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v2/0000775000175000017500000000000000000000000024431 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v2/backup.rst0000664000175000017500000000045300000000000026432 0ustar00zuulzuul00000000000000openstack.block_storage.v2.backup ================================= .. automodule:: openstack.block_storage.v2.backup The Backup Class ---------------- The ``Backup`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v2.backup.Backup :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v2/capabilities.rst0000664000175000017500000000053300000000000027615 0ustar00zuulzuul00000000000000openstack.block_storage.v2.capabilities ======================================= .. automodule:: openstack.block_storage.v2.capabilities The Capabilities Class ---------------------- The ``Capabilities`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v2.capabilities.Capabilities :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v2/limits.rst0000664000175000017500000000157600000000000026475 0ustar00zuulzuul00000000000000openstack.block_storage.v2.limits ================================= .. automodule:: openstack.block_storage.v2.limits The AbsoluteLimit Class ----------------------- The ``AbsoluteLimit`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.block_storage.v2.limits.AbsoluteLimit :members: The Limits Class ---------------- The ``Limit`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v2.limits.Limits :members: The RateLimit Class ------------------- The ``RateLimit`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v2.limits.RateLimit :members: The RateLimits Class -------------------- The ``RateLimits`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v2.limits.RateLimits :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v2/quota_set.rst0000664000175000017500000000047700000000000027177 0ustar00zuulzuul00000000000000openstack.block_storage.v2.quota_set ==================================== .. automodule:: openstack.block_storage.v2.quota_set The QuotaSet Class ------------------ The ``QuotaSet`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v2.quota_set.QuotaSet :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v2/snapshot.rst0000664000175000017500000000104200000000000027017 0ustar00zuulzuul00000000000000openstack.block_storage.v2.snapshot =================================== .. automodule:: openstack.block_storage.v2.snapshot The Snapshot Class ------------------ The ``Snapshot`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v2.snapshot.Snapshot :members: The SnapshotDetail Class ------------------------ The ``SnapshotDetail`` class inherits from :class:`~openstack.block_storage.v2.snapshot.Snapshot`. .. 
autoclass:: openstack.block_storage.v2.snapshot.SnapshotDetail :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v2/stats.rst0000664000175000017500000000044300000000000026322 0ustar00zuulzuul00000000000000openstack.block_storage.v2.stats ================================ .. automodule:: openstack.block_storage.v2.stats The Pools Class --------------- The ``Pools`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v2.stats.Pools :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v2/type.rst0000664000175000017500000000043400000000000026145 0ustar00zuulzuul00000000000000openstack.block_storage.v2.type =============================== .. automodule:: openstack.block_storage.v2.type The Type Class -------------- The ``Type`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v2.type.Type :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v2/volume.rst0000664000175000017500000000045300000000000026474 0ustar00zuulzuul00000000000000openstack.block_storage.v2.volume ================================= .. automodule:: openstack.block_storage.v2.volume The Volume Class ---------------- The ``Volume`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.block_storage.v2.volume.Volume :members: ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.141263 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/0000775000175000017500000000000000000000000024432 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/attachment.rst0000664000175000017500000000057100000000000027317 0ustar00zuulzuul00000000000000openstack.block_storage.v3.attachment ===================================== .. automodule:: openstack.block_storage.v3.attachment The Volume Attachment Class --------------------------- The ``Volume Attachment`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.attachment.Attachment :members: create, update, complete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/availability_zone.rst0000664000175000017500000000057700000000000030702 0ustar00zuulzuul00000000000000openstack.block_storage.v3.availability_zone ============================================ .. automodule:: openstack.block_storage.v3.availability_zone The AvailabilityZone Class -------------------------- The ``AvailabilityZone`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.availability_zone.AvailabilityZone :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/backup.rst0000664000175000017500000000045300000000000026433 0ustar00zuulzuul00000000000000openstack.block_storage.v3.backup ================================= .. 
automodule:: openstack.block_storage.v3.backup The Backup Class ---------------- The ``Backup`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.backup.Backup :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/block_storage_summary.rst0000664000175000017500000000064200000000000031561 0ustar00zuulzuul00000000000000openstack.block_storage.v3.block_storage_summary ================================================ .. automodule:: openstack.block_storage.v3.block_storage_summary The Block Storage Summary Class ------------------------------- The ``Block Storage Summary`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.block_storage_summary.BlockStorageSummary :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/capabilities.rst0000664000175000017500000000053300000000000027616 0ustar00zuulzuul00000000000000openstack.block_storage.v3.capabilities ======================================= .. automodule:: openstack.block_storage.v3.capabilities The Capabilities Class ---------------------- The ``Capabilities`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.capabilities.Capabilities :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/extension.rst0000664000175000017500000000050300000000000027176 0ustar00zuulzuul00000000000000openstack.block_storage.v3.extension ==================================== .. 
automodule:: openstack.block_storage.v3.extension The Extension Class ------------------- The ``Extension`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.extension.Extension :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/group.rst0000664000175000017500000000044300000000000026321 0ustar00zuulzuul00000000000000openstack.block_storage.v3.group ================================ .. automodule:: openstack.block_storage.v3.group The Group Class --------------- The ``Group`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.group.Group :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/group_snapshot.rst0000664000175000017500000000054700000000000030245 0ustar00zuulzuul00000000000000openstack.block_storage.v3.group_snapshot ========================================= .. automodule:: openstack.block_storage.v3.group_snapshot The GroupSnapshot Class ----------------------- The ``GroupSnapshot`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.group_snapshot.GroupSnapshot :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/group_type.rst0000664000175000017500000000050700000000000027363 0ustar00zuulzuul00000000000000openstack.block_storage.v3.group_type ===================================== .. automodule:: openstack.block_storage.v3.group_type The GroupType Class ------------------- The ``GroupType`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.block_storage.v3.group_type.GroupType :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/limits.rst0000664000175000017500000000157700000000000026477 0ustar00zuulzuul00000000000000openstack.block_storage.v3.limits ================================= .. automodule:: openstack.block_storage.v3.limits The AbsoluteLimit Class ----------------------- The ``AbsoluteLimit`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.limits.AbsoluteLimit :members: The Limits Class ---------------- The ``Limits`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.limits.Limits :members: The RateLimit Class ------------------- The ``RateLimit`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.limits.RateLimit :members: The RateLimits Class -------------------- The ``RateLimits`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.limits.RateLimits :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/quota_set.rst0000664000175000017500000000047700000000000027200 0ustar00zuulzuul00000000000000openstack.block_storage.v3.quota_set ==================================== .. automodule:: openstack.block_storage.v3.quota_set The QuotaSet Class ------------------ The ``QuotaSet`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.block_storage.v3.quota_set.QuotaSet :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/resource_filter.rst0000664000175000017500000000055700000000000030367 0ustar00zuulzuul00000000000000openstack.block_storage.v3.resource_filter ========================================== .. automodule:: openstack.block_storage.v3.resource_filter The ResourceFilter Class ------------------------ The ``ResourceFilter`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.resource_filter.ResourceFilter :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/service.rst0000664000175000017500000000046300000000000026627 0ustar00zuulzuul00000000000000openstack.block_storage.v3.service ================================== .. automodule:: openstack.block_storage.v3.service The Service Class ----------------- The ``Service`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.service.Service :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/snapshot.rst0000664000175000017500000000104200000000000027020 0ustar00zuulzuul00000000000000openstack.block_storage.v3.snapshot =================================== .. automodule:: openstack.block_storage.v3.snapshot The Snapshot Class ------------------ The ``Snapshot`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.snapshot.Snapshot :members: The SnapshotDetail Class ------------------------ The ``SnapshotDetail`` class inherits from :class:`~openstack.block_storage.v3.snapshot.Snapshot`. .. 
autoclass:: openstack.block_storage.v3.snapshot.SnapshotDetail :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/stats.rst0000664000175000017500000000044300000000000026323 0ustar00zuulzuul00000000000000openstack.block_storage.v3.stats ================================ .. automodule:: openstack.block_storage.v3.stats The Pools Class --------------- The ``Pools`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.stats.Pools :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/transfer.rst0000664000175000017500000000052000000000000027005 0ustar00zuulzuul00000000000000openstack.block_storage.v3.transfer =================================== .. automodule:: openstack.block_storage.v3.transfer The Volume Transfer Class ------------------------- The ``Volume Transfer`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.transfer.Transfer :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/type.rst0000664000175000017500000000075500000000000026154 0ustar00zuulzuul00000000000000openstack.block_storage.v3.type =============================== .. automodule:: openstack.block_storage.v3.type The Type Class -------------- The ``Type`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.type.Type :members: The TypeEncryption Class ------------------------ The ``TypeEncryption`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.block_storage.v3.type.TypeEncryption :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/block_storage/v3/volume.rst0000664000175000017500000000045300000000000026475 0ustar00zuulzuul00000000000000openstack.block_storage.v3.volume ================================= .. automodule:: openstack.block_storage.v3.volume The Volume Class ---------------- The ``Volume`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.block_storage.v3.volume.Volume :members: ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.141263 openstacksdk-4.0.0/doc/source/user/resources/clustering/0000775000175000017500000000000000000000000023443 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/index.rst0000664000175000017500000000035200000000000025304 0ustar00zuulzuul00000000000000Cluster Resources ================= .. toctree:: :maxdepth: 1 v1/build_info v1/profile_type v1/profile v1/policy_type v1/policy v1/cluster v1/node v1/cluster_policy v1/receiver v1/action v1/event ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.145265 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/0000775000175000017500000000000000000000000023771 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/action.rst0000664000175000017500000000043700000000000026004 0ustar00zuulzuul00000000000000openstack.clustering.v1.action ============================== .. 
automodule:: openstack.clustering.v1.action The Action Class ---------------- The ``Action`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.clustering.v1.action.Action :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/build_info.rst0000664000175000017500000000047300000000000026641 0ustar00zuulzuul00000000000000openstack.clustering.v1.build_info ================================== .. automodule:: openstack.clustering.v1.build_info The BuildInfo Class ------------------- The ``BuildInfo`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.clustering.v1.build_info.BuildInfo :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/cluster.rst0000664000175000017500000000044700000000000026211 0ustar00zuulzuul00000000000000openstack.clustering.v1.Cluster =============================== .. automodule:: openstack.clustering.v1.cluster The Cluster Class ----------------- The ``Cluster`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.clustering.v1.cluster.Cluster :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/cluster_policy.rst0000664000175000017500000000053300000000000027564 0ustar00zuulzuul00000000000000openstack.clustering.v1.cluster_policy ====================================== .. automodule:: openstack.clustering.v1.cluster_policy The ClusterPolicy Class ----------------------- The ``ClusterPolicy`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.clustering.v1.cluster_policy.ClusterPolicy :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/event.rst0000664000175000017500000000042700000000000025647 0ustar00zuulzuul00000000000000openstack.clustering.v1.event ============================= .. automodule:: openstack.clustering.v1.event The Event Class --------------- The ``Event`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.clustering.v1.event.Event :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/node.rst0000664000175000017500000000041700000000000025452 0ustar00zuulzuul00000000000000openstack.clustering.v1.Node ============================ .. automodule:: openstack.clustering.v1.node The Node Class -------------- The ``Node`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.clustering.v1.node.Node :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/policy.rst0000664000175000017500000000043700000000000026026 0ustar00zuulzuul00000000000000openstack.clustering.v1.policy ============================== .. automodule:: openstack.clustering.v1.policy The Policy Class ---------------- The ``Policy`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.clustering.v1.policy.Policy :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/policy_type.rst0000664000175000017500000000050300000000000027061 0ustar00zuulzuul00000000000000openstack.clustering.v1.policy_type =================================== .. 
automodule:: openstack.clustering.v1.policy_type The PolicyType Class -------------------- The ``PolicyType`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.clustering.v1.policy_type.PolicyType :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/profile.rst0000664000175000017500000000044700000000000026170 0ustar00zuulzuul00000000000000openstack.clustering.v1.profile =============================== .. automodule:: openstack.clustering.v1.profile The Profile Class ----------------- The ``Profile`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.clustering.v1.profile.Profile :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/profile_type.rst0000664000175000017500000000051300000000000027223 0ustar00zuulzuul00000000000000openstack.clustering.v1.profile_type ==================================== .. automodule:: openstack.clustering.v1.profile_type The ProfileType Class --------------------- The ``ProfileType`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.clustering.v1.profile_type.ProfileType :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/clustering/v1/receiver.rst0000664000175000017500000000045700000000000026335 0ustar00zuulzuul00000000000000openstack.clustering.v1.receiver ================================ .. automodule:: openstack.clustering.v1.receiver The Receiver Class ------------------ The ``Receiver`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.clustering.v1.receiver.Receiver :members: ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.145265 openstacksdk-4.0.0/doc/source/user/resources/compute/0000775000175000017500000000000000000000000022740 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/index.rst0000664000175000017500000000014000000000000024574 0ustar00zuulzuul00000000000000Compute Resources ================= .. toctree:: :maxdepth: 1 :glob: v2/* version ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.149267 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/0000775000175000017500000000000000000000000023267 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/aggregate.rst0000664000175000017500000000045300000000000025751 0ustar00zuulzuul00000000000000openstack.compute.v2.aggregate ============================== .. automodule:: openstack.compute.v2.aggregate The Aggregate Class ------------------- The ``Aggregate`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.aggregate.Aggregate :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/availability_zone.rst0000664000175000017500000000054700000000000027534 0ustar00zuulzuul00000000000000openstack.compute.v2.availability_zone ====================================== .. automodule:: openstack.compute.v2.availability_zone The AvailabilityZone Class -------------------------- The ``AvailabilityZone`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.compute.v2.availability_zone.AvailabilityZone :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/extension.rst0000664000175000017500000000045300000000000026037 0ustar00zuulzuul00000000000000openstack.compute.v2.extension ============================== .. automodule:: openstack.compute.v2.extension The Extension Class ------------------- The ``Extension`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.extension.Extension :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/flavor.rst0000664000175000017500000000074000000000000025313 0ustar00zuulzuul00000000000000openstack.compute.v2.flavor =========================== .. automodule:: openstack.compute.v2.flavor The Flavor Class ---------------- The ``Flavor`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.flavor.Flavor :members: The FlavorDetail Class ---------------------- The ``FlavorDetail`` class inherits from :class:`~openstack.compute.v2.flavor.Flavor`. .. autoclass:: openstack.compute.v2.flavor.FlavorDetail :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/hypervisor.rst0000664000175000017500000000046300000000000026236 0ustar00zuulzuul00000000000000openstack.compute.v2.hypervisor =============================== .. automodule:: openstack.compute.v2.hypervisor The Hypervisor Class -------------------- The ``Hypervisor`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.compute.v2.hypervisor.Hypervisor :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/image.rst0000664000175000017500000000072100000000000025103 0ustar00zuulzuul00000000000000openstack.compute.v2.image ========================== .. automodule:: openstack.compute.v2.image The Image Class --------------- The ``Image`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.image.Image :members: The ImageDetail Class --------------------- The ``ImageDetail`` class inherits from :class:`~openstack.compute.v2.image.Image`. .. autoclass:: openstack.compute.v2.image.ImageDetail :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/keypair.rst0000664000175000017500000000043300000000000025465 0ustar00zuulzuul00000000000000openstack.compute.v2.keypair ============================ .. automodule:: openstack.compute.v2.keypair The Keypair Class ----------------- The ``Keypair`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.keypair.Keypair :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/limits.rst0000664000175000017500000000123300000000000025321 0ustar00zuulzuul00000000000000openstack.compute.v2.limits =========================== .. automodule:: openstack.compute.v2.limits The Limits Class ---------------- The ``Limits`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.limits.Limits :members: The AbsoluteLimits Class ------------------------ The ``AbsoluteLimits`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.compute.v2.limits.AbsoluteLimits :members: The RateLimit Class ------------------- The ``RateLimit`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.limits.RateLimit :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/migration.rst0000664000175000017500000000045300000000000026014 0ustar00zuulzuul00000000000000openstack.compute.v2.migration ============================== .. automodule:: openstack.compute.v2.migration The Migration Class ------------------- The ``Migration`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.migration.Migration :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/quota_set.rst0000664000175000017500000000044700000000000026032 0ustar00zuulzuul00000000000000openstack.compute.v2.quota_set ============================== .. automodule:: openstack.compute.v2.quota_set The QuotaSet Class ------------------ The ``QuotaSet`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.quota_set.QuotaSet :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/server.rst0000664000175000017500000000042300000000000025326 0ustar00zuulzuul00000000000000openstack.compute.v2.server =========================== .. automodule:: openstack.compute.v2.server The Server Class ---------------- The ``Server`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.compute.v2.server.Server :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/server_action.rst0000664000175000017500000000105000000000000026660 0ustar00zuulzuul00000000000000openstack.compute.v2.server_action ================================== .. automodule:: openstack.compute.v2.server_action The ServerAction Class ---------------------- The ``ServerAction`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.server_action.ServerAction :members: The ServerActionEvent Class --------------------------- The ``ServerActionEvent`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.server_action.ServerActionEvent :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/server_diagnostics.rst0000664000175000017500000000055700000000000027725 0ustar00zuulzuul00000000000000openstack.compute.v2.server_diagnostics ======================================= .. automodule:: openstack.compute.v2.server_diagnostics The ServerDiagnostics Class --------------------------- The ``ServerDiagnostics`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.server_diagnostics.ServerDiagnostics :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/server_group.rst0000664000175000017500000000047700000000000026553 0ustar00zuulzuul00000000000000openstack.compute.v2.server_group ================================= .. automodule:: openstack.compute.v2.server_group The ServerGroup Class --------------------- The ``ServerGroup`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.compute.v2.server_group.ServerGroup :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/server_interface.rst0000664000175000017500000000053700000000000027354 0ustar00zuulzuul00000000000000openstack.compute.v2.server_interface ===================================== .. automodule:: openstack.compute.v2.server_interface The ServerInterface Class ------------------------- The ``ServerInterface`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.server_interface.ServerInterface :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/server_ip.rst0000664000175000017500000000044700000000000026024 0ustar00zuulzuul00000000000000openstack.compute.v2.server_ip ============================== .. automodule:: openstack.compute.v2.server_ip The ServerIP Class ------------------ The ``ServerIP`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.server_ip.ServerIP :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/server_migration.rst0000664000175000017500000000053700000000000027405 0ustar00zuulzuul00000000000000openstack.compute.v2.server_migration ===================================== .. automodule:: openstack.compute.v2.server_migration The ServerMigration Class ------------------------- The ``ServerMigration`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.compute.v2.server_migration.ServerMigration :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/server_remote_console.rst0000664000175000017500000000060300000000000030423 0ustar00zuulzuul00000000000000openstack.compute.v2.server_remote_console ========================================== .. automodule:: openstack.compute.v2.server_remote_console The ServerRemoteConsole Class ----------------------------- The ``ServerRemoteConsole`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.server_remote_console.ServerRemoteConsole :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/service.rst0000664000175000017500000000043300000000000025461 0ustar00zuulzuul00000000000000openstack.compute.v2.service ============================ .. automodule:: openstack.compute.v2.service The Service Class ----------------- The ``Service`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.service.Service :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/usage.rst0000664000175000017500000000071400000000000025127 0ustar00zuulzuul00000000000000openstack.compute.v2.usage ========================== .. automodule:: openstack.compute.v2.usage The Usage Class --------------- The ``Usage`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.usage.Usage :members: The ServerUsage Class --------------------- The ``ServerUsage`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.compute.v2.usage.ServerUsage :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/v2/volume_attachment.rst0000664000175000017500000000054700000000000027546 0ustar00zuulzuul00000000000000openstack.compute.v2.volume_attachment ====================================== .. automodule:: openstack.compute.v2.volume_attachment The VolumeAttachment Class -------------------------- The ``VolumeAttachment`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.volume_attachment.VolumeAttachment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/compute/version.rst0000664000175000017500000000041700000000000025161 0ustar00zuulzuul00000000000000openstack.compute.version ========================= .. automodule:: openstack.compute.version The Version Class ----------------- The ``Version`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.version.Version :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1532688 openstacksdk-4.0.0/doc/source/user/resources/container_infrastructure_management/0000775000175000017500000000000000000000000030602 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/container_infrastructure_management/cluster.rst0000664000175000017500000000061400000000000033016 0ustar00zuulzuul00000000000000openstack.container_infrastructure_management.v1.cluster ======================================================== .. 
automodule:: openstack.container_infrastructure_management.v1.cluster The Cluster Class ------------------ The ``Cluster`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.container_infrastructure_management.v1.cluster.Cluster :members: ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=openstacksdk-4.0.0/doc/source/user/resources/container_infrastructure_management/cluster_certificate.rst 22 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/container_infrastructure_management/cluster_certificate0000664000175000017500000000075100000000000034553 0ustar00zuulzuul00000000000000openstack.container_infrastructure_management.v1.cluster_certificate ==================================================================== .. automodule:: openstack.container_infrastructure_management.v1.cluster_certificate The Cluster Certificate Class ----------------------------- The ``ClusterCertificate`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.container_infrastructure_management.v1.cluster_certificate.ClusterCertificate :members: ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=openstacksdk-4.0.0/doc/source/user/resources/container_infrastructure_management/cluster_template.rst 22 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/container_infrastructure_management/cluster_template.rs0000664000175000017500000000072100000000000034524 0ustar00zuulzuul00000000000000openstack.container_infrastructure_management.v1.cluster_template ================================================================= .. automodule:: openstack.container_infrastructure_management.v1.cluster_template The Cluster Template Class -------------------------- The ``ClusterTemplate`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/container_infrastructure_management/index.rst0000664000175000017500000000027400000000000032446 0ustar00zuulzuul00000000000000Container Infrastructure Management Resources ============================================= .. toctree:: :maxdepth: 1 cluster cluster_certificate cluster_template service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/container_infrastructure_management/service.rst0000664000175000017500000000061300000000000032774 0ustar00zuulzuul00000000000000openstack.container_infrastructure_management.v1.service ======================================================== .. automodule:: openstack.container_infrastructure_management.v1.service The Service Class ----------------- The ``Service`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.container_infrastructure_management.v1.service.Service :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1532688 openstacksdk-4.0.0/doc/source/user/resources/database/0000775000175000017500000000000000000000000023030 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/database/index.rst0000664000175000017500000000017300000000000024672 0ustar00zuulzuul00000000000000Database Resources ================== .. 
toctree:: :maxdepth: 1 v1/database v1/flavor v1/instance v1/user ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1532688 openstacksdk-4.0.0/doc/source/user/resources/database/v1/0000775000175000017500000000000000000000000023356 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/database/v1/database.rst0000664000175000017500000000044700000000000025661 0ustar00zuulzuul00000000000000openstack.database.v1.database ============================== .. automodule:: openstack.database.v1.database The Database Class ------------------ The ``Database`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.database.v1.database.Database :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/database/v1/flavor.rst0000664000175000017500000000042700000000000025404 0ustar00zuulzuul00000000000000openstack.database.v1.flavor ============================ .. automodule:: openstack.database.v1.flavor The Flavor Class ---------------- The ``Flavor`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.database.v1.flavor.Flavor :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/database/v1/instance.rst0000664000175000017500000000044700000000000025721 0ustar00zuulzuul00000000000000openstack.database.v1.instance ============================== .. automodule:: openstack.database.v1.instance The Instance Class ------------------ The ``Instance`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.database.v1.instance.Instance :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/database/v1/user.rst0000664000175000017500000000040700000000000025067 0ustar00zuulzuul00000000000000openstack.database.v1.user ========================== .. automodule:: openstack.database.v1.user The User Class -------------- The ``User`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.database.v1.user.User :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1532688 openstacksdk-4.0.0/doc/source/user/resources/dns/0000775000175000017500000000000000000000000022050 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/dns/index.rst0000664000175000017500000000026100000000000023710 0ustar00zuulzuul00000000000000DNS Resources ============= .. toctree:: :maxdepth: 1 v2/zone v2/zone_transfer v2/zone_export v2/zone_import v2/zone_share v2/floating_ip v2/recordset ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1532688 openstacksdk-4.0.0/doc/source/user/resources/dns/v2/0000775000175000017500000000000000000000000022377 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/dns/v2/floating_ip.rst0000664000175000017500000000044000000000000025422 0ustar00zuulzuul00000000000000openstack.dns.v2.floating_ip ============================ .. automodule:: openstack.dns.v2.floating_ip The FloatingIP Class -------------------- The ``DNS`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.dns.v2.floating_ip.FloatingIP :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/dns/v2/recordset.rst0000664000175000017500000000042500000000000025124 0ustar00zuulzuul00000000000000openstack.dns.v2.recordset ========================== .. automodule:: openstack.dns.v2.recordset The Recordset Class ------------------- The ``DNS`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.dns.v2.recordset.Recordset :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/dns/v2/zone.rst0000664000175000017500000000037300000000000024107 0ustar00zuulzuul00000000000000openstack.dns.v2.zone ============================== .. automodule:: openstack.dns.v2.zone The Zone Class -------------- The ``DNS`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.dns.v2.zone.Zone :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/dns/v2/zone_export.rst0000664000175000017500000000044000000000000025503 0ustar00zuulzuul00000000000000openstack.dns.v2.zone_export ============================ .. automodule:: openstack.dns.v2.zone_export The ZoneExport Class -------------------- The ``DNS`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.dns.v2.zone_export.ZoneExport :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/dns/v2/zone_import.rst0000664000175000017500000000044000000000000025474 0ustar00zuulzuul00000000000000openstack.dns.v2.zone_import ============================ .. 
automodule:: openstack.dns.v2.zone_import The ZoneImport Class -------------------- The ``DNS`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.dns.v2.zone_import.ZoneImport :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/dns/v2/zone_share.rst0000664000175000017500000000043100000000000025264 0ustar00zuulzuul00000000000000openstack.dns.v2.zone_share =========================== .. automodule:: openstack.dns.v2.zone_share The ZoneShare Class ------------------- The ``DNS`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.dns.v2.zone_share.ZoneShare :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/dns/v2/zone_transfer.rst0000664000175000017500000000102500000000000026006 0ustar00zuulzuul00000000000000openstack.dns.v2.zone_transfer ============================== .. automodule:: openstack.dns.v2.zone_transfer The ZoneTransferRequest Class ----------------------------- The ``DNS`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.dns.v2.zone_transfer.ZoneTransferRequest :members: The ZoneTransferAccept Class ---------------------------- The ``DNS`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.dns.v2.zone_transfer.ZoneTransferAccept :members: ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.157271 openstacksdk-4.0.0/doc/source/user/resources/identity/0000775000175000017500000000000000000000000023115 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/index.rst0000664000175000017500000000045400000000000024761 0ustar00zuulzuul00000000000000Identity Resources ================== Identity v2 Resources --------------------- .. toctree:: :maxdepth: 1 :glob: v2/* Identity v3 Resources --------------------- .. toctree:: :maxdepth: 1 :glob: v3/* Other Resources --------------- .. toctree:: :maxdepth: 1 version ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.157271 openstacksdk-4.0.0/doc/source/user/resources/identity/v2/0000775000175000017500000000000000000000000023444 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v2/extension.rst0000664000175000017500000000045700000000000026220 0ustar00zuulzuul00000000000000openstack.identity.v2.extension =============================== .. automodule:: openstack.identity.v2.extension The Extension Class ------------------- The ``Extension`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v2.extension.Extension :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v2/role.rst0000664000175000017500000000040700000000000025140 0ustar00zuulzuul00000000000000openstack.identity.v2.role ========================== .. 
automodule:: openstack.identity.v2.role The Role Class -------------- The ``Role`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v2.role.Role :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v2/tenant.rst0000664000175000017500000000042700000000000025472 0ustar00zuulzuul00000000000000openstack.identity.v2.tenant ============================ .. automodule:: openstack.identity.v2.tenant The Tenant Class ---------------- The ``Tenant`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v2.tenant.Tenant :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v2/user.rst0000664000175000017500000000040700000000000025155 0ustar00zuulzuul00000000000000openstack.identity.v2.user ========================== .. automodule:: openstack.identity.v2.user The User Class -------------- The ``User`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v2.user.User :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1612728 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/0000775000175000017500000000000000000000000023445 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/application_credential.rst0000664000175000017500000000062300000000000030675 0ustar00zuulzuul00000000000000openstack.identity.v3.application_credential ============================================ .. 
automodule:: openstack.identity.v3.application_credential The ApplicationCredential Class ------------------------------- The ``ApplicationCredential`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.application_credential.ApplicationCredential :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/credential.rst0000664000175000017500000000046700000000000026320 0ustar00zuulzuul00000000000000openstack.identity.v3.credential ================================ .. automodule:: openstack.identity.v3.credential The Credential Class -------------------- The ``Credential`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.credential.Credential :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/domain.rst0000664000175000017500000000042700000000000025451 0ustar00zuulzuul00000000000000openstack.identity.v3.domain ============================ .. automodule:: openstack.identity.v3.domain The Domain Class ---------------- The ``Domain`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.domain.Domain :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/domain_config.rst0000664000175000017500000000047700000000000027003 0ustar00zuulzuul00000000000000openstack.identity.v3.domain_config =================================== .. automodule:: openstack.identity.v3.domain_config The Domain Class ---------------- The ``DomainConfig`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.identity.v3.domain_config.DomainConfig :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/endpoint.rst0000664000175000017500000000044700000000000026024 0ustar00zuulzuul00000000000000openstack.identity.v3.endpoint ============================== .. automodule:: openstack.identity.v3.endpoint The Endpoint Class ------------------ The ``Endpoint`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.endpoint.Endpoint :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/federation_protocol.rst0000664000175000017500000000057300000000000030245 0ustar00zuulzuul00000000000000openstack.identity.v3.federation_protocol ========================================= .. automodule:: openstack.identity.v3.federation_protocol The FederationProtocol Class ---------------------------- The ``FederationProtocol`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.federation_protocol.FederationProtocol :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/group.rst0000664000175000017500000000041700000000000025335 0ustar00zuulzuul00000000000000openstack.identity.v3.group =========================== .. automodule:: openstack.identity.v3.group The Group Class --------------- The ``Group`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.identity.v3.group.Group :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/identity_provider.rst0000664000175000017500000000055300000000000027745 0ustar00zuulzuul00000000000000openstack.identity.v3.identity_provider ======================================= .. automodule:: openstack.identity.v3.identity_provider The IdentityProvider Class -------------------------- The ``IdentityProvider`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.identity_provider.IdentityProvider :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/limit.rst0000664000175000017500000000041700000000000025317 0ustar00zuulzuul00000000000000openstack.identity.v3.limit =========================== .. automodule:: openstack.identity.v3.limit The Limit Class --------------- The ``Limit`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.limit.Limit :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/mapping.rst0000664000175000017500000000043700000000000025636 0ustar00zuulzuul00000000000000openstack.identity.v3.mapping ============================= .. automodule:: openstack.identity.v3.mapping The Mapping Class ----------------- The ``Mapping`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.identity.v3.mapping.Mapping :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/policy.rst0000664000175000017500000000042700000000000025501 0ustar00zuulzuul00000000000000openstack.identity.v3.policy ============================ .. automodule:: openstack.identity.v3.policy The Policy Class ---------------- The ``Policy`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.policy.Policy :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/project.rst0000664000175000017500000000043700000000000025651 0ustar00zuulzuul00000000000000openstack.identity.v3.project ============================= .. automodule:: openstack.identity.v3.project The Project Class ----------------- The ``Project`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.project.Project :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/region.rst0000664000175000017500000000042700000000000025465 0ustar00zuulzuul00000000000000openstack.identity.v3.region ============================ .. automodule:: openstack.identity.v3.region The Region Class ---------------- The ``Region`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.region.Region :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/registered_limit.rst0000664000175000017500000000054300000000000027534 0ustar00zuulzuul00000000000000openstack.identity.v3.registered_limit ====================================== .. 
automodule:: openstack.identity.v3.registered_limit The RegisteredLimit Class ------------------------- The ``RegisteredLimit`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.registered_limit.RegisteredLimit :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/role.rst0000664000175000017500000000040700000000000025141 0ustar00zuulzuul00000000000000openstack.identity.v3.role ========================== .. automodule:: openstack.identity.v3.role The Role Class -------------- The ``Role`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.role.Role :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/role_assignment.rst0000664000175000017500000000053300000000000027371 0ustar00zuulzuul00000000000000openstack.identity.v3.role_assignment ===================================== .. automodule:: openstack.identity.v3.role_assignment The RoleAssignment Class ------------------------ The ``RoleAssignment`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.role_assignment.RoleAssignment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/role_domain_group_assignment.rst0000664000175000017500000000067300000000000032141 0ustar00zuulzuul00000000000000openstack.identity.v3.role_domain_group_assignment ================================================== .. automodule:: openstack.identity.v3.role_domain_group_assignment The RoleDomainGroupAssignment Class ----------------------------------- The ``RoleDomainGroupAssignment`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.identity.v3.role_domain_group_assignment.RoleDomainGroupAssignment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/role_domain_user_assignment.rst0000664000175000017500000000066300000000000031762 0ustar00zuulzuul00000000000000openstack.identity.v3.role_domain_user_assignment ================================================= .. automodule:: openstack.identity.v3.role_domain_user_assignment The RoleDomainUserAssignment Class ---------------------------------- The ``RoleDomainUserAssignment`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.role_domain_user_assignment.RoleDomainUserAssignment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/role_project_group_assignment.rst0000664000175000017500000000070300000000000032332 0ustar00zuulzuul00000000000000openstack.identity.v3.role_project_group_assignment =================================================== .. automodule:: openstack.identity.v3.role_project_group_assignment The RoleProjectGroupAssignment Class ------------------------------------ The ``RoleProjectGroupAssignment`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.role_project_group_assignment.RoleProjectGroupAssignment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/role_project_user_assignment.rst0000664000175000017500000000067300000000000032162 0ustar00zuulzuul00000000000000openstack.identity.v3.role_project_user_assignment ================================================== .. 
automodule:: openstack.identity.v3.role_project_user_assignment The RoleProjectUserAssignment Class ----------------------------------- The ``RoleProjectUserAssignment`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.role_project_user_assignment.RoleProjectUserAssignment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/role_system_group_assignment.rst0000664000175000017500000000067300000000000032216 0ustar00zuulzuul00000000000000openstack.identity.v3.role_system_group_assignment ================================================== .. automodule:: openstack.identity.v3.role_system_group_assignment The RoleSystemGroupAssignment Class ----------------------------------- The ``RoleSystemGroupAssignment`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.role_system_group_assignment.RoleSystemGroupAssignment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/role_system_user_assignment.rst0000664000175000017500000000066300000000000032037 0ustar00zuulzuul00000000000000openstack.identity.v3.role_system_user_assignment ================================================= .. automodule:: openstack.identity.v3.role_system_user_assignment The RoleSystemUserAssignment Class ---------------------------------- The ``RoleSystemUserAssignment`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.identity.v3.role_system_user_assignment.RoleSystemUserAssignment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/service.rst0000664000175000017500000000043700000000000025643 0ustar00zuulzuul00000000000000openstack.identity.v3.service ============================= .. automodule:: openstack.identity.v3.service The Service Class ----------------- The ``Service`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.service.Service :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/system.rst0000664000175000017500000000042700000000000025526 0ustar00zuulzuul00000000000000openstack.identity.v3.system ============================ .. automodule:: openstack.identity.v3.system The System Class ---------------- The ``System`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.system.System :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/trust.rst0000664000175000017500000000041700000000000025362 0ustar00zuulzuul00000000000000openstack.identity.v3.trust =========================== .. automodule:: openstack.identity.v3.trust The Trust Class --------------- The ``Trust`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.trust.Trust :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/v3/user.rst0000664000175000017500000000040700000000000025156 0ustar00zuulzuul00000000000000openstack.identity.v3.user ========================== .. 
automodule:: openstack.identity.v3.user The User Class -------------- The ``User`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.v3.user.User :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/identity/version.rst0000664000175000017500000000042300000000000025333 0ustar00zuulzuul00000000000000openstack.identity.version ========================== .. automodule:: openstack.identity.version The Version Class ----------------- The ``Version`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.identity.version.Version :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1612728 openstacksdk-4.0.0/doc/source/user/resources/image/0000775000175000017500000000000000000000000022346 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/index.rst0000664000175000017500000000054400000000000024212 0ustar00zuulzuul00000000000000Image Resources =============== Image v1 Resources ------------------ .. toctree:: :maxdepth: 1 v1/image Image v2 Resources ------------------ .. 
toctree:: :maxdepth: 1 v2/image v2/member v2/metadef_namespace v2/metadef_object v2/metadef_resource_type v2/metadef_property v2/metadef_schema v2/task v2/service_info ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1612728 openstacksdk-4.0.0/doc/source/user/resources/image/v1/0000775000175000017500000000000000000000000022674 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/v1/image.rst0000664000175000017500000000040300000000000024505 0ustar00zuulzuul00000000000000openstack.image.v1.image ======================== .. automodule:: openstack.image.v1.image The Image Class --------------- The ``Image`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v1.image.Image :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1652746 openstacksdk-4.0.0/doc/source/user/resources/image/v2/0000775000175000017500000000000000000000000022675 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/v2/image.rst0000664000175000017500000000040300000000000024506 0ustar00zuulzuul00000000000000openstack.image.v2.image ======================== .. automodule:: openstack.image.v2.image The Image Class --------------- The ``Image`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v2.image.Image :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/v2/member.rst0000664000175000017500000000041300000000000024674 0ustar00zuulzuul00000000000000openstack.image.v2.member ========================= .. 
automodule:: openstack.image.v2.member The Member Class ---------------- The ``Member`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v2.member.Member :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/v2/metadef_namespace.rst0000664000175000017500000000054200000000000027051 0ustar00zuulzuul00000000000000openstack.image.v2.metadef_namespace ===================================== .. automodule:: openstack.image.v2.metadef_namespace The MetadefNamespace Class ---------------------------- The ``MetadefNamespace`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v2.metadef_namespace.MetadefNamespace :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/v2/metadef_object.rst0000664000175000017500000000051100000000000026357 0ustar00zuulzuul00000000000000openstack.image.v2.metadef_object ================================== .. automodule:: openstack.image.v2.metadef_object The MetadefObject Class ------------------------ The ``MetadefObject`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v2.metadef_object.MetadefObject :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/v2/metadef_property.rst0000664000175000017500000000052700000000000027004 0ustar00zuulzuul00000000000000openstack.image.v2.metadef_property =================================== .. automodule:: openstack.image.v2.metadef_property The MetadefProperty Class ------------------------- The ``MetadefProperty`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.image.v2.metadef_property.MetadefProperty :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/v2/metadef_resource_type.rst0000664000175000017500000000123400000000000030004 0ustar00zuulzuul00000000000000openstack.image.v2.metadef_resource_type ======================================== .. automodule:: openstack.image.v2.metadef_resource_type The MetadefResourceType Class ----------------------------- The ``MetadefResourceType`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v2.metadef_resource_type.MetadefResourceType :members: The MetadefResourceTypeAssociation Class ---------------------------------------- The ``MetadefResourceTypeAssociation`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v2.metadef_resource_type.MetadefResourceTypeAssociation :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/v2/metadef_schema.rst0000664000175000017500000000050700000000000026356 0ustar00zuulzuul00000000000000openstack.image.v2.metadef_schema ================================= .. automodule:: openstack.image.v2.metadef_schema The MetadefSchema Class ----------------------- The ``MetadefSchema`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v2.metadef_schema.MetadefSchema :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/v2/service_info.rst0000664000175000017500000000073400000000000026106 0ustar00zuulzuul00000000000000openstack.image.v2.service_info =============================== .. 
automodule:: openstack.image.v2.service_info The Store Class ---------------- The ``Store`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v2.service_info.Store :members: The Import Info Class --------------------- The ``Import`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v2.service_info.Import :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/image/v2/task.rst0000664000175000017500000000037300000000000024374 0ustar00zuulzuul00000000000000openstack.image.v2.task ======================= .. automodule:: openstack.image.v2.task The Task Class -------------- The ``Task`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.image.v2.task.Task :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1652746 openstacksdk-4.0.0/doc/source/user/resources/key_manager/0000775000175000017500000000000000000000000023546 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/key_manager/index.rst0000664000175000017500000000016200000000000025406 0ustar00zuulzuul00000000000000KeyManager Resources ==================== .. 
toctree:: :maxdepth: 1 v1/container v1/order v1/secret ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1652746 openstacksdk-4.0.0/doc/source/user/resources/key_manager/v1/0000775000175000017500000000000000000000000024074 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/key_manager/v1/container.rst0000664000175000017500000000047300000000000026614 0ustar00zuulzuul00000000000000openstack.key_manager.v1.container ================================== .. automodule:: openstack.key_manager.v1.container The Container Class ------------------- The ``Container`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.key_manager.v1.container.Container :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/key_manager/v1/order.rst0000664000175000017500000000043300000000000025741 0ustar00zuulzuul00000000000000openstack.key_manager.v1.order ============================== .. automodule:: openstack.key_manager.v1.order The Order Class --------------- The ``Order`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.key_manager.v1.order.Order :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/key_manager/v1/secret.rst0000664000175000017500000000044300000000000026114 0ustar00zuulzuul00000000000000openstack.key_manager.v1.secret =============================== .. automodule:: openstack.key_manager.v1.secret The Secret Class ---------------- The ``Secret`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.key_manager.v1.secret.Secret :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1652746 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/0000775000175000017500000000000000000000000024032 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/index.rst0000664000175000017500000000050000000000000025666 0ustar00zuulzuul00000000000000Load Balancer Resources ======================= .. toctree:: :maxdepth: 1 v2/load_balancer v2/listener v2/pool v2/member v2/health_monitor v2/l7_policy v2/l7_rule v2/provider v2/flavor_profile v2/flavor v2/quota v2/amphora v2/availability_zone_profile v2/availability_zone ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1692767 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/0000775000175000017500000000000000000000000024361 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/amphora.rst0000664000175000017500000000133500000000000026544 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.amphora ================================== .. automodule:: openstack.load_balancer.v2.amphora The Amphora Class ----------------- The ``Amphora`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.amphora.Amphora :members: The AmphoraConfig Class ----------------------- The ``AmphoraConfig`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.amphora.AmphoraConfig :members: The AmphoraFailover Class ------------------------- The ``AmphoraFailover`` class inherits from :class:`~openstack.resource.Resource`. 
.. autoclass:: openstack.load_balancer.v2.amphora.AmphoraFailover :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/availability_zone.rst0000664000175000017500000000057700000000000030631 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.availability_zone ============================================ .. automodule:: openstack.load_balancer.v2.availability_zone The AvailabilityZone Class -------------------------- The ``AvailabilityZone`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.availability_zone.AvailabilityZone :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/availability_zone_profile.rst0000664000175000017500000000067300000000000032346 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.availability_zone_profile ==================================================== .. automodule:: openstack.load_balancer.v2.availability_zone_profile The AvailabilityZoneProfile Class --------------------------------- The ``AvailabilityZoneProfile`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/flavor.rst0000664000175000017500000000045300000000000026406 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.flavor ================================= .. automodule:: openstack.load_balancer.v2.flavor The Flavor Class ---------------- The ``Flavor`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.load_balancer.v2.flavor.Flavor :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/flavor_profile.rst0000664000175000017500000000054700000000000030132 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.flavor_profile ========================================= .. automodule:: openstack.load_balancer.v2.flavor_profile The FlavorProfile Class ----------------------- The ``FlavorProfile`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.flavor_profile.FlavorProfile :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/health_monitor.rst0000664000175000017500000000054700000000000030135 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.health_monitor ========================================= .. automodule:: openstack.load_balancer.v2.health_monitor The HealthMonitor Class ----------------------- The ``HealthMonitor`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.health_monitor.HealthMonitor :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/l7_policy.rst0000664000175000017500000000047700000000000027024 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.l7_policy ==================================== .. automodule:: openstack.load_balancer.v2.l7_policy The L7Policy Class ------------------ The ``L7Policy`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.load_balancer.v2.l7_policy.L7Policy :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/l7_rule.rst0000664000175000017500000000045700000000000026472 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.l7_rule ================================== .. automodule:: openstack.load_balancer.v2.l7_rule The L7Rule Class ---------------- The ``L7Rule`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.l7_rule.L7Rule :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/listener.rst0000664000175000017500000000101500000000000026735 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.listener =================================== .. automodule:: openstack.load_balancer.v2.listener The Listener Class ------------------ The ``Listener`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.listener.Listener :members: The ListenerStats Class ----------------------- The ``ListenerStats`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.listener.ListenerStats :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/load_balancer.rst0000664000175000017500000000147100000000000027664 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.load_balancer ======================================== .. automodule:: openstack.load_balancer.v2.load_balancer The LoadBalancer Class ---------------------- The ``LoadBalancer`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.load_balancer.v2.load_balancer.LoadBalancer :members: The LoadBalancerStats Class --------------------------- The ``LoadBalancerStats`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.load_balancer.LoadBalancerStats :members: The LoadBalancerFailover Class ------------------------------ The ``LoadBalancerFailover`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.load_balancer.LoadBalancerFailover :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/member.rst0000664000175000017500000000045300000000000026364 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.member ================================= .. automodule:: openstack.load_balancer.v2.member The Member Class ---------------- The ``Member`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.member.Member :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/pool.rst0000664000175000017500000000043300000000000026064 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.pool =============================== .. automodule:: openstack.load_balancer.v2.pool The Pool Class -------------- The ``Pool`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.pool.Pool :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/provider.rst0000664000175000017500000000110500000000000026742 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.provider =================================== .. 
automodule:: openstack.load_balancer.v2.provider The Provider Class ------------------ The ``Provider`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.provider.Provider :members: The Provider Flavor Capabilities Class -------------------------------------- The ``ProviderFlavorCapabilities`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.provider.ProviderFlavorCapabilities :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/load_balancer/v2/quota.rst0000664000175000017500000000044300000000000026245 0ustar00zuulzuul00000000000000openstack.load_balancer.v2.quota ================================ .. automodule:: openstack.load_balancer.v2.quota The Quota Class --------------- The ``Quota`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.load_balancer.v2.quota.Quota :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1692767 openstacksdk-4.0.0/doc/source/user/resources/network/0000775000175000017500000000000000000000000022755 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/index.rst0000664000175000017500000000014500000000000024616 0ustar00zuulzuul00000000000000Network Resources ================= .. 
toctree:: :maxdepth: 1 :glob: v2/* v2/vpn/index ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1812825 openstacksdk-4.0.0/doc/source/user/resources/network/v2/0000775000175000017500000000000000000000000023304 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/address_group.rst0000664000175000017500000000050700000000000026701 0ustar00zuulzuul00000000000000openstack.network.v2.address_group ================================== .. automodule:: openstack.network.v2.address_group The AddressGroup Class ---------------------- The ``AddressGroup`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.address_group.AddressGroup :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/address_scope.rst0000664000175000017500000000050700000000000026656 0ustar00zuulzuul00000000000000openstack.network.v2.address_scope ================================== .. automodule:: openstack.network.v2.address_scope The AddressScope Class ---------------------- The ``AddressScope`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.address_scope.AddressScope :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/agent.rst0000664000175000017500000000041300000000000025132 0ustar00zuulzuul00000000000000openstack.network.v2.agent ========================== .. automodule:: openstack.network.v2.agent The Agent Class --------------- The ``Agent`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.agent.Agent :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/auto_allocated_topology.rst0000664000175000017500000000063000000000000030751 0ustar00zuulzuul00000000000000openstack.network.v2.auto_allocated_topology ============================================ .. automodule:: openstack.network.v2.auto_allocated_topology The Auto Allocated Topology Class --------------------------------- The ``Auto Allocated Toplogy`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.auto_allocated_topology.AutoAllocatedTopology :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/availability_zone.rst0000664000175000017500000000054700000000000027551 0ustar00zuulzuul00000000000000openstack.network.v2.availability_zone ====================================== .. automodule:: openstack.network.v2.availability_zone The AvailabilityZone Class -------------------------- The ``AvailabilityZone`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.availability_zone.AvailabilityZone :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/bgp_peer.rst0000664000175000017500000000043700000000000025625 0ustar00zuulzuul00000000000000openstack.network.v2.bgp_peer ============================= .. automodule:: openstack.network.v2.bgp_peer The BgpPeer Class ----------------- The ``BgpPeer`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.bgp_peer.BgpPeer :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/bgp_speaker.rst0000664000175000017500000000046700000000000026327 0ustar00zuulzuul00000000000000openstack.network.v2.bgp_speaker ================================ .. automodule:: openstack.network.v2.bgp_speaker The BgpSpeaker Class -------------------- The ``BgpSpeaker`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.bgp_speaker.BgpSpeaker :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/bgpvpn.rst0000664000175000017500000000042600000000000025334 0ustar00zuulzuul00000000000000openstack.network.v2.bgpvpn ============================= .. automodule:: openstack.network.v2.bgpvpn The BgpVpn Class ----------------- The ``BgpVpn`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.bgpvpn.BgpVpn :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/bgpvpn_network_association.rst0000664000175000017500000000065300000000000031503 0ustar00zuulzuul00000000000000openstack.network.v2.bgpvpn_network_association =============================================== .. automodule:: openstack.network.v2.bgpvpn_network_association The BgpVpnNetworkAssociation Class ---------------------------------- The ``BgpVpnNetworkAssociation`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.bgpvpn_network_association.BgpVpnNetworkAssociation :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/bgpvpn_port_association.rst0000664000175000017500000000062300000000000030773 0ustar00zuulzuul00000000000000openstack.network.v2.bgpvpn_port_association ============================================ .. automodule:: openstack.network.v2.bgpvpn_port_association The BgpVpnPortAssociation Class ------------------------------- The ``BgpVpnPortAssociation`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.bgpvpn_port_association.BgpVpnPortAssociation :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/bgpvpn_router_association.rst0000664000175000017500000000064300000000000031331 0ustar00zuulzuul00000000000000openstack.network.v2.bgpvpn_router_association ============================================== .. automodule:: openstack.network.v2.bgpvpn_router_association The BgpVpnRouterAssociation Class --------------------------------- The ``BgpVpnRouterAssociation`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.bgpvpn_router_association.BgpVpnRouterAssociation :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/extension.rst0000664000175000017500000000045300000000000026054 0ustar00zuulzuul00000000000000openstack.network.v2.extension ============================== .. automodule:: openstack.network.v2.extension The Extension Class ------------------- The ``Extension`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.extension.Extension :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/flavor.rst0000664000175000017500000000042300000000000025326 0ustar00zuulzuul00000000000000openstack.network.v2.flavor =========================== .. automodule:: openstack.network.v2.flavor The Flavor Class ---------------- The ``Flavor`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.flavor.Flavor :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/floating_ip.rst0000664000175000017500000000046700000000000026340 0ustar00zuulzuul00000000000000openstack.network.v2.floating_ip ================================ .. automodule:: openstack.network.v2.floating_ip The FloatingIP Class -------------------- The ``FloatingIP`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.floating_ip.FloatingIP :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/health_monitor.rst0000664000175000017500000000051700000000000027055 0ustar00zuulzuul00000000000000openstack.network.v2.health_monitor =================================== .. automodule:: openstack.network.v2.health_monitor The HealthMonitor Class ----------------------- The ``HealthMonitor`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.health_monitor.HealthMonitor :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/listener.rst0000664000175000017500000000044300000000000025664 0ustar00zuulzuul00000000000000openstack.network.v2.listener ============================= .. automodule:: openstack.network.v2.listener The Listener Class ------------------ The ``Listener`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.listener.Listener :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/load_balancer.rst0000664000175000017500000000050700000000000026606 0ustar00zuulzuul00000000000000openstack.network.v2.load_balancer ================================== .. automodule:: openstack.network.v2.load_balancer The LoadBalancer Class ---------------------- The ``LoadBalancer`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.load_balancer.LoadBalancer :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/local_ip.rst0000664000175000017500000000043700000000000025624 0ustar00zuulzuul00000000000000openstack.network.v2.local_ip ============================= .. automodule:: openstack.network.v2.local_ip The LocalIP Class ----------------- The ``LocalIP`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.local_ip.LocalIP :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/local_ip_association.rst0000664000175000017500000000057300000000000030221 0ustar00zuulzuul00000000000000openstack.network.v2.local_ip_association ========================================= .. automodule:: openstack.network.v2.local_ip_association The LocalIPAssociation Class ---------------------------- The ``LocalIPAssociation`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.local_ip_association.LocalIPAssociation :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/metering_label.rst0000664000175000017500000000051700000000000027012 0ustar00zuulzuul00000000000000openstack.network.v2.metering_label =================================== .. automodule:: openstack.network.v2.metering_label The MeteringLabel Class ----------------------- The ``MeteringLabel`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.metering_label.MeteringLabel :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/metering_label_rule.rst0000664000175000017500000000056300000000000030042 0ustar00zuulzuul00000000000000openstack.network.v2.metering_label_rule ======================================== .. automodule:: openstack.network.v2.metering_label_rule The MeteringLabelRule Class --------------------------- The ``MeteringLabelRule`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.metering_label_rule.MeteringLabelRule :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/ndp_proxy.rst0000664000175000017500000000044700000000000026065 0ustar00zuulzuul00000000000000openstack.network.v2.ndp_proxy ============================== .. automodule:: openstack.network.v2.ndp_proxy The NDPProxy Class ------------------ The ``NDPProxy`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.ndp_proxy.NDPProxy :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/network.rst0000664000175000017500000000043300000000000025527 0ustar00zuulzuul00000000000000openstack.network.v2.network ============================ .. automodule:: openstack.network.v2.network The Network Class ----------------- The ``Network`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.network.Network :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/network_ip_availability.rst0000664000175000017500000000062300000000000030752 0ustar00zuulzuul00000000000000openstack.network.v2.network_ip_availability ============================================ .. automodule:: openstack.network.v2.network_ip_availability The NetworkIPAvailability Class ------------------------------- The ``NetworkIPAvailability`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.network_ip_availability.NetworkIPAvailability :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/network_segment_range.rst0000664000175000017500000000060400000000000030425 0ustar00zuulzuul00000000000000openstack.network.v2.network_segment_range ========================================== .. automodule:: openstack.network.v2.network_segment_range The NetworkSegmentRange Class ----------------------------- The ``NetworkSegmentRange`` class inherits from :class:`~openstack.resource .Resource`. .. autoclass:: openstack.network.v2.network_segment_range.NetworkSegmentRange :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/pool.rst0000664000175000017500000000040300000000000025004 0ustar00zuulzuul00000000000000openstack.network.v2.pool ========================= .. automodule:: openstack.network.v2.pool The Pool Class -------------- The ``Pool`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.pool.Pool :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/pool_member.rst0000664000175000017500000000046700000000000026345 0ustar00zuulzuul00000000000000openstack.network.v2.pool_member ================================ .. automodule:: openstack.network.v2.pool_member The PoolMember Class -------------------- The ``PoolMember`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.pool_member.PoolMember :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/port.rst0000664000175000017500000000040300000000000025017 0ustar00zuulzuul00000000000000openstack.network.v2.port ========================= .. automodule:: openstack.network.v2.port The Port Class -------------- The ``Port`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.port.Port :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/qos_bandwidth_limit_rule.rst0000664000175000017500000000062700000000000031116 0ustar00zuulzuul00000000000000openstack.network.v2.qos_bandwidth_limit_rule ============================================= .. automodule:: openstack.network.v2.qos_bandwidth_limit_rule The QoSBandwidthLimitRule Class ------------------------------- The ``QoSBandwidthLimitRule`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/qos_dscp_marking_rule.rst0000664000175000017500000000057700000000000030421 0ustar00zuulzuul00000000000000openstack.network.v2.qos_dscp_marking_rule ========================================== .. automodule:: openstack.network.v2.qos_dscp_marking_rule The QoSDSCPMarkingRule Class ---------------------------- The ``QoSDSCPMarkingRule`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/qos_minimum_bandwidth_rule.rst0000664000175000017500000000064700000000000031455 0ustar00zuulzuul00000000000000openstack.network.v2.qos_minimum_bandwidth_rule =============================================== .. automodule:: openstack.network.v2.qos_minimum_bandwidth_rule The QoSMinimumBandwidthRule Class --------------------------------- The ``QoSMinimumBandwidthRule`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/qos_minimum_packet_rate_rule.rst0000664000175000017500000000066300000000000031771 0ustar00zuulzuul00000000000000openstack.network.v2.qos_minimum_packet_rate_rule ================================================= .. automodule:: openstack.network.v2.qos_minimum_packet_rate_rule The QoSMinimumPacketRateRule Class ---------------------------------- The ``QoSMinimumPacketRateRule`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/qos_policy.rst0000664000175000017500000000045700000000000026225 0ustar00zuulzuul00000000000000openstack.network.v2.qos_policy =============================== .. automodule:: openstack.network.v2.qos_policy The QoSPolicy Class ------------------- The ``QoSPolicy`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.qos_policy.QoSPolicy :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/qos_rule_type.rst0000664000175000017500000000050300000000000026726 0ustar00zuulzuul00000000000000openstack.network.v2.qos_rule_type ================================== .. automodule:: openstack.network.v2.qos_rule_type The QoSRuleType Class --------------------- The ``QoSRuleType`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.qos_rule_type.QoSRuleType :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/quota.rst0000664000175000017500000000041300000000000025165 0ustar00zuulzuul00000000000000openstack.network.v2.quota ========================== .. automodule:: openstack.network.v2.quota The Quota Class --------------- The ``Quota`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.quota.Quota :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/rbac_policy.rst0000664000175000017500000000046700000000000026333 0ustar00zuulzuul00000000000000openstack.network.v2.rbac_policy ================================ .. automodule:: openstack.network.v2.rbac_policy The RBACPolicy Class -------------------- The ``RBACPolicy`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.rbac_policy.RBACPolicy :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/router.rst0000664000175000017500000000042300000000000025355 0ustar00zuulzuul00000000000000openstack.network.v2.router =========================== .. automodule:: openstack.network.v2.router The Router Class ---------------- The ``Router`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.router.Router :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/security_group.rst0000664000175000017500000000051700000000000027124 0ustar00zuulzuul00000000000000openstack.network.v2.security_group =================================== .. automodule:: openstack.network.v2.security_group The SecurityGroup Class ----------------------- The ``SecurityGroup`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.security_group.SecurityGroup :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/security_group_rule.rst0000664000175000017500000000056300000000000030154 0ustar00zuulzuul00000000000000openstack.network.v2.security_group_rule ======================================== .. automodule:: openstack.network.v2.security_group_rule The SecurityGroupRule Class --------------------------- The ``SecurityGroupRule`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.security_group_rule.SecurityGroupRule :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/segment.rst0000664000175000017500000000043300000000000025500 0ustar00zuulzuul00000000000000openstack.network.v2.segment ============================ .. automodule:: openstack.network.v2.segment The Segment Class ----------------- The ``Segment`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.segment.Segment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/service_profile.rst0000664000175000017500000000052700000000000027222 0ustar00zuulzuul00000000000000openstack.network.v2.service_profile ==================================== .. automodule:: openstack.network.v2.service_profile The ServiceProfile Class ------------------------ The ``ServiceProfile`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.service_profile.ServiceProfile :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/service_provider.rst0000664000175000017500000000054200000000000027411 0ustar00zuulzuul00000000000000openstack.network.v2.service_provider ===================================== .. automodule:: openstack.network.v2.service_provider The Service Provider Class -------------------------- The ``Service Provider`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.service_provider.ServiceProvider :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/sfc_flow_classifier.rst0000664000175000017500000000056300000000000030050 0ustar00zuulzuul00000000000000openstack.network.v2.sfc_flow_classifier ======================================== .. automodule:: openstack.network.v2.sfc_flow_classifier The SfcFlowClassifier Class --------------------------- The ``SfcFlowClassifier`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/sfc_port_chain.rst0000664000175000017500000000051300000000000027016 0ustar00zuulzuul00000000000000openstack.network.v2.sfc_port_chain =================================== .. automodule:: openstack.network.v2.sfc_port_chain The SfcPortChain Class ---------------------- The ``SfcPortChain`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.sfc_port_chain.SfcPortChain :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/sfc_port_pair.rst0000664000175000017500000000050300000000000026666 0ustar00zuulzuul00000000000000openstack.network.v2.sfc_port_pair ================================== .. automodule:: openstack.network.v2.sfc_port_pair The SfcPortPair Class --------------------- The ``SfcPortPair`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.sfc_port_pair.SfcPortPair :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/sfc_port_pair_group.rst0000664000175000017500000000055700000000000030113 0ustar00zuulzuul00000000000000openstack.network.v2.sfc_port_pair_group ======================================== .. automodule:: openstack.network.v2.sfc_port_pair_group The SfcPortPairGroup Class -------------------------- The ``SfcPortPairGroup`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/sfc_service_graph.rst0000664000175000017500000000054300000000000027514 0ustar00zuulzuul00000000000000openstack.network.v2.sfc_service_graph ====================================== .. automodule:: openstack.network.v2.sfc_service_graph The SfcServiceGraph Class ------------------------- The ``SfcServiceGraph`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.sfc_service_graph.SfcServiceGraph :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/subnet.rst0000664000175000017500000000042300000000000025335 0ustar00zuulzuul00000000000000openstack.network.v2.subnet =========================== .. automodule:: openstack.network.v2.subnet The Subnet Class ---------------- The ``Subnet`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.subnet.Subnet :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/subnet_pool.rst0000664000175000017500000000046700000000000026376 0ustar00zuulzuul00000000000000openstack.network.v2.subnet_pool ================================ .. automodule:: openstack.network.v2.subnet_pool The SubnetPool Class -------------------- The ``SubnetPool`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.subnet_pool.SubnetPool :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/tap_flow.rst0000664000175000017500000000043700000000000025655 0ustar00zuulzuul00000000000000openstack.network.v2.tap_flow ============================= .. automodule:: openstack.network.v2.tap_flow The TapFlow Class ----------------- The ``TapFlow`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.tap_flow.TapFlow :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/tap_mirror.rst0000664000175000017500000000045700000000000026222 0ustar00zuulzuul00000000000000openstack.network.v2.tap_mirror =============================== .. automodule:: openstack.network.v2.tap_mirror The TapMirror Class ------------------- The ``TapMirror`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.tap_mirror.TapMirror :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/tap_service.rst0000664000175000017500000000046700000000000026351 0ustar00zuulzuul00000000000000openstack.network.v2.tap_service ================================ .. automodule:: openstack.network.v2.tap_service The TapService Class -------------------- The ``TapService`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.tap_service.TapService :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1852844 openstacksdk-4.0.0/doc/source/user/resources/network/v2/vpn/0000775000175000017500000000000000000000000024107 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/vpn/endpoint_group.rst0000664000175000017500000000055300000000000027700 0ustar00zuulzuul00000000000000openstack.network.v2.vpn_endpoint_group ======================================= .. automodule:: openstack.network.v2.vpn_endpoint_group The VpnEndpointGroup Class -------------------------- The ``VpnEndpointGroup`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/vpn/ike_policy.rst0000664000175000017500000000051300000000000026767 0ustar00zuulzuul00000000000000openstack.network.v2.vpn_ike_policy =================================== .. 
automodule:: openstack.network.v2.vpn_ike_policy The VpnIkePolicy Class ---------------------- The ``VpnIkePolicy`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.vpn_ike_policy.VpnIkePolicy :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/vpn/index.rst0000664000175000017500000000012000000000000025741 0ustar00zuulzuul00000000000000VPNaaS Resources ================ .. toctree:: :maxdepth: 1 :glob: * ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/vpn/ipsec_policy.rst0000664000175000017500000000053300000000000027324 0ustar00zuulzuul00000000000000openstack.network.v2.vpn_ipsec_policy ===================================== .. automodule:: openstack.network.v2.vpn_ipsec_policy The VpnIpsecPolicy Class ------------------------ The ``VpnIpsecPolicy`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/vpn/ipsec_site_connection.rst0000664000175000017500000000063700000000000031215 0ustar00zuulzuul00000000000000openstack.network.v2.vpn_ipsec_site_connection ============================================== .. automodule:: openstack.network.v2.vpn_ipsec_site_connection The VpnIPSecSiteConnection Class -------------------------------- The ``VpnIPSecSiteConnection`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/network/v2/vpn/service.rst0000664000175000017500000000046700000000000026310 0ustar00zuulzuul00000000000000openstack.network.v2.vpn_service ================================ .. automodule:: openstack.network.v2.vpn_service The VpnService Class -------------------- The ``VpnService`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.vpn_service.VpnService :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1852844 openstacksdk-4.0.0/doc/source/user/resources/object_store/0000775000175000017500000000000000000000000023746 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/object_store/index.rst0000664000175000017500000000016500000000000025611 0ustar00zuulzuul00000000000000Object Store Resources ====================== .. toctree:: :maxdepth: 1 v1/account v1/container v1/obj ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1852844 openstacksdk-4.0.0/doc/source/user/resources/object_store/v1/0000775000175000017500000000000000000000000024274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/object_store/v1/account.rst0000664000175000017500000000045700000000000026470 0ustar00zuulzuul00000000000000openstack.object_store.v1.account ================================= .. 
automodule:: openstack.object_store.v1.account The Account Class ----------------- The ``Account`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.object_store.v1.account.Account :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/object_store/v1/container.rst0000664000175000017500000000047700000000000027020 0ustar00zuulzuul00000000000000openstack.object_store.v1.container =================================== .. automodule:: openstack.object_store.v1.container The Container Class ------------------- The ``Container`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.object_store.v1.container.Container :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/object_store/v1/obj.rst0000664000175000017500000000043300000000000025600 0ustar00zuulzuul00000000000000openstack.object_store.v1.obj ============================= .. automodule:: openstack.object_store.v1.obj The Object Class ---------------- The ``Object`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.object_store.v1.obj.Object :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1852844 openstacksdk-4.0.0/doc/source/user/resources/orchestration/0000775000175000017500000000000000000000000024150 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/orchestration/index.rst0000664000175000017500000000037200000000000026013 0ustar00zuulzuul00000000000000Orchestration Resources ======================= .. 
toctree:: :maxdepth: 1 v1/resource v1/software_config v1/software_deployment v1/stack v1/stack_environment v1/stack_event v1/stack_files v1/stack_template v1/template ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1892862 openstacksdk-4.0.0/doc/source/user/resources/orchestration/v1/0000775000175000017500000000000000000000000024476 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/orchestration/v1/resource.rst0000664000175000017500000000047300000000000027063 0ustar00zuulzuul00000000000000openstack.orchestration.v1.resource =================================== .. automodule:: openstack.orchestration.v1.resource The Resource Class ------------------ The ``Resource`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.orchestration.v1.resource.Resource :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/orchestration/v1/software_config.rst0000664000175000017500000000055700000000000030416 0ustar00zuulzuul00000000000000openstack.orchestration.v1.software_config ========================================== .. automodule:: openstack.orchestration.v1.software_config The SoftwareConfig Class ------------------------ The ``SoftwareConfig`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.orchestration.v1.software_config.SoftwareConfig :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/orchestration/v1/software_deployment.rst0000664000175000017500000000061700000000000031326 0ustar00zuulzuul00000000000000openstack.orchestration.v1.software_deployment ============================================== .. 
automodule:: openstack.orchestration.v1.software_deployment The SoftwareDeployment Class ---------------------------- The ``SoftwareDeployment`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.orchestration.v1.software_deployment.SoftwareDeployment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/orchestration/v1/stack.rst0000664000175000017500000000044300000000000026336 0ustar00zuulzuul00000000000000openstack.orchestration.v1.stack ================================ .. automodule:: openstack.orchestration.v1.stack The Stack Class --------------- The ``Stack`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.orchestration.v1.stack.Stack :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/orchestration/v1/stack_environment.rst0000664000175000017500000000057700000000000030772 0ustar00zuulzuul00000000000000openstack.orchestration.v1.stack_environment ============================================ .. automodule:: openstack.orchestration.v1.stack_environment The StackEnvironment Class -------------------------- The ``StackEnvironment`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.orchestration.v1.stack_environment.StackEnvironment :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/orchestration/v1/stack_event.rst0000664000175000017500000000051700000000000027541 0ustar00zuulzuul00000000000000openstack.orchestration.v1.stack_event ====================================== .. 
automodule:: openstack.orchestration.v1.stack_event The StackEvent Class -------------------- The ``StackEvent`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.orchestration.v1.stack_event.StackEvent :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/orchestration/v1/stack_files.rst0000664000175000017500000000051700000000000027522 0ustar00zuulzuul00000000000000openstack.orchestration.v1.stack_files ====================================== .. automodule:: openstack.orchestration.v1.stack_files The StackFiles Class -------------------- The ``StackFiles`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.orchestration.v1.stack_files.StackFiles :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/orchestration/v1/stack_template.rst0000664000175000017500000000054700000000000030236 0ustar00zuulzuul00000000000000openstack.orchestration.v1.stack_template ========================================= .. automodule:: openstack.orchestration.v1.stack_template The StackTemplate Class ----------------------- The ``StackTemplate`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.orchestration.v1.stack_template.StackTemplate :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/orchestration/v1/template.rst0000664000175000017500000000047300000000000027047 0ustar00zuulzuul00000000000000openstack.orchestration.v1.template =================================== .. automodule:: openstack.orchestration.v1.template The Template Class ------------------ The ``Template`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.orchestration.v1.template.Template :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1892862 openstacksdk-4.0.0/doc/source/user/resources/placement/0000775000175000017500000000000000000000000023234 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/placement/index.rst0000664000175000017500000000025000000000000025072 0ustar00zuulzuul00000000000000Placement v1 Resources ====================== .. toctree:: :maxdepth: 1 v1/resource_class v1/resource_provider v1/resource_provider_inventory v1/trait ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1892862 openstacksdk-4.0.0/doc/source/user/resources/placement/v1/0000775000175000017500000000000000000000000023562 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/placement/v1/resource_class.rst0000664000175000017500000000052700000000000027334 0ustar00zuulzuul00000000000000openstack.placement.v1.resource_class ===================================== .. automodule:: openstack.placement.v1.resource_class The ResourceClass Class ----------------------- The ``ResourceClass`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.placement.v1.resource_class.ResourceClass :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/placement/v1/resource_provider.rst0000664000175000017500000000055700000000000030064 0ustar00zuulzuul00000000000000openstack.placement.v1.resource_provider ======================================== .. 
automodule:: openstack.placement.v1.resource_provider The ResourceProvider Class -------------------------- The ``ResourceProvider`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.placement.v1.resource_provider.ResourceProvider :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/placement/v1/resource_provider_inventory.rst0000664000175000017500000000067300000000000032200 0ustar00zuulzuul00000000000000openstack.placement.v1.resource_provider_inventory ================================================== .. automodule:: openstack.placement.v1.resource_provider_inventory The ResourceProviderInventory Class ----------------------------------- The ``ResourceProviderInventory`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/placement/v1/trait.rst0000664000175000017500000000042300000000000025436 0ustar00zuulzuul00000000000000openstack.placement.v1.trait ============================ .. automodule:: openstack.placement.v1.trait The Trait Class --------------- The ``Trait`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.placement.v1.trait.Trait :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1892862 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/0000775000175000017500000000000000000000000025135 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/index.rst0000664000175000017500000000064600000000000027004 0ustar00zuulzuul00000000000000Shared File System service resources ==================================== .. toctree:: :maxdepth: 1 v2/availability_zone v2/storage_pool v2/limit v2/share v2/share_instance v2/share_network_subnet v2/share_snapshot v2/share_snapshot_instance v2/share_network v2/user_message v2/share_group v2/share_access_rule v2/share_group_snapshot v2/resource_locks v2/quota_class_set ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.193288 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/0000775000175000017500000000000000000000000025464 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/availability_zone.rst0000664000175000017500000000062300000000000031724 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.availability_zone ================================================= .. automodule:: openstack.shared_file_system.v2.availability_zone The AvailabilityZone Class -------------------------- The ``AvailabilityZone`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.shared_file_system.v2.availability_zone.AvailabilityZone :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/limit.rst0000664000175000017500000000046700000000000027343 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.limit ===================================== .. automodule:: openstack.shared_file_system.v2.limit The Limit Class --------------- The ``Limit`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.shared_file_system.v2.limit.Limit :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/quota_class_set.rst0000664000175000017500000000064400000000000031413 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.quota_class_set =============================================== .. automodule:: openstack.shared_file_system.v2.quota_class_set The QuotaClassSet Class ----------------------- The ``QuotaClassSet`` class inherits from :class:`~openstack.resource.Resource` and can be used to query quota class .. autoclass:: openstack.shared_file_system.v2.quota_class_set.QuotaClassSet :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/resource_locks.rst0000664000175000017500000000057300000000000031245 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.resource_locks ============================================== .. automodule:: openstack.shared_file_system.v2.resource_locks The Resource Locks Class ------------------------ The ``ResourceLock`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.shared_file_system.v2.resource_locks.ResourceLock :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/share.rst0000664000175000017500000000046700000000000027327 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.share ===================================== .. automodule:: openstack.shared_file_system.v2.share The Share Class --------------- The ``Share`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.shared_file_system.v2.share.Share :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/share_access_rule.rst0000664000175000017500000000061700000000000031674 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.share_access_rule ================================================= .. automodule:: openstack.shared_file_system.v2.share_access_rule The ShareAccessRule Class ------------------------- The ``ShareAccessRule`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.shared_file_system.v2.share_access_rule.ShareAccessRule :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/share_group.rst0000664000175000017500000000054300000000000030536 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.share_group =========================================== .. automodule:: openstack.shared_file_system.v2.share_group The ShareGroup Class -------------------- The ``ShareGroup`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.shared_file_system.v2.share_group.ShareGroup :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/share_group_snapshot.rst0000664000175000017500000000064700000000000032462 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.share_group_snapshot ==================================================== .. automodule:: openstack.shared_file_system.v2.share_group_snapshot The ShareGroupSnapshot Class ---------------------------- The ``ShareGroupSnapshot`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.shared_file_system.v2.share_group_snapshot.ShareGroupSnapshot :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/share_instance.rst0000664000175000017500000000057300000000000031211 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.share_instance ============================================== .. automodule:: openstack.shared_file_system.v2.share_instance The ShareInstance Class ----------------------- The ``ShareInstance`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.shared_file_system.v2.share_instance.ShareInstance :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/share_network.rst0000664000175000017500000000056300000000000031075 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.share_network ============================================= .. automodule:: openstack.shared_file_system.v2.share_network The ShareNetwork Class ---------------------- The ``ShareNetwork`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.shared_file_system.v2.share_network.ShareNetwork :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/share_network_subnet.rst0000664000175000017500000000064700000000000032460 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.share_network_subnet ==================================================== .. automodule:: openstack.shared_file_system.v2.share_network_subnet The ShareNetworkSubnet Class ---------------------------- The ``ShareNetworkSubnet`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.shared_file_system.v2.share_network_subnet.ShareNetworkSubnet :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/share_snapshot.rst0000664000175000017500000000057300000000000031244 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.share_snapshot ============================================== .. automodule:: openstack.shared_file_system.v2.share_snapshot The ShareSnapshot Class ----------------------- The ``ShareSnapshot`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.shared_file_system.v2.share_snapshot.ShareSnapshot :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/share_snapshot_instance.rst0000664000175000017500000000067700000000000033135 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.share_snapshot_instance ======================================================= .. 
automodule:: openstack.shared_file_system.v2.share_snapshot_instance The ShareSnapshotInstance Class ------------------------------- The ``ShareSnapshotInstance`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.shared_file_system.v2.share_snapshot_instance.ShareSnapshotInstance :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/storage_pool.rst0000664000175000017500000000055300000000000030716 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.storage_pool ============================================ .. automodule:: openstack.shared_file_system.v2.storage_pool The StoragePool Class --------------------- The ``StoragePool`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.shared_file_system.v2.storage_pool.StoragePool :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/shared_file_system/v2/user_message.rst0000664000175000017500000000055300000000000030703 0ustar00zuulzuul00000000000000openstack.shared_file_system.v2.user_message ============================================ .. automodule:: openstack.shared_file_system.v2.user_message The UserMessage Class --------------------- The ``UserMessage`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.shared_file_system.v2.user_message.UserMessage :members: ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.193288 openstacksdk-4.0.0/doc/source/user/resources/workflow/0000775000175000017500000000000000000000000023136 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/workflow/index.rst0000664000175000017500000000016600000000000025002 0ustar00zuulzuul00000000000000Workflow Resources ================== .. toctree:: :maxdepth: 1 v2/execution v2/workflow v2/crontrigger ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.193288 openstacksdk-4.0.0/doc/source/user/resources/workflow/v2/0000775000175000017500000000000000000000000023465 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/workflow/v2/crontrigger.rst0000664000175000017500000000050300000000000026542 0ustar00zuulzuul00000000000000openstack.workflow.v2.cron_trigger ================================== .. automodule:: openstack.workflow.v2.cron_trigger The CronTrigger Class --------------------- The ``CronTrigger`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.workflow.v2.cron_trigger.CronTrigger :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/workflow/v2/execution.rst0000664000175000017500000000045700000000000026230 0ustar00zuulzuul00000000000000openstack.workflow.v2.execution =============================== .. automodule:: openstack.workflow.v2.execution The Execution Class ------------------- The ``Execution`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.workflow.v2.execution.Execution :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/resources/workflow/v2/workflow.rst0000664000175000017500000000044700000000000026076 0ustar00zuulzuul00000000000000openstack.workflow.v2.workflow ============================== .. automodule:: openstack.workflow.v2.workflow The Workflow Class ------------------ The ``Workflow`` class inherits from :class:`~openstack.resource.Resource`. .. autoclass:: openstack.workflow.v2.workflow.Workflow :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/service_description.rst0000664000175000017500000000033000000000000024043 0ustar00zuulzuul00000000000000ServiceDescription ================== .. automodule:: openstack.service_description ServiceDescription object ------------------------- .. autoclass:: openstack.service_description.ServiceDescription :members: ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.193288 openstacksdk-4.0.0/doc/source/user/testing/0000775000175000017500000000000000000000000020727 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/testing/fakes.rst0000664000175000017500000000007700000000000022556 0ustar00zuulzuul00000000000000Fakes ===== .. automodule:: openstack.test.fakes :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/testing/index.rst0000664000175000017500000000024300000000000022567 0ustar00zuulzuul00000000000000======================================== Testing applications using OpenStack SDK ======================================== .. 
toctree:: :maxdepth: 1 fakes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/transition_from_profile.rst0000664000175000017500000001726200000000000024751 0ustar00zuulzuul00000000000000Transition from Profile ======================= .. note:: This section describes migrating code from a previous interface of openstacksdk and can be ignored by people writing new code. If you have code that currently uses the :class:`~openstack.profile.Profile` object and/or an ``authenticator`` instance from an object based on ``openstack.auth.base.BaseAuthPlugin``, that code should be updated to use the :class:`~openstack.config.cloud_region.CloudRegion` object instead. .. important:: :class:`~openstack.profile.Profile` is going away. Existing code using it should be migrated as soon as possible. Writing Code that Works with Both --------------------------------- These examples should all work with both the old and new interface, with one caveat. With the old interface, the ``CloudConfig`` object comes from the ``os-client-config`` library, and in the new interface that has been moved into the SDK. In order to write code that works with both the old and new interfaces, use the following code to import the config namespace: .. code-block:: python try: from openstack import config as occ except ImportError: from os_client_config import config as occ The examples will assume that the config module has been imported in that manner. .. note:: Yes, there is an easier and less verbose way to do all of these. These are verbose to handle both the old and new interfaces in the same codebase. Replacing authenticator ----------------------- There is no direct replacement for ``openstack.auth.base.BaseAuthPlugin``. ``openstacksdk`` uses the `keystoneauth`_ library for authentication and HTTP interactions. `keystoneauth`_ has `auth plugins`_ that can be used to control how authentication is done. 
The ``auth_type`` config parameter can be set to choose the correct authentication method to be used. Replacing Profile ----------------- The right way to replace the use of ``openstack.profile.Profile`` depends a bit on what you're trying to accomplish. Common patterns are listed below, but in general the approach is either to pass a cloud name to the `openstack.connection.Connection` constructor, or to construct a `openstack.config.cloud_region.CloudRegion` object and pass it to the constructor. All of the examples on this page assume that you want to support old and new interfaces simultaneously. There are easier and less verbose versions of each that are available if you can just make a clean transition. Getting a Connection to a named cloud from clouds.yaml ------------------------------------------------------ If you want is to construct a `openstack.connection.Connection` based on parameters configured in a ``clouds.yaml`` file, or from environment variables: .. code-block:: python import openstack.connection conn = connection.from_config(cloud_name='name-of-cloud-you-want') Getting a Connection from python arguments avoiding clouds.yaml --------------------------------------------------------------- If, on the other hand, you want to construct a `openstack.connection.Connection`, but are in a context where reading config from a clouds.yaml file is undesirable, such as inside of a Service: * create a `openstack.config.loader.OpenStackConfig` object, telling it to not load yaml files. Optionally pass an ``app_name`` and ``app_version`` which will be added to user-agent strings. * get a `openstack.config.cloud_region.CloudRegion` object from it * get a `openstack.connection.Connection` .. 
code-block:: python try: from openstack import config as occ except ImportError: from os_client_config import config as occ from openstack import connection loader = occ.OpenStackConfig( load_yaml_files=False, app_name='spectacular-app', app_version='1.0') cloud_region = loader.get_one_cloud( region_name='my-awesome-region', auth_type='password', auth=dict( auth_url='https://auth.example.com', username='amazing-user', user_domain_name='example-domain', project_name='astounding-project', user_project_name='example-domain', password='super-secret-password', )) conn = connection.from_config(cloud_config=cloud_region) .. note:: app_name and app_version are completely optional, and auth_type defaults to 'password'. They are shown here for clarity as to where they should go if they want to be set. Getting a Connection from python arguments and optionally clouds.yaml --------------------------------------------------------------------- If you want to make a connection from python arguments and want to allow one of them to optionally be ``cloud`` to allow selection of a named cloud, it's essentially the same as the previous example, except without ``load_yaml_files=False``. .. code-block:: python try: from openstack import config as occ except ImportError: from os_client_config import config as occ from openstack import connection loader = occ.OpenStackConfig( app_name='spectacular-app', app_version='1.0') cloud_region = loader.get_one_cloud( region_name='my-awesome-region', auth_type='password', auth=dict( auth_url='https://auth.example.com', username='amazing-user', user_domain_name='example-domain', project_name='astounding-project', user_project_name='example-domain', password='super-secret-password', )) conn = connection.from_config(cloud_config=cloud_region) Parameters to get_one_cloud --------------------------- The most important things to note are: * ``auth_type`` specifies which kind of authentication plugin to use. 
It controls how authentication is done, as well as what parameters are required. * ``auth`` is a dictionary containing the parameters needed by the auth plugin. The most common information it needs are user, project, domain, auth_url and password. * The rest of the keyword arguments to ``openstack.config.loader.OpenStackConfig.get_one_cloud`` are either parameters needed by the `keystoneauth Session`_ object, which control how HTTP connections are made, or parameters needed by the `keystoneauth Adapter`_ object, which control how services are found in the Keystone Catalog. For `keystoneauth Adapter`_ parameters, since there is one `openstack.connection.Connection` object but many services, per-service parameters are formed by using the official ``service_type`` of the service in question. For instance, to override the endpoint for the ``compute`` service, the parameter ``compute_endpoint_override`` would be used. ``region_name`` in ``openstack.profile.Profile`` was a per-service parameter. This is no longer a valid concept. An `openstack.connection.Connection` is a connection to a region of a cloud. If you are in an extreme situation where you have one service in one region and a different service in a different region, you must use two different `openstack.connection.Connection` objects. .. note:: service_type, although a parameter for keystoneauth1.adapter.Adapter, is not a valid parameter for get_one_cloud. service_type is the key by which services are referred, so saying 'compute_service_type="henry"' doesn't have any meaning. .. _keystoneauth: https://docs.openstack.org/keystoneauth/latest/ .. _auth plugins: https://docs.openstack.org/keystoneauth/latest/authentication-plugins.html .. _keystoneauth Adapter: https://docs.openstack.org/keystoneauth/latest/api/keystoneauth1.html#keystoneauth1.adapter.Adapter .. 
_keystoneauth Session: https://docs.openstack.org/keystoneauth/latest/api/keystoneauth1.html#keystoneauth1.session.Session ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/utils.rst0000664000175000017500000000006400000000000021144 0ustar00zuulzuul00000000000000Utilities ========= .. automodule:: openstack.utils ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/doc/source/user/warnings.rst0000664000175000017500000000142000000000000021631 0ustar00zuulzuul00000000000000Warnings ======== openstacksdk uses the `warnings`__ infrastructure to warn users about deprecated resources and resource fields, as well as deprecated behavior in openstacksdk itself. These warnings are derived from ``Warning`` or ``DeprecationWarning``. In Python, warnings are emitted by default while deprecation warnings are silenced by default and must be turned on using the ``-Wa`` Python command line option or the ``PYTHONWARNINGS`` environment variable. If you are writing an application that uses openstacksdk, you may wish to enable some of these warnings during test runs to ensure you migrate away from deprecated behavior. Available warnings ------------------ .. automodule:: openstack.warnings :members: .. 
__: https://docs.python.org/3/library/warnings.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/docs-requirements.txt0000664000175000017500000000005500000000000020441 0ustar00zuulzuul00000000000000-r requirements.txt -r test-requirements.txt ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.193288 openstacksdk-4.0.0/examples/0000775000175000017500000000000000000000000016045 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/__init__.py0000664000175000017500000000000000000000000020144 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.193288 openstacksdk-4.0.0/examples/baremetal/0000775000175000017500000000000000000000000020001 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/baremetal/list.py0000664000175000017500000000135000000000000021325 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Bare Metal service. 
""" def list_nodes(conn): print("List Nodes:") for node in conn.baremetal.nodes(): print(node) # TODO(dtantsur): other resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/baremetal/provisioning.py0000664000175000017500000000251700000000000023106 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Operations with the provision state in the Bare Metal service. """ def manage_and_inspect_node(conn, uuid): node = conn.baremetal.find_node(uuid) print('Before:', node.provision_state) conn.baremetal.set_node_provision_state(node, 'manage') conn.baremetal.wait_for_nodes_provision_state([node], 'manageable') conn.baremetal.set_node_provision_state(node, 'inspect') res = conn.baremetal.wait_for_nodes_provision_state([node], 'manageable') print('After:', res[0].provision_state) def provide_node(conn, uuid): node = conn.baremetal.find_node(uuid) print('Before:', node.provision_state) conn.baremetal.set_node_provision_state(node, 'provide') res = conn.baremetal.wait_for_nodes_provision_state([node], 'available') print('After:', res[0].provision_state) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1972902 openstacksdk-4.0.0/examples/cloud/0000775000175000017500000000000000000000000017153 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/cleanup-servers.py0000664000175000017500000000175700000000000022655 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import cloud as openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) for cloud_name, region_name in [ ('my-vexxhost', 'ca-ymq-1'), ('my-citycloud', 'Buf1'), ('my-internap', 'ams01'), ]: # Initialize cloud cloud = openstack.connect(cloud=cloud_name, region_name=region_name) for server in cloud.search_servers('my-server'): cloud.delete_server(server, wait=True, delete_ips=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/create-server-dict.py0000664000175000017500000000277300000000000023226 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import cloud as openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) for cloud_name, region_name, image, flavor_id in [ ( 'my-vexxhost', 'ca-ymq-1', 'Ubuntu 16.04.1 LTS [2017-03-03]', '5cf64088-893b-46b5-9bb1-ee020277635d', ), ( 'my-citycloud', 'Buf1', 'Ubuntu 16.04 Xenial Xerus', '0dab10b5-42a2-438e-be7b-505741a7ffcc', ), ('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4'), ]: # Initialize cloud cloud = openstack.connect(cloud=cloud_name, region_name=region_name) # Boot a server, wait for it to boot, and then do whatever is needed # to get a public ip for it. server = cloud.create_server( 'my-server', image=image, flavor=dict(id=flavor_id), wait=True, auto_ip=True, ) # Delete it - this is a demo cloud.delete_server(server, wait=True, delete_ips=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/create-server-name-or-id.py0000664000175000017500000000300000000000000024213 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import cloud as openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) for cloud_name, region_name, image, flavor in [ ( 'my-vexxhost', 'ca-ymq-1', 'Ubuntu 16.04.1 LTS [2017-03-03]', 'v1-standard-4', ), ('my-citycloud', 'Buf1', 'Ubuntu 16.04 Xenial Xerus', '4C-4GB-100GB'), ('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4'), ]: # Initialize cloud cloud = openstack.connect(cloud=cloud_name, region_name=region_name) cloud.delete_server('my-server', wait=True, delete_ips=True) # Boot a server, wait for it to boot, and then do whatever is needed # to get a public ip for it. server = cloud.create_server( 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True ) print(server.name) print(server['name']) cloud.pprint(server) # Delete it - this is a demo cloud.delete_server(server, wait=True, delete_ips=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/debug-logging.py0000664000175000017500000000135400000000000022242 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import cloud as openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='my-vexxhost', region_name='ca-ymq-1') cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/find-an-image.py0000664000175000017500000000140600000000000022122 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import cloud as openstack openstack.enable_logging() cloud = openstack.connect(cloud='fuga', region_name='cystack') cloud.pprint( [image for image in cloud.list_images() if 'ubuntu' in image.name.lower()] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/http-debug-logging.py0000664000175000017500000000136100000000000023215 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import cloud as openstack openstack.enable_logging(http_debug=True) cloud = openstack.connect(cloud='my-vexxhost', region_name='ca-ymq-1') cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/munch-dict-object.py0000664000175000017500000000137400000000000023031 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import cloud as openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='ovh', region_name='SBG1') image = cloud.get_image('Ubuntu 16.10') print(image.name) print(image['name']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/normalization.py0000664000175000017500000000143600000000000022417 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import cloud as openstack openstack.enable_logging() cloud = openstack.connect(cloud='fuga', region_name='cystack') image = cloud.get_image( 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image' ) cloud.pprint(image) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/server-information.py0000664000175000017500000000233300000000000023357 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='my-citycloud', region_name='Buf1') try: server = cloud.create_server( 'my-server', image='Ubuntu 16.04 Xenial Xerus', flavor=dict(id='0dab10b5-42a2-438e-be7b-505741a7ffcc'), wait=True, auto_ip=True, ) print("\n\nFull Server\n\n") cloud.pprint(server) print("\n\nTurn Detailed Off\n\n") cloud.pprint(cloud.get_server('my-server', detailed=False)) print("\n\nBare Server\n\n") cloud.pprint(cloud.get_server('my-server', bare=True)) finally: # Delete it - this is a demo cloud.delete_server(server, wait=True, delete_ips=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/service-conditional-overrides.py0000664000175000017500000000127000000000000025466 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='rax', region_name='DFW') print(cloud.has_service('network')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/service-conditionals.py0000664000175000017500000000136100000000000023652 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='kiss', region_name='region1') print(cloud.has_service('network')) print(cloud.has_service('container-orchestration')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/strict-mode.py0000664000175000017500000000142300000000000021757 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import openstack openstack.enable_logging() cloud = openstack.connect(cloud='fuga', region_name='cystack', strict=True) image = cloud.get_image( 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image' ) cloud.pprint(image) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/upload-large-object.py0000664000175000017500000000160000000000000023342 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='ovh', region_name='SBG1') cloud.create_object( container='my-container', name='my-object', filename='/home/mordred/briarcliff.sh3d', segment_size=1000000, ) cloud.delete_object('my-container', 'my-object') cloud.delete_container('my-container') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/upload-object.py0000664000175000017500000000160000000000000022252 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import openstack openstack.enable_logging(debug=True) cloud = openstack.connect(cloud='ovh', region_name='SBG1') cloud.create_object( container='my-container', name='my-object', filename='/home/mordred/briarcliff.sh3d', segment_size=1000000, ) cloud.delete_object('my-container', 'my-object') cloud.delete_container('my-container') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/cloud/user-agent.py0000664000175000017500000000132400000000000021577 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import openstack openstack.enable_logging(http_debug=True) cloud = openstack.connect( cloud='datacentred', app_name='AmazingApp', app_version='1.0' ) cloud.list_networks() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.201292 openstacksdk-4.0.0/examples/clustering/0000775000175000017500000000000000000000000020224 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/clustering/__init__.py0000664000175000017500000000000000000000000022323 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/clustering/action.py0000664000175000017500000000211600000000000022053 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Managing policies in the Cluster service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html """ ACTION_ID = "06ad259b-d6ab-4eb2-a0fa-fb144437eab1" def list_actions(conn): print("List Actions:") for actions in conn.clustering.actions(): print(actions.to_dict()) for actions in conn.clustering.actions(sort='name:asc'): print(actions.to_dict()) def get_action(conn): print("Get Action:") action = conn.clustering.get_action(ACTION_ID) print(action.to_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/clustering/cluster.py0000664000175000017500000001017000000000000022256 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Managing policies in the Cluster service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html """ CLUSTER_NAME = "Test_Cluster" CLUSTER_ID = "47d808e5-ce75-4a1e-bfd2-4ed4639e8640" PROFILE_ID = "b0e3a680-e270-4eb8-9361-e5c9503fba0a" NODE_ID = "dd803d4a-015d-4223-b15f-db29bad3146c" POLICY_ID = "c0e3a680-e270-4eb8-9361-e5c9503fba00" def list_cluster(conn): print("List clusters:") for cluster in conn.clustering.clusters(): print(cluster.to_dict()) for cluster in conn.clustering.clusters(sort='name:asc'): print(cluster.to_dict()) def create_cluster(conn): print("Create cluster:") spec = { "name": CLUSTER_NAME, "profile_id": PROFILE_ID, "min_size": 0, "max_size": -1, "desired_capacity": 1, } cluster = conn.clustering.create_cluster(**spec) print(cluster.to_dict()) def get_cluster(conn): print("Get cluster:") cluster = conn.clustering.get_cluster(CLUSTER_ID) print(cluster.to_dict()) def find_cluster(conn): print("Find cluster:") cluster = conn.clustering.find_cluster(CLUSTER_ID) print(cluster.to_dict()) def update_cluster(conn): print("Update cluster:") spec = { "name": "Test_Cluster001", "profile_id": "c0e3a680-e270-4eb8-9361-e5c9503fba0a", "profile_only": True, } cluster = conn.clustering.update_cluster(CLUSTER_ID, **spec) print(cluster.to_dict()) def delete_cluster(conn): print("Delete cluster:") conn.clustering.delete_cluster(CLUSTER_ID) print("Cluster deleted.") # cluster support force delete conn.clustering.delete_cluster(CLUSTER_ID, False, True) print("Cluster deleted") def add_nodes_to_cluster(conn): print("Add nodes to cluster:") node_ids = [NODE_ID] res = conn.clustering.add_nodes_to_cluster(CLUSTER_ID, node_ids) print(res) def remove_nodes_from_cluster(conn): print("Remove nodes from a cluster:") node_ids = [NODE_ID] res = conn.clustering.remove_nodes_from_cluster(CLUSTER_ID, node_ids) print(res) def replace_nodes_in_cluster(conn): print("Replace the nodes in a cluster with specified nodes:") old_node = NODE_ID new_node = 
"cd803d4a-015d-4223-b15f-db29bad3146c" spec = {old_node: new_node} res = conn.clustering.replace_nodes_in_cluster(CLUSTER_ID, **spec) print(res) def scale_out_cluster(conn): print("Inflate the size of a cluster:") res = conn.clustering.scale_out_cluster(CLUSTER_ID, 1) print(res) def scale_in_cluster(conn): print("Shrink the size of a cluster:") res = conn.clustering.scale_in_cluster(CLUSTER_ID, 1) print(res) def resize_cluster(conn): print("Resize of cluster:") spec = { 'min_size': 1, 'max_size': 6, 'adjustment_type': 'EXACT_CAPACITY', 'number': 2, } res = conn.clustering.resize_cluster(CLUSTER_ID, **spec) print(res) def attach_policy_to_cluster(conn): print("Attach policy to a cluster:") spec = {'enabled': True} res = conn.clustering.attach_policy_to_cluster( CLUSTER_ID, POLICY_ID, **spec ) print(res) def detach_policy_from_cluster(conn): print("Detach a policy from a cluster:") res = conn.clustering.detach_policy_from_cluster(CLUSTER_ID, POLICY_ID) print(res) def check_cluster(conn): print("Check cluster:") res = conn.clustering.check_cluster(CLUSTER_ID) print(res) def recover_cluster(conn): print("Recover cluster:") spec = {'check': True} res = conn.clustering.recover_cluster(CLUSTER_ID, **spec) print(res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/clustering/event.py0000664000175000017500000000207700000000000021725 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Managing policies in the Cluster service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html """ EVENT_ID = "5d982071-76c5-4733-bf35-b9e38a563c99" def list_events(conn): print("List Events:") for events in conn.clustering.events(): print(events.to_dict()) for events in conn.clustering.events(sort='name:asc'): print(events.to_dict()) def get_event(conn): print("Get Event:") event = conn.clustering.get_event(EVENT_ID) print(event.to_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/clustering/node.py0000664000175000017500000000424600000000000021531 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Managing policies in the Cluster service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html """ NODE_NAME = 'Test_Node' NODE_ID = 'dd803d4a-015d-4223-b15f-db29bad3146c' PROFILE_ID = "b0e3a680-e270-4eb8-9361-e5c9503fba0a" def list_nodes(conn): print("List Nodes:") for node in conn.clustering.nodes(): print(node.to_dict()) for node in conn.clustering.nodes(sort='asc:name'): print(node.to_dict()) def create_node(conn): print("Create Node:") spec = { 'name': NODE_NAME, 'profile_id': PROFILE_ID, } node = conn.clustering.create_node(**spec) print(node.to_dict()) def get_node(conn): print("Get Node:") node = conn.clustering.get_node(NODE_ID) print(node.to_dict()) def find_node(conn): print("Find Node:") node = conn.clustering.find_node(NODE_ID) print(node.to_dict()) def update_node(conn): print("Update Node:") spec = { 'name': 'Test_Node01', 'profile_id': 'c0e3a680-e270-4eb8-9361-e5c9503fba0b', } node = conn.clustering.update_node(NODE_ID, **spec) print(node.to_dict()) def delete_node(conn): print("Delete Node:") conn.clustering.delete_node(NODE_ID) print("Node deleted.") # node support force delete conn.clustering.delete_node(NODE_ID, False, True) print("Node deleted") def check_node(conn): print("Check Node:") node = conn.clustering.check_node(NODE_ID) print(node) def recover_node(conn): print("Recover Node:") spec = {'check': True} node = conn.clustering.recover_node(NODE_ID, **spec) print(node) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/clustering/policy.py0000664000175000017500000000351600000000000022102 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Managing policies in the Cluster service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html """ def list_policies(conn): print("List Policies:") for policy in conn.clustering.policies(): print(policy.to_dict()) for policy in conn.clustering.policies(sort='name:asc'): print(policy.to_dict()) def create_policy(conn): print("Create Policy:") attrs = { 'name': 'dp01', 'spec': { 'policy': 'senlin.policy.deletion', 'version': 1.0, 'properties': { 'criteria': 'oldest_first', 'destroy_after_deletion': True, }, }, } policy = conn.clustering.create_policy(attrs) print(policy.to_dict()) def get_policy(conn): print("Get Policy:") policy = conn.clustering.get_policy('dp01') print(policy.to_dict()) def find_policy(conn): print("Find Policy:") policy = conn.clustering.find_policy('dp01') print(policy.to_dict()) def update_policy(conn): print("Update Policy:") policy = conn.clustering.update_policy('dp01', name='dp02') print(policy.to_dict()) def delete_policy(conn): print("Delete Policy:") conn.clustering.delete_policy('dp01') print("Policy deleted.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/clustering/policy_type.py0000664000175000017500000000173700000000000023146 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Managing policy types in the Cluster service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html """ def list_policy_types(conn): print("List Policy Types:") for pt in conn.clustering.policy_types(): print(pt.to_dict()) def get_policy_type(conn): print("Get Policy Type:") pt = conn.clustering.get_policy_type('senlin.policy.deletion-1.0') print(pt.to_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/clustering/profile.py0000664000175000017500000000406500000000000022243 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from examples.connect import FLAVOR_NAME from examples.connect import IMAGE_NAME from examples.connect import NETWORK_NAME from examples.connect import SERVER_NAME """ Managing profiles in the Cluster service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html """ def list_profiles(conn): print("List Profiles:") for profile in conn.clustering.profiles(): print(profile.to_dict()) for profile in conn.clustering.profiles(sort='name:asc'): print(profile.to_dict()) def create_profile(conn): print("Create Profile:") spec = { 'profile': 'os.nova.server', 'version': 1.0, 'name': 'os_server', 'properties': { 'name': SERVER_NAME, 'flavor': FLAVOR_NAME, 'image': IMAGE_NAME, 'networks': {'network': NETWORK_NAME}, }, } profile = conn.clustering.create_profile(spec) print(profile.to_dict()) def get_profile(conn): print("Get Profile:") profile = conn.clustering.get_profile('os_server') print(profile.to_dict()) def find_profile(conn): print("Find Profile:") profile = conn.clustering.find_profile('os_server') print(profile.to_dict()) def update_profile(conn): print("Update Profile:") profile = conn.clustering.update_profile('os_server', name='old_server') print(profile.to_dict()) def delete_profile(conn): print("Delete Profile:") conn.clustering.delete_profile('os_server') print("Profile deleted.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/clustering/profile_type.py0000664000175000017500000000173600000000000023306 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Managing profile types in the Cluster service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html """ def list_profile_types(conn): print("List Profile Types:") for pt in conn.clustering.profile_types(): print(pt.to_dict()) def get_profile_type(conn): print("Get Profile Type:") pt = conn.clustering.get_profile_type('os.nova.server-1.0') print(pt.to_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/clustering/receiver.py0000664000175000017500000000376700000000000022417 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Managing policies in the Cluster service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html """ FAKE_NAME = 'test_receiver' CLUSTER_ID = "ae63a10b-4a90-452c-aef1-113a0b255ee3" def list_receivers(conn): print("List Receivers:") for receiver in conn.clustering.receivers(): print(receiver.to_dict()) for receiver in conn.clustering.receivers(sort='name:asc'): print(receiver.to_dict()) def create_receiver(conn): print("Create Receiver:") # Build the receiver attributes and create the recever. 
spec = { "action": "CLUSTER_SCALE_OUT", "cluster_id": CLUSTER_ID, "name": FAKE_NAME, "params": {"count": "1"}, "type": "webhook", } receiver = conn.clustering.create_receiver(**spec) print(receiver.to_dict()) def get_receiver(conn): print("Get Receiver:") receiver = conn.clustering.get_receiver(FAKE_NAME) print(receiver.to_dict()) def find_receiver(conn): print("Find Receiver:") receiver = conn.clustering.find_receiver(FAKE_NAME) print(receiver.to_dict()) def update_receiver(conn): print("Update Receiver:") spec = {"name": "test_receiver2", "params": {"count": "2"}} receiver = conn.clustering.update_receiver(FAKE_NAME, **spec) print(receiver.to_dict()) def delete_receiver(conn): print("Delete Receiver:") conn.clustering.delete_receiver(FAKE_NAME) print("Receiver deleted.") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.201292 openstacksdk-4.0.0/examples/compute/0000775000175000017500000000000000000000000017521 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/compute/__init__.py0000664000175000017500000000000000000000000021620 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/compute/create.py0000664000175000017500000000416400000000000021343 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import errno import os from examples.connect import FLAVOR_NAME from examples.connect import IMAGE_NAME from examples.connect import KEYPAIR_NAME from examples.connect import NETWORK_NAME from examples.connect import PRIVATE_KEYPAIR_FILE from examples.connect import SERVER_NAME from examples.connect import SSH_DIR """ Create resources with the Compute service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/compute.html """ def create_keypair(conn): keypair = conn.compute.find_keypair(KEYPAIR_NAME) if not keypair: print("Create Key Pair:") keypair = conn.compute.create_keypair(name=KEYPAIR_NAME) print(keypair) try: os.mkdir(SSH_DIR) except OSError as e: if e.errno != errno.EEXIST: raise e with open(PRIVATE_KEYPAIR_FILE, 'w') as f: f.write("%s" % keypair.private_key) os.chmod(PRIVATE_KEYPAIR_FILE, 0o400) return keypair def create_server(conn): print("Create Server:") image = conn.image.find_image(IMAGE_NAME) flavor = conn.compute.find_flavor(FLAVOR_NAME) network = conn.network.find_network(NETWORK_NAME) keypair = create_keypair(conn) server = conn.compute.create_server( name=SERVER_NAME, image_id=image.id, flavor_id=flavor.id, networks=[{"uuid": network.id}], key_name=keypair.name, ) server = conn.compute.wait_for_server(server) print( "ssh -i {key} root@{ip}".format( key=PRIVATE_KEYPAIR_FILE, ip=server.access_ipv4 ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/compute/delete.py0000664000175000017500000000245100000000000021337 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import os from examples.connect import KEYPAIR_NAME from examples.connect import PRIVATE_KEYPAIR_FILE from examples.connect import SERVER_NAME """ Delete resources with the Compute service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/compute.html """ def delete_keypair(conn): print("Delete Key Pair:") keypair = conn.compute.find_keypair(KEYPAIR_NAME) try: os.remove(PRIVATE_KEYPAIR_FILE) except OSError as e: if e.errno != errno.ENOENT: raise e print(keypair) conn.compute.delete_keypair(keypair) def delete_server(conn): print("Delete Server:") server = conn.compute.find_server(SERVER_NAME) print(server) conn.compute.delete_server(server) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/compute/find.py0000664000175000017500000000224300000000000021014 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import examples.connect """ Find a resource from the Compute service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/compute.html """ def find_image(conn): print("Find Image:") image = conn.image.find_image(examples.connect.IMAGE_NAME) print(image) return image def find_flavor(conn): print("Find Flavor:") flavor = conn.compute.find_flavor(examples.connect.FLAVOR_NAME) print(flavor) return flavor def find_keypair(conn): print("Find Keypair:") keypair = conn.compute.find_keypair(examples.connect.KEYPAIR_NAME) print(keypair) return keypair ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/compute/list.py0000664000175000017500000000221100000000000021042 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Compute service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/compute.html """ def list_servers(conn): print("List Servers:") for server in conn.compute.servers(): print(server) def list_images(conn): print("List Images:") for image in conn.compute.images(): print(image) def list_flavors(conn): print("List Flavors:") for flavor in conn.compute.flavors(): print(flavor) def list_keypairs(conn): print("List Keypairs:") for keypair in conn.compute.keypairs(): print(keypair) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/connect.py0000664000175000017500000000553600000000000020061 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Connect to an OpenStack cloud. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/connect_from_config.html """ import argparse import os import sys import openstack from openstack.config import loader openstack.enable_logging(True, stream=sys.stdout) #: Defines the OpenStack Config cloud key in your config file, #: typically in $HOME/.config/openstack/clouds.yaml. That configuration #: will determine where the examples will be run and what resource defaults #: will be used to run the examples. 
TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'devstack-admin') EXAMPLE_CONFIG_KEY = os.getenv('OPENSTACKSDK_EXAMPLE_CONFIG_KEY', 'example') config = loader.OpenStackConfig() cloud = openstack.connect(cloud=TEST_CLOUD) class Opts: def __init__(self, cloud_name='devstack-admin', debug=False): self.cloud = cloud_name self.debug = debug # Use identity v3 API for examples. self.identity_api_version = '3' def _get_resource_value(resource_key, default): return config.get_extra_config(EXAMPLE_CONFIG_KEY).get( resource_key, default ) SERVER_NAME = 'openstacksdk-example' IMAGE_NAME = _get_resource_value('image_name', 'cirros-0.4.0-x86_64-disk') FLAVOR_NAME = _get_resource_value('flavor_name', 'm1.small') NETWORK_NAME = _get_resource_value('network_name', 'private') KEYPAIR_NAME = _get_resource_value('keypair_name', 'openstacksdk-example') SSH_DIR = _get_resource_value( 'ssh_dir', '{home}/.ssh'.format(home=os.path.expanduser("~")) ) PRIVATE_KEYPAIR_FILE = _get_resource_value( 'private_keypair_file', f'{SSH_DIR}/id_rsa.{KEYPAIR_NAME}', ) EXAMPLE_IMAGE_NAME = 'openstacksdk-example-public-image' def create_connection_from_config(): return openstack.connect(cloud=TEST_CLOUD) def create_connection_from_args(): parser = argparse.ArgumentParser() return openstack.connect(options=parser) def create_connection( auth_url, region, project_name, username, password, user_domain, project_domain, ): return openstack.connect( auth_url=auth_url, project_name=project_name, username=username, password=password, region_name=region, user_domain_name=user_domain, project_domain_name=project_domain, app_name='examples', app_version='1.0', ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.201292 openstacksdk-4.0.0/examples/dns/0000775000175000017500000000000000000000000016631 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/examples/dns/__init__.py0000664000175000017500000000000000000000000020730 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/dns/list.py0000664000175000017500000000142100000000000020154 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the DNS service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/dns.html """ def list_zones(conn): print("List Zones:") for zone in conn.dns.zones(): print(zone) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.201292 openstacksdk-4.0.0/examples/identity/0000775000175000017500000000000000000000000017676 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/identity/__init__.py0000664000175000017500000000000000000000000021775 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/identity/list.py0000664000175000017500000000475600000000000021237 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Identity service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/identity.html """ def list_users(conn): print("List Users:") for user in conn.identity.users(): print(user) def list_credentials(conn): print("List Credentials:") for credential in conn.identity.credentials(): print(credential) def list_projects(conn): print("List Projects:") for project in conn.identity.projects(): print(project) def list_domains(conn): print("List Domains:") for domain in conn.identity.domains(): print(domain) def list_groups(conn): print("List Groups:") for group in conn.identity.groups(): print(group) def list_services(conn): print("List Services:") for service in conn.identity.services(): print(service) def list_endpoints(conn): print("List Endpoints:") for endpoint in conn.identity.endpoints(): print(endpoint) def list_regions(conn): print("List Regions:") for region in conn.identity.regions(): print(region) def list_roles(conn): print("List Roles:") for role in conn.identity.roles(): print(role) def list_role_domain_group_assignments(conn): print("List Roles assignments for a group on domain:") for role in conn.identity.role_domain_group_assignments(): print(role) def list_role_domain_user_assignments(conn): print("List Roles assignments for a user on domain:") for role in conn.identity.role_project_user_assignments(): print(role) def list_role_project_group_assignments(conn): print("List Roles assignments for a group on project:") for role in conn.identity.role_project_group_assignments(): print(role) def 
list_role_project_user_assignments(conn): print("List Roles assignments for a user on project:") for role in conn.identity.role_project_user_assignments(): print(role) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.205294 openstacksdk-4.0.0/examples/image/0000775000175000017500000000000000000000000017127 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/image/__init__.py0000664000175000017500000000000000000000000021226 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/image/create.py0000664000175000017500000000220100000000000020737 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from examples.connect import EXAMPLE_IMAGE_NAME """ Create resources with the Image service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/image.html """ def upload_image(conn): print("Upload Image:") # Load fake image data for the example. data = 'This is fake image data.' # Build the image attributes and upload the image. 
image_attrs = { 'name': EXAMPLE_IMAGE_NAME, 'data': data, 'disk_format': 'raw', 'container_format': 'bare', 'visibility': 'public', } conn.image.upload_image(**image_attrs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/image/delete.py0000664000175000017500000000162600000000000020750 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from examples.connect import EXAMPLE_IMAGE_NAME """ Delete resources with the Image service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/image.html """ def delete_image(conn): print("Delete Image:") example_image = conn.image.find_image(EXAMPLE_IMAGE_NAME) conn.image.delete_image(example_image, ignore_missing=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/image/download.py0000664000175000017500000000431500000000000021313 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import hashlib """ Download an image with the Image service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/image.html """ def download_image_stream(conn): print("Download Image via streaming:") # Find the image you would like to download. image = conn.image.find_image("myimage") # As the actual download now takes place outside of the library # and in your own code, you are now responsible for checking # the integrity of the data. Create an MD5 has to be computed # after all of the data has been consumed. md5 = hashlib.md5() with open("myimage.qcow2", "wb") as local_image: response = conn.image.download_image(image, stream=True) # Read only 1 MiB of memory at a time until # all of the image data has been consumed. for chunk in response.iter_content(chunk_size=1024 * 1024): # With each chunk, add it to the hash to be computed. md5.update(chunk) local_image.write(chunk) # Now that you've consumed all of the data the response gave you, # ensure that the checksums of what the server offered and # what you downloaded are the same. if response.headers["Content-MD5"] != md5.hexdigest(): raise Exception("Checksum mismatch in downloaded content") def download_image(conn): print("Download Image:") # Find the image you would like to download. image = conn.image.find_image("myimage") with open("myimage.qcow2", "w") as local_image: response = conn.image.download_image(image) # Response will contain the entire contents of the Image. local_image.write(response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/image/import.py0000664000175000017500000000240000000000000021007 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from examples.connect import EXAMPLE_IMAGE_NAME """ Create resources with the Image service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/image.html """ def import_image(conn): print("Import Image:") # Url where glance can download the image uri = ( 'https://download.cirros-cloud.net/0.4.0/' 'cirros-0.4.0-x86_64-disk.img' ) # Build the image attributes and import the image. image_attrs = { 'name': EXAMPLE_IMAGE_NAME, 'disk_format': 'qcow2', 'container_format': 'bare', 'visibility': 'public', } image = conn.image.create_image(**image_attrs) conn.image.import_image(image, method="web-download", uri=uri) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/image/list.py0000664000175000017500000000143400000000000020456 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Image service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/image.html """ def list_images(conn): print("List Images:") for image in conn.image.images(): print(image) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.205294 openstacksdk-4.0.0/examples/key_manager/0000775000175000017500000000000000000000000020327 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/key_manager/__init__.py0000664000175000017500000000000000000000000022426 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/key_manager/create.py0000664000175000017500000000155200000000000022147 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Key Manager service. 
""" def create_secret(conn): print("Create a secret:") conn.key_manager.create_secret( name="My public key", secret_type="public", expiration="2020-02-28T23:59:59", payload="ssh rsa...", payload_content_type="text/plain", ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/key_manager/get.py0000664000175000017500000000155000000000000021461 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Key Manager service. """ s = None def get_secret_payload(conn): print("Get a secret's payload:") # Assuming you have an object `s` which you perhaps received from # a conn.key_manager.secrets() call... secret = conn.key_manager.get_secret(s.secret_id) print(secret.payload) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/key_manager/list.py0000664000175000017500000000163400000000000021660 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Key Manager service. """ def list_secrets(conn): print("List Secrets:") for secret in conn.key_manager.secrets(): print(secret) def list_secrets_query(conn): print("List Secrets:") for secret in conn.key_manager.secrets( secret_type="symmetric", expiration="gte:2020-01-01T00:00:00" ): print(secret) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.205294 openstacksdk-4.0.0/examples/network/0000775000175000017500000000000000000000000017536 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/network/__init__.py0000664000175000017500000000000000000000000021635 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/network/create.py0000664000175000017500000000216700000000000021361 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Create resources with the Network service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/network.html """ def create_network(conn): print("Create Network:") example_network = conn.network.create_network( name='openstacksdk-example-project-network' ) print(example_network) example_subnet = conn.network.create_subnet( name='openstacksdk-example-project-subnet', network_id=example_network.id, ip_version='4', cidr='10.0.2.0/24', gateway_ip='10.0.2.1', ) print(example_subnet) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/network/delete.py0000664000175000017500000000203200000000000021347 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Delete resources with the Network service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/network.html """ def delete_network(conn): print("Delete Network:") example_network = conn.network.find_network( 'openstacksdk-example-project-network' ) for example_subnet in example_network.subnet_ids: conn.network.delete_subnet(example_subnet, ignore_missing=False) conn.network.delete_network(example_network, ignore_missing=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/network/find.py0000664000175000017500000000156000000000000021032 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import examples.connect """ Find a resource from the Network service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/network.html """ def find_network(conn): print("Find Network:") network = conn.network.find_network(examples.connect.NETWORK_NAME) print(network) return network ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/network/list.py0000664000175000017500000000261700000000000021071 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Network service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/network.html """ def list_networks(conn): print("List Networks:") for network in conn.network.networks(): print(network) def list_subnets(conn): print("List Subnets:") for subnet in conn.network.subnets(): print(subnet) def list_ports(conn): print("List Ports:") for port in conn.network.ports(): print(port) def list_security_groups(conn): print("List Security Groups:") for port in conn.network.security_groups(): print(port) def list_routers(conn): print("List Routers:") for router in conn.network.routers(): print(router) def list_network_agents(conn): print("List Network Agents:") for agent in conn.network.agents(): print(agent) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/network/security_group_rules.py0000664000175000017500000000327000000000000024407 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Create resources with the Network service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/network.html """ def open_port(conn): print("Open a port:") example_sec_group = conn.network.create_security_group( name='openstacksdk-example-security-group' ) print(example_sec_group) example_rule = conn.network.create_security_group_rule( security_group_id=example_sec_group.id, direction='ingress', remote_ip_prefix='0.0.0.0/0', protocol='tcp', port_range_max='443', port_range_min='443', ethertype='IPv4', ) print(example_rule) def allow_ping(conn): print("Allow pings:") example_sec_group = conn.network.create_security_group( name='openstacksdk-example-security-group2' ) print(example_sec_group) example_rule = conn.network.create_security_group_rule( security_group_id=example_sec_group.id, direction='ingress', remote_ip_prefix='0.0.0.0/0', protocol='icmp', port_range_max=None, port_range_min=None, ethertype='IPv4', ) print(example_rule) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.209296 openstacksdk-4.0.0/examples/shared_file_system/0000775000175000017500000000000000000000000021716 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/shared_file_system/__init__.py0000664000175000017500000000000000000000000024015 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/shared_file_system/availability_zones.py0000664000175000017500000000154600000000000026166 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Shared File System service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/shared_file_system.html """ def list_availability_zones(conn): print("List Shared File System Availability Zones:") for az in conn.share.availability_zones(): print(az) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/shared_file_system/share_group_snapshots.py0000664000175000017500000000451200000000000026712 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Shared File System service. 
For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/shared_file_system.html """ def list_share_group_snapshots(conn, **query): print("List all share group snapshots:") share_group_snapshots = conn.share.share_group_snapshots(**query) for share_group_snapshot in share_group_snapshots: print(share_group_snapshot) def get_share_group_snapshot(conn, group_snapshot_id): print("Show share group snapshot with given Id:") share_group_snapshot = conn.share.get_share_group_snapshots( group_snapshot_id ) print(share_group_snapshot) def share_group_snapshot_members(conn, group_snapshot_id): print("Show share group snapshot members with given Id:") members = conn.share.share_group_snapshot_members(group_snapshot_id) for member in members: print(member) def create_share_group_snapshot(conn, share_group_id, **attrs): print("Creating a share group snapshot from given attributes:") share_group_snapshot = conn.share.create_share_group_snapshot( share_group_id, **attrs ) print(share_group_snapshot) def reset_share_group_snapshot_status(conn, group_snapshot_id, status): print("Reseting the share group snapshot status:") conn.share.reset_share_group_snapshot_status(group_snapshot_id, status) def update_share_group_snapshot(conn, group_snapshot_id, **attrs): print("Updating a share group snapshot with given Id:") share_group_snapshot = conn.share.update_share_group_snapshot( group_snapshot_id, **attrs ) print(share_group_snapshot) def delete_share_group_snapshot(conn, group_snapshot_id): print("Deleting a share group snapshot with given Id:") conn.share.delete_share_group_snapshot(group_snapshot_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/shared_file_system/share_instances.py0000664000175000017500000000274300000000000025447 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List resources from the Shared File System service. For a full guide see https://docs.openstack.org/openstacksdk/latest/user/guides/shared_file_system.html """ def share_instances(conn, **query): print('List all share instances:') for si in conn.share.share_instances(**query): print(si) def get_share_instance(conn, share_instance_id): print('Get share instance with given Id:') share_instance = conn.share.get_share_instance(share_instance_id) print(share_instance) def reset_share_instance_status(conn, share_instance_id, status): print( 'Reset the status of the share instance with the given ' 'share_instance_id to the given status' ) conn.share.reset_share_instance_status(share_instance_id, status) def delete_share_instance(conn, share_instance_id): print('Force-delete the share instance with the given share_instance_id') conn.share.delete_share_instance(share_instance_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/shared_file_system/share_metadata.py0000664000175000017500000000400600000000000025232 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def list_share_metadata(conn, share_id): # Method returns the entire share with the metadata inside it. returned_share = conn.get_share_metadata(share_id) # Access metadata of share metadata = returned_share['metadata'] print("List All Share Metadata:") for meta_key in metadata: print(f"{meta_key}={metadata[meta_key]}") def get_share_metadata_item(conn, share_id, key): # Method returns the entire share with the metadata inside it. returned_share = conn.get_share_metadata_item(share_id, key) # Access metadata of share metadata = returned_share['metadata'] print("Get share metadata item given item key and share id:") print(metadata[key]) def create_share_metadata(conn, share_id, metadata): # Method returns the entire share with the metadata inside it. created_share = conn.create_share_metadata(share_id, metadata) # Access metadata of share metadata = created_share['metadata'] print("Metadata created for given share:") print(metadata) def update_share_metadata(conn, share_id, metadata): # Method returns the entire share with the metadata inside it. updated_share = conn.update_share_metadata(share_id, metadata, True) # Access metadata of share metadata = updated_share['metadata'] print("Updated metadata for given share:") print(metadata) def delete_share_metadata(conn, share_id, keys): # Method doesn't return anything. 
conn.delete_share_metadata(share_id, keys) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/examples/shared_file_system/shares.py0000664000175000017500000000401100000000000023551 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def resize_share(conn, share_id, share_size): # Be explicit about not wanting to use force if the share # will be extended. use_force = False print('Resize the share to the given size:') conn.share.resize_share(share_id, share_size, use_force) def resize_shares_without_shrink(conn, min_size): # Sometimes, extending shares without shrinking # them (effectively setting a min size) is desirable. # Get list of shares from the connection. shares = conn.share.shares() # Loop over the shares: for share in shares: # Extend shares smaller than min_size to min_size, # but don't shrink shares larger than min_size. 
conn.share.resize_share(share.id, min_size, no_shrink=True) def manage_share(conn, protocol, export_path, service_host, **params): # Manage a share with the given protocol, export path, service host, and # optional additional parameters managed_share = conn.share.manage_share( protocol, export_path, service_host, **params ) # Can get the ID of the share, which is now being managed with Manila managed_share_id = managed_share.id print("The ID of the share which was managed: %s", managed_share_id) def unmanage_share(conn, share_id): # Unmanage the share with the given share ID conn.share.unmanage_share(share_id) try: # Getting the share will raise an exception as it has been unmanaged conn.share.get_share(share_id) except Exception: pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.209296 openstacksdk-4.0.0/extras/0000775000175000017500000000000000000000000015535 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/extras/delete-network.sh0000664000175000017500000000107400000000000021024 0ustar00zuulzuul00000000000000neutron router-gateway-clear router1 neutron router-interface-delete router1 for subnet in private-subnet ipv6-private-subnet ; do neutron router-interface-delete router1 $subnet subnet_id=$(neutron subnet-show $subnet -f value -c id) neutron port-list | grep $subnet_id | awk '{print $2}' | xargs -n1 neutron port-delete neutron subnet-delete $subnet done neutron router-delete router1 neutron net-delete private # Make the public network directly consumable neutron subnet-update public-subnet --enable-dhcp=True neutron net-update public --shared=True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/extras/run-ansible-tests.sh0000775000175000017500000000602000000000000021451 0ustar00zuulzuul00000000000000#!/bin/bash 
############################################################################# # run-ansible-tests.sh # # Script used to setup a tox environment for running Ansible. This is meant # to be called by tox (via tox.ini). To run the Ansible tests, use: # # tox -e ansible [TAG ...] # or # tox -e ansible -- -c cloudX [TAG ...] # or to use the development version of Ansible: # tox -e ansible -- -d -c cloudX [TAG ...] # # USAGE: # run-ansible-tests.sh -e ENVDIR [-d] [-c CLOUD] [TAG ...] # # PARAMETERS: # -d Use Ansible source repo development branch. # -e ENVDIR Directory of the tox environment to use for testing. # -c CLOUD Name of the cloud to use for testing. # Defaults to "devstack-admin". # [TAG ...] Optional list of space-separated tags to control which # modules are tested. # # EXAMPLES: # # Run all Ansible tests # run-ansible-tests.sh -e ansible # # # Run auth, keypair, and network tests against cloudX # run-ansible-tests.sh -e ansible -c cloudX auth keypair network ############################################################################# CLOUD="devstack-admin" ENVDIR= USE_DEV=0 while getopts "c:de:" opt do case $opt in d) USE_DEV=1 ;; c) CLOUD=${OPTARG} ;; e) ENVDIR=${OPTARG} ;; ?) echo "Invalid option: -${OPTARG}" exit 1;; esac done if [ -z ${ENVDIR} ] then echo "Option -e is required" exit 1 fi shift $((OPTIND-1)) TAGS=$( echo "$*" | tr ' ' , ) # We need to source the current tox environment so that Ansible will # be setup for the correct python environment. source $ENVDIR/bin/activate if [ ${USE_DEV} -eq 1 ] then if [ -d ${ENVDIR}/ansible ] then echo "Using existing Ansible source repo" else echo "Installing Ansible source repo at $ENVDIR" git clone --recursive https://github.com/ansible/ansible.git ${ENVDIR}/ansible fi source $ENVDIR/ansible/hacking/env-setup fi # Run the shade Ansible tests tag_opt="" if [ ! 
-z ${TAGS} ] then tag_opt="--tags ${TAGS}" fi # Loop through all ANSIBLE_VAR_ environment variables to allow passing the further for var in $(env | grep -e '^ANSIBLE_VAR_'); do VAR_NAME=${var%%=*} # split variable name from value ANSIBLE_VAR_NAME=${VAR_NAME#ANSIBLE_VAR_} # cut ANSIBLE_VAR_ prefix from variable name ANSIBLE_VAR_NAME=${ANSIBLE_VAR_NAME,,} # lowercase ansible variable ANSIBLE_VAR_VALUE=${!VAR_NAME} # Get the variable value ANSIBLE_VARS+="${ANSIBLE_VAR_NAME}=${ANSIBLE_VAR_VALUE} " # concat variables done # Until we have a module that lets us determine the image we want from # within a playbook, we have to find the image here and pass it in. # We use the openstack client instead of nova client since it can use clouds.yaml. IMAGE=`openstack --os-cloud=${CLOUD} image list -f value -c Name | grep cirros | grep -v -e ramdisk -e kernel` if [ $? -ne 0 ] then echo "Failed to find Cirros image" exit 1 fi ansible-playbook -vvv ./openstack/tests/ansible/run.yml -e "cloud=${CLOUD} image=${IMAGE} ${ANSIBLE_VARS}" ${tag_opt} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/include-acceptance-regular-user.txt0000664000175000017500000000105200000000000023110 0ustar00zuulzuul00000000000000# This file contains list of tests that can work with regular user privileges # Until all tests are modified to properly identify whether they are able to # run or must skip the ones that are known to work are listed here. 
### Block Storage openstack.tests.functional.block_storage.v3.test_volume # Do not enable test_backup for now, since it is not capable to determine # backup capabilities of the cloud # openstack.tests.functional.block_storage.v3.test_backup ### Cloud openstack.tests.functional.cloud ### Network openstack.tests.functional.network ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2132978 openstacksdk-4.0.0/openstack/0000775000175000017500000000000000000000000016216 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/__init__.py0000664000175000017500000000722000000000000020330 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The openstack SDK. :py:mod:`openstacksdk` is a client library for building applications to work with OpenStack clouds. The project aims to provide a consistent and complete set of interactions with OpenStack's many services, along with complete documentation, examples, and tools. There are three ways to interact with :py:mod:`openstacksdk`. The *clouds layer*, the *proxy layer*, and the *resource layer*. Most users will make use of either the *cloud layer* or *proxy layer*. 
Listing flavours using the *cloud layer*:: >>> import openstack >>> conn = openstack.connect(cloud='mordred') >>> for server in conn.list_servers(): ... print(server.to_dict()) Listing servers using the *proxy layer*:: >>> import openstack >>> conn = openstack.connect(cloud='mordred') >>> for server in conn.compute.servers(): ... print(server.to_dict()) Listing servers using the *resource layer*:: >>> import openstack >>> import openstack.compute.v2.server >>> conn = openstack.connect(cloud='mordred') >>> for server in openstack.compute.v2.server.Server.list( ... session=conn.compute, ... ): ... print(server.to_dict()) For more information, refer to the documentation found in each submodule. """ import argparse import typing as ty from openstack._log import enable_logging import openstack.config import openstack.connection __all__ = [ 'connect', 'enable_logging', ] def connect( cloud: ty.Optional[str] = None, app_name: ty.Optional[str] = None, app_version: ty.Optional[str] = None, options: ty.Optional[argparse.Namespace] = None, load_yaml_config: bool = True, load_envvars: bool = True, **kwargs, ) -> openstack.connection.Connection: """Create a :class:`~openstack.connection.Connection` :param string cloud: The name of the configuration to load from clouds.yaml. Defaults to 'envvars' which will load configuration settings from environment variables that start with ``OS_``. :param argparse.Namespace options: An argparse Namespace object. Allows direct passing in of argparse options to be added to the cloud config. Values of None and '' will be removed. :param bool load_yaml_config: Whether or not to load config settings from clouds.yaml files. Defaults to True. :param bool load_envvars: Whether or not to load config settings from environment variables. Defaults to True. :param kwargs: Additional configuration options. 
:returns: openstack.connnection.Connection :raises: keystoneauth1.exceptions.MissingRequiredOptions on missing required auth parameters """ cloud_region = openstack.config.get_cloud_region( cloud=cloud, app_name=app_name, app_version=app_version, load_yaml_config=load_yaml_config, load_envvars=load_envvars, options=options, **kwargs, ) return openstack.connection.Connection( config=cloud_region, vendor_hook=kwargs.get('vendor_hook'), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/__main__.py0000664000175000017500000000220200000000000020304 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse import sys import pbr.version def show_version(args): print( "OpenstackSDK Version %s" % pbr.version.VersionInfo('openstacksdk').version_string_with_vcs() ) parser = argparse.ArgumentParser(description="Openstack SDK") subparsers = parser.add_subparsers(title='commands', dest='command') cmd_version = subparsers.add_parser( 'version', help='show Openstack SDK version' ) cmd_version.set_defaults(func=show_version) args = parser.parse_args() if not args.command: parser.print_help() sys.exit(1) args.func(args) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2132978 openstacksdk-4.0.0/openstack/_hacking/0000775000175000017500000000000000000000000017761 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/_hacking/checks.py0000664000175000017500000000374100000000000021600 0ustar00zuulzuul00000000000000# Copyright (c) 2019, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re from hacking import core """ Guidelines for writing new hacking checks - Use only for openstacksdk specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range O3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the O3xx value. 
- List the new rule in the top level HACKING.rst file - Add test cases for each new rule to openstack/tests/unit/test_hacking.py """ SETUPCLASS_RE = re.compile(r"def setUpClass\(") @core.flake8ext def assert_no_setupclass(logical_line): """Check for use of setUpClass O300 """ if SETUPCLASS_RE.match(logical_line): yield (0, "O300: setUpClass not allowed") @core.flake8ext def assert_no_deprecated_exceptions(logical_line, filename): """Check for use of deprecated cloud-layer exceptions 0310 """ if filename.endswith(os.path.join('openstack', 'cloud', 'exc.py')): return for exception in ( 'OpenStackCloudCreateException', 'OpenStackCloudTimeout', 'OpenStackCloudHTTPError', 'OpenStackCloudBadRequest', 'OpenStackCloudURINotFound', 'OpenStackCloudResourceNotFound', ): if re.search(fr'\b{exception}\b', logical_line): yield (0, 'O310: Use of deprecated Exception class') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/_log.py0000664000175000017500000001153000000000000017510 0ustar00zuulzuul00000000000000# Copyright (c) 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import sys import typing as ty def setup_logging( name: str, handlers: ty.Optional[ty.List[logging.Handler]] = None, level: ty.Optional[int] = None, ) -> logging.Logger: """Set up logging for a named logger. Gets and initializes a named logger, ensuring it at least has a `logging.NullHandler` attached. 
:param str name: Name of the logger. :param list handlers: A list of `logging.Handler` objects to attach to the logger. :param int level: Log level to set the logger at. :returns: A `logging.Logger` object that can be used to emit log messages. """ handlers = handlers or [] log = logging.getLogger(name) if len(log.handlers) == 0 and not handlers: log.addHandler(logging.NullHandler()) for h in handlers: log.addHandler(h) if level: log.setLevel(level) return log def enable_logging( debug: bool = False, http_debug: bool = False, path: ty.Optional[str] = None, stream: ty.Optional[ty.TextIO] = None, format_stream: bool = False, format_template: str = '%(asctime)s %(levelname)s: %(name)s %(message)s', handlers: ty.Optional[ty.List[logging.Handler]] = None, ) -> None: """Enable logging output. Helper function to enable logging. This function is available for debugging purposes and for folks doing simple applications who want an easy 'just make it work for me'. For more complex applications or for those who want more flexibility, the standard library ``logging`` package will receive these messages in any handlers you create. :param bool debug: Set this to ``True`` to receive debug messages. :param bool http_debug: Set this to ``True`` to receive debug messages including HTTP requests and responses. This implies ``debug=True``. :param str path: If a *path* is specified, logging output will written to that file in addition to sys.stderr. The path is passed to logging.FileHandler, which will append messages the file (and create it if needed). :param stream: One of ``None `` or ``sys.stdout`` or ``sys.stderr``. If it is ``None``, nothing is logged to a stream. If it isn't ``None``, console output is logged to this stream. :param bool format_stream: If format_stream is False, the default, apply ``format_template`` to ``path`` but not to ``stream`` outputs. If True, apply ``format_template`` to ``stream`` outputs as well. 
:param str format_template: Template to pass to :class:`logging.Formatter`. :rtype: None """ if not stream and not path: stream = sys.stderr if http_debug: debug = True if debug: level = logging.DEBUG else: level = logging.INFO formatter = logging.Formatter(format_template) if handlers: for handler in handlers: handler.setFormatter(formatter) else: handlers = [] if stream is not None: console = logging.StreamHandler(stream) if format_stream: console.setFormatter(formatter) handlers.append(console) if path is not None: file_handler = logging.FileHandler(path) file_handler.setFormatter(formatter) handlers.append(file_handler) setup_logging('openstack', handlers=handlers, level=level) setup_logging('keystoneauth', handlers=handlers, level=level) # Turn off logging on these so that if loggers higher in the tree # are more verbose we only get what we want out of the SDK. This is # particularly useful when combined with tools like ansible which set # debug logging level at the logging root. # If more complex logging is desired including stevedore debug logging, # enable_logging should not be used and instead python logging should # be configured directly. 
setup_logging( 'urllib3', handlers=[logging.NullHandler()], level=logging.INFO ) setup_logging( 'stevedore', handlers=[logging.NullHandler()], level=logging.INFO ) # Suppress warning about keystoneauth loggers setup_logging('keystoneauth.discovery') setup_logging('keystoneauth.identity.base') setup_logging('keystoneauth.identity.generic.base') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/_services_mixin.py0000664000175000017500000001425700000000000021767 0ustar00zuulzuul00000000000000# Generated file, to change, run tools/print-services.py from openstack import service_description from openstack.accelerator import accelerator_service from openstack.baremetal import baremetal_service from openstack.baremetal_introspection import baremetal_introspection_service from openstack.block_storage import block_storage_service from openstack.clustering import clustering_service from openstack.compute import compute_service from openstack.container_infrastructure_management import ( container_infrastructure_management_service, ) from openstack.database import database_service from openstack.dns import dns_service from openstack.identity import identity_service from openstack.image import image_service from openstack.instance_ha import instance_ha_service from openstack.key_manager import key_manager_service from openstack.load_balancer import load_balancer_service from openstack.message import message_service from openstack.network import network_service from openstack.object_store import object_store_service from openstack.orchestration import orchestration_service from openstack.placement import placement_service from openstack.shared_file_system import shared_file_system_service from openstack.workflow import workflow_service class ServicesMixin: identity = identity_service.IdentityService(service_type='identity') compute = compute_service.ComputeService(service_type='compute') image 
= image_service.ImageService(service_type='image') load_balancer = load_balancer_service.LoadBalancerService( service_type='load-balancer' ) object_store = object_store_service.ObjectStoreService( service_type='object-store' ) clustering = clustering_service.ClusteringService( service_type='clustering' ) resource_cluster = clustering cluster = clustering data_processing = service_description.ServiceDescription( service_type='data-processing' ) baremetal = baremetal_service.BaremetalService(service_type='baremetal') bare_metal = baremetal baremetal_introspection = ( baremetal_introspection_service.BaremetalIntrospectionService( service_type='baremetal-introspection' ) ) key_manager = key_manager_service.KeyManagerService( service_type='key-manager' ) resource_optimization = service_description.ServiceDescription( service_type='resource-optimization' ) infra_optim = resource_optimization message = message_service.MessageService(service_type='message') messaging = message application_catalog = service_description.ServiceDescription( service_type='application-catalog' ) container_infrastructure_management = container_infrastructure_management_service.ContainerInfrastructureManagementService( service_type='container-infrastructure-management' ) container_infra = container_infrastructure_management container_infrastructure = container_infrastructure_management search = service_description.ServiceDescription(service_type='search') dns = dns_service.DnsService(service_type='dns') workflow = workflow_service.WorkflowService(service_type='workflow') rating = service_description.ServiceDescription(service_type='rating') operator_policy = service_description.ServiceDescription( service_type='operator-policy' ) policy = operator_policy shared_file_system = shared_file_system_service.SharedFilesystemService( service_type='shared-file-system' ) share = shared_file_system data_protection_orchestration = service_description.ServiceDescription( 
service_type='data-protection-orchestration' ) orchestration = orchestration_service.OrchestrationService( service_type='orchestration' ) block_storage = block_storage_service.BlockStorageService( service_type='block-storage' ) block_store = block_storage volume = block_storage alarm = service_description.ServiceDescription(service_type='alarm') alarming = alarm meter = service_description.ServiceDescription(service_type='meter') metering = meter telemetry = meter event = service_description.ServiceDescription(service_type='event') events = event application_deployment = service_description.ServiceDescription( service_type='application-deployment' ) application_deployment = application_deployment multi_region_network_automation = service_description.ServiceDescription( service_type='multi-region-network-automation' ) tricircle = multi_region_network_automation database = database_service.DatabaseService(service_type='database') application_container = service_description.ServiceDescription( service_type='application-container' ) container = application_container root_cause_analysis = service_description.ServiceDescription( service_type='root-cause-analysis' ) rca = root_cause_analysis nfv_orchestration = service_description.ServiceDescription( service_type='nfv-orchestration' ) network = network_service.NetworkService(service_type='network') backup = service_description.ServiceDescription(service_type='backup') monitoring_logging = service_description.ServiceDescription( service_type='monitoring-logging' ) monitoring_log_api = monitoring_logging monitoring = service_description.ServiceDescription( service_type='monitoring' ) monitoring_events = service_description.ServiceDescription( service_type='monitoring-events' ) placement = placement_service.PlacementService(service_type='placement') instance_ha = instance_ha_service.InstanceHaService( service_type='instance-ha' ) ha = instance_ha reservation = service_description.ServiceDescription( 
service_type='reservation' ) function_engine = service_description.ServiceDescription( service_type='function-engine' ) accelerator = accelerator_service.AcceleratorService( service_type='accelerator' ) admin_logic = service_description.ServiceDescription( service_type='admin-logic' ) registration = admin_logic ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2132978 openstacksdk-4.0.0/openstack/accelerator/0000775000175000017500000000000000000000000020502 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/accelerator/__init__.py0000664000175000017500000000000000000000000022601 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/accelerator/accelerator_service.py0000664000175000017500000000145400000000000025064 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.accelerator.v2 import _proxy as _proxy_v2 from openstack import service_description class AcceleratorService(service_description.ServiceDescription): """The accelerator service.""" supported_versions = { '2': _proxy_v2.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2132978 openstacksdk-4.0.0/openstack/accelerator/v2/0000775000175000017500000000000000000000000021031 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/accelerator/v2/__init__.py0000664000175000017500000000000000000000000023130 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/accelerator/v2/_proxy.py0000664000175000017500000001740100000000000022726 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.accelerator.v2 import accelerator_request as _arq from openstack.accelerator.v2 import deployable as _deployable from openstack.accelerator.v2 import device as _device from openstack.accelerator.v2 import device_profile as _device_profile from openstack import proxy class Proxy(proxy.Proxy): def deployables(self, **query): """Retrieve a generator of deployables. :param kwargs query: Optional query parameters to be sent to restrict the deployables to be returned. 
:returns: A generator of deployable instances. """ return self._list(_deployable.Deployable, **query) def get_deployable(self, uuid, fields=None): """Get a single deployable. :param uuid: The value can be the UUID of a deployable. :returns: One :class:`~openstack.accelerator.v2.deployable.Deployable` :raises: :class:`~openstack.exceptions.NotFoundException` when no deployable matching the criteria could be found. """ return self._get(_deployable.Deployable, uuid) def update_deployable(self, uuid, patch): """Reconfig the FPGA with new bitstream. :param uuid: The value can be the UUID of a deployable :param patch: The information to reconfig. :returns: The results of FPGA reconfig. """ return self._get_resource(_deployable.Deployable, uuid).patch( self, patch ) def devices(self, **query): """Retrieve a generator of devices. :param kwargs query: Optional query parameters to be sent to restrict the devices to be returned. Available parameters include: * hostname: The hostname of the device. * type: The type of the device. * vendor: The vendor ID of the device. * sort: A list of sorting keys separated by commas. Each sorting key can optionally be attached with a sorting direction modifier which can be ``asc`` or ``desc``. * limit: Requests a specified size of returned items from the query. Returns a number of items up to the specified limit value. * marker: Specifies the ID of the last-seen item. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. :returns: A generator of device instances. """ return self._list(_device.Device, **query) def get_device(self, uuid, fields=None): """Get a single device. :param uuid: The value can be the UUID of a device. :returns: One :class:`~openstack.accelerator.v2.device.Device` :raises: :class:`~openstack.exceptions.NotFoundException` when no device matching the criteria could be found. 
""" return self._get(_device.Device, uuid) def device_profiles(self, **query): """Retrieve a generator of device profiles. :param kwargs query: Optional query parameters to be sent to restrict the device profiles to be returned. :returns: A generator of device profile instances. """ return self._list(_device_profile.DeviceProfile, **query) def create_device_profile(self, **attrs): """Create a device_profile. :param kwargs attrs: a list of device_profiles. :returns: The list of created device profiles """ return self._create(_device_profile.DeviceProfile, **attrs) def delete_device_profile(self, device_profile, ignore_missing=True): """Delete a device profile :param device_profile: The value can be either the ID of a device profile or a :class:`~openstack.accelerator.v2.device_profile.DeviceProfile` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the device profile does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent device profile. :returns: ``None`` """ return self._delete( _device_profile.DeviceProfile, device_profile, ignore_missing=ignore_missing, ) def get_device_profile(self, uuid, fields=None): """Get a single device profile. :param uuid: The value can be the UUID of a device profile. :returns: One :class: `~openstack.accelerator.v2.device_profile.DeviceProfile` :raises: :class:`~openstack.exceptions.NotFoundException` when no device profile matching the criteria could be found. """ return self._get(_device_profile.DeviceProfile, uuid) def accelerator_requests(self, **query): """Retrieve a generator of accelerator requests. :param kwargs query: Optional query parameters to be sent to restrict the accelerator requests to be returned. :returns: A generator of accelerator request instances. 
""" return self._list(_arq.AcceleratorRequest, **query) def create_accelerator_request(self, **attrs): """Create an ARQs for a single device profile. :param kwargs attrs: request body. :returns: The created accelerator request instance. """ return self._create(_arq.AcceleratorRequest, **attrs) def delete_accelerator_request( self, accelerator_request, ignore_missing=True, ): """Delete a device profile :param device_profile: The value can be either the ID of a device profile or a :class:`~openstack.accelerator.v2.device_profile.DeviceProfile` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the device profile does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent accelerator request. :returns: ``None`` """ return self._delete( _arq.AcceleratorRequest, accelerator_request, ignore_missing=ignore_missing, ) def get_accelerator_request(self, uuid, fields=None): """Get a single accelerator request. :param uuid: The value can be the UUID of a accelerator request. :returns: One :class: `~openstack.accelerator.v2.accelerator_request.AcceleratorRequest` :raises: :class:`~openstack.exceptions.NotFoundException` when no accelerator request matching the criteria could be found. """ return self._get(_arq.AcceleratorRequest, uuid) def update_accelerator_request(self, uuid, properties): """Bind/Unbind an accelerator to VM. :param uuid: The uuid of the accelerator_request to be bound/unbound. :param properties: The info of VM that will bind/unbind the accelerator. :returns: True if bind/unbind succeeded, False otherwise. 
""" return self._get_resource(_arq.AcceleratorRequest, uuid).patch( self, properties ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/accelerator/v2/accelerator_request.py0000664000175000017500000001017000000000000025436 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource class AcceleratorRequest(resource.Resource): resource_key = 'arq' resources_key = 'arqs' base_path = '/accelerator_requests' # capabilities allow_create = True allow_fetch = True allow_delete = True allow_list = True #: Allow patch operation for binding. allow_patch = True #: The device address associated with this ARQ (if any) attach_handle_info = resource.Body('attach_handle_info') #: The type of attach handle (e.g. PCI, mdev...) attach_handle_type = resource.Body('attach_handle_type') #: The name of the device profile device_profile_name = resource.Body('device_profile_name') #: The id of the device profile group device_profile_group_id = resource.Body('device_profile_group_id') #: The UUID of the bound device RP (if any) device_rp_uuid = resource.Body('device_rp_uuid') #: The host name to which ARQ is bound. 
(if any) hostname = resource.Body('hostname') #: The UUID of the instance associated with this ARQ (if any) instance_uuid = resource.Body('instance_uuid') #: The state of the ARQ state = resource.Body('state') #: The UUID of the ARQ uuid = resource.Body('uuid', alternate_id=True) def _convert_patch(self, patch): # This overrides the default behavior of _convert_patch because # the PATCH method consumes JSON, its key is the ARQ uuid # and its value is an ordinary JSON patch. spec: # https://specs.openstack.org/openstack/cyborg-specs/specs/train/implemented/cyborg-api converted = super()._convert_patch(patch) converted = {self.id: converted} return converted def patch( self, session, patch=None, prepend_key=True, has_body=True, retry_on_conflict=None, base_path=None, ): # This overrides the default behavior of patch because # the PATCH method consumes a dict rather than a list. spec: # https://specs.openstack.org/openstack/cyborg-specs/specs/train/implemented/cyborg-api # The id cannot be dirty for an commit self._body._dirty.discard("id") # Only try to update if we actually have anything to commit. if not patch and not self.requires_commit: return self if not self.allow_patch: raise exceptions.MethodNotSupported(self, "patch") request = self._prepare_request( prepend_key=prepend_key, base_path=base_path, patch=True ) microversion = self._get_microversion(session, action='patch') if patch: request.body = self._convert_patch(patch) return self._commit( session, request, 'PATCH', microversion, has_body=has_body, retry_on_conflict=retry_on_conflict, ) def _consume_attrs(self, mapping, attrs): # This overrides the default behavior of _consume_attrs because # cyborg api returns an ARQ as list. 
spec: # https://specs.openstack.org/openstack/cyborg-specs/specs/train/implemented/cyborg-api if isinstance(self, AcceleratorRequest): if self.resources_key in attrs: attrs = attrs[self.resources_key][0] return super()._consume_attrs(mapping, attrs) def create(self, session, base_path=None): # This overrides the default behavior of resource creation because # cyborg doesn't accept resource_key in its request. return super().create(session, prepend_key=False, base_path=base_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/accelerator/v2/deployable.py0000664000175000017500000000550100000000000023524 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource class Deployable(resource.Resource): resource_key = 'deployable' resources_key = 'deployables' base_path = '/deployables' # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True allow_patch = True #: The timestamp when this deployable was created. created_at = resource.Body('created_at') #: The device_id of the deployable. device_id = resource.Body('device_id') #: The UUID of the deployable. id = resource.Body('uuid', alternate_id=True) #: The name of the deployable. name = resource.Body('name') #: The num_accelerator of the deployable. 
num_accelerators = resource.Body('num_accelerators') #: The parent_id of the deployable. parent_id = resource.Body('parent_id') #: The root_id of the deployable. root_id = resource.Body('root_id') #: The timestamp when this deployable was updated. updated_at = resource.Body('updated_at') def _commit( self, session, request, method, microversion, has_body=True, retry_on_conflict=None, ): session = self._get_session(session) kwargs = {} retriable_status_codes = set(session.retriable_status_codes or ()) if retry_on_conflict: kwargs['retriable_status_codes'] = retriable_status_codes | {409} elif retry_on_conflict is not None and retriable_status_codes: # The baremetal proxy defaults to retrying on conflict, allow # overriding it via an explicit retry_on_conflict=False. kwargs['retriable_status_codes'] = retriable_status_codes - {409} try: call = getattr(session, method.lower()) except AttributeError: raise exceptions.ResourceFailure( "Invalid commit method: %s" % method ) request.url = request.url + "/program" response = call( request.url, json=request.body, headers=request.headers, microversion=microversion, **kwargs ) self.microversion = microversion self._translate_response(response, has_body=has_body) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/accelerator/v2/device.py0000664000175000017500000000317300000000000022646 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Device(resource.Resource): resource_key = 'device' resources_key = 'devices' base_path = '/devices' # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True #: The timestamp when this device was created. created_at = resource.Body('created_at') #: The hostname of the device. hostname = resource.Body('hostname') #: The ID of the device. id = resource.Body('id') #: The model of the device. model = resource.Body('model') #: The std board information of the device. std_board_info = resource.Body('std_board_info') #: The type of the device. type = resource.Body('type') #: The timestamp when this device was updated. updated_at = resource.Body('updated_at') #: The UUID of the device. uuid = resource.Body('uuid', alternate_id=True) #: The vendor ID of the device. vendor = resource.Body('vendor') #: The vendor board information of the device. vendor_board_info = resource.Body('vendor_board_info') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/accelerator/v2/device_profile.py0000664000175000017500000000362200000000000024365 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class DeviceProfile(resource.Resource): resource_key = 'device_profile' resources_key = 'device_profiles' base_path = '/device_profiles' # capabilities allow_create = True allow_fetch = True allow_commit = False allow_delete = True allow_list = True #: The timestamp when this device_profile was created. created_at = resource.Body('created_at') #: The description of the device profile description = resource.Body('description') #: The groups of the device profile groups = resource.Body('groups') #: The name of the device profile name = resource.Body('name') #: The timestamp when this device_profile was updated. updated_at = resource.Body('updated_at') #: The uuid of the device profile uuid = resource.Body('uuid', alternate_id=True) # TODO(s_shogo): This implementation only treat [ DeviceProfile ], and # cannot treat multiple DeviceProfiles in list. def _prepare_request_body(self, patch, prepend_key): body = super()._prepare_request_body(patch, prepend_key) return [body] def create(self, session, base_path=None): # This overrides the default behavior of resource creation because # cyborg doesn't accept resource_key in its request. return super().create(session, prepend_key=False, base_path=base_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/accelerator/version.py0000664000175000017500000000147500000000000022550 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # capabilities allow_list = True # Properties links = resource.Body('links') status = resource.Body('status') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2172997 openstacksdk-4.0.0/openstack/baremetal/0000775000175000017500000000000000000000000020152 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/__init__.py0000664000175000017500000000000000000000000022251 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/baremetal_service.py0000664000175000017500000000142700000000000024204 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.baremetal.v1 import _proxy from openstack import service_description class BaremetalService(service_description.ServiceDescription): """The bare metal service.""" supported_versions = { '1': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/configdrive.py0000664000175000017500000001302100000000000023020 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Helpers for building configdrive compatible with the Bare Metal service.""" import base64 import contextlib import gzip import json import os import shutil import subprocess import tempfile import typing as ty @contextlib.contextmanager def populate_directory( metadata, user_data=None, versions=None, network_data=None, vendor_data=None, ): """Populate a directory with configdrive files. :param dict metadata: Metadata. :param bytes user_data: Vendor-specific user data. :param versions: List of metadata versions to support. :param dict network_data: Networking configuration. :param dict vendor_data: Extra supplied vendor data. 
:return: a context manager yielding a directory with files """ d = tempfile.mkdtemp() versions = versions or ('2012-08-10', 'latest') try: for version in versions: subdir = os.path.join(d, 'openstack', version) if not os.path.exists(subdir): os.makedirs(subdir) with open(os.path.join(subdir, 'meta_data.json'), 'w') as fp: json.dump(metadata, fp) if network_data: with open( os.path.join(subdir, 'network_data.json'), 'w' ) as fp: json.dump(network_data, fp) if vendor_data: with open( os.path.join(subdir, 'vendor_data2.json'), 'w' ) as fp: json.dump(vendor_data, fp) if user_data: # Strictly speaking, user data is binary, but in many cases # it's actually a text (cloud-init, ignition, etc). flag = 't' if isinstance(user_data, str) else 'b' with open( os.path.join(subdir, 'user_data'), 'w%s' % flag ) as fp: fp.write(user_data) yield d finally: shutil.rmtree(d) def build( metadata, user_data=None, versions=None, network_data=None, vendor_data=None, ): """Make a configdrive compatible with the Bare Metal service. Requires the genisoimage utility to be available. :param dict metadata: Metadata. :param user_data: Vendor-specific user data. :param versions: List of metadata versions to support. :param dict network_data: Networking configuration. :param dict vendor_data: Extra supplied vendor data. :return: configdrive contents as a base64-encoded string. """ with populate_directory( metadata, user_data, versions, network_data, vendor_data ) as path: return pack(path) def pack(path: str) -> str: """Pack a directory with files into a Bare Metal service configdrive. Creates an ISO image with the files and label "config-2". :param str path: Path to directory with files :return: configdrive contents as a base64-encoded string. """ with tempfile.NamedTemporaryFile() as tmpfile: # NOTE(toabctl): Luckily, genisoimage, mkisofs and xorrisofs understand # the same parameters which are currently used. 
cmds = ['genisoimage', 'mkisofs', 'xorrisofs'] error: ty.Optional[Exception] for c in cmds: try: p = subprocess.Popen( [ c, '-o', tmpfile.name, '-ldots', '-allow-lowercase', '-allow-multidot', '-l', '-publisher', 'metalsmith', '-quiet', '-J', '-r', '-V', 'config-2', path, ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) except OSError as e: error = e else: error = None break if error: raise RuntimeError( 'Error generating the configdrive. Make sure the ' '"genisoimage", "mkisofs" or "xorrisofs" tool is installed. ' 'Error: %s' % error ) stdout, stderr = p.communicate() if p.returncode != 0: raise RuntimeError( 'Error generating the configdrive.' 'Stdout: "%(stdout)s". Stderr: "%(stderr)s"' % {'stdout': stdout.decode(), 'stderr': stderr.decode()} ) tmpfile.seek(0) with tempfile.NamedTemporaryFile() as tmpzipfile: with gzip.GzipFile(fileobj=tmpzipfile, mode='wb') as gz_file: shutil.copyfileobj(tmpfile, gz_file) tmpzipfile.seek(0) # NOTE(dtantsur): Ironic expects configdrive to be a string, but # base64 returns bytes on Python 3. cd = base64.b64encode(tmpzipfile.read()).decode() return cd ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2172997 openstacksdk-4.0.0/openstack/baremetal/v1/0000775000175000017500000000000000000000000020500 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/__init__.py0000664000175000017500000000000000000000000022577 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/_common.py0000664000175000017500000001053200000000000022502 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource RETRIABLE_STATUS_CODES = [ # HTTP Conflict - happens if a node is locked 409, # HTTP Service Unavailable happens if there's no free conductor 503, ] """HTTP status codes that should be retried.""" PROVISIONING_VERSIONS = { 'abort': 13, 'adopt': 17, 'clean': 15, 'inspect': 6, 'manage': 4, 'provide': 4, 'rescue': 38, 'unrescue': 38, 'unhold': 85, 'service': 87, } """API microversions introducing provisioning verbs.""" # Based on https://docs.openstack.org/ironic/latest/contributor/states.html EXPECTED_STATES = { 'active': 'active', 'adopt': 'available', 'clean': 'manageable', 'deleted': 'available', 'inspect': 'manageable', 'manage': 'manageable', 'provide': 'available', 'rebuild': 'active', 'rescue': 'rescue', } """Mapping of provisioning actions to expected stable states.""" EXPECTED_POWER_STATES = { 'power on': 'power on', 'power off': 'power off', 'rebooting': 'power on', 'soft power off': 'power off', 'soft rebooting': 'power on', } """Mapping of target power states to expected power states.""" STATE_VERSIONS = { 'available': '1.1', 'enroll': '1.11', 'manageable': '1.4', } """API versions when certain states were introduced.""" VIF_VERSION = '1.28' """API version in which the VIF operations were introduced.""" VIF_OPTIONAL_PARAMS_VERSION = '1.67' """API version in which the VIF optional parameters were introduced.""" INJECT_NMI_VERSION = '1.29' """API vresion in which support for injecting NMI was introduced.""" CONFIG_DRIVE_REBUILD_VERSION = '1.35' """API version in which rebuild accepts a configdrive.""" RESET_INTERFACES_VERSION = 
'1.45' """API version in which the reset_interfaces parameter was introduced.""" CONFIG_DRIVE_DICT_VERSION = '1.56' """API version in which configdrive can be a dictionary.""" DEPLOY_STEPS_VERSION = '1.69' """API version in which deploy_steps was added to node provisioning.""" CHANGE_BOOT_MODE_VERSION = '1.76' """API version in which boot_mode and secure_boot states can be changed""" FIRMWARE_VERSION = '1.86' """API version in which firmware components of a node can be accessed""" class Resource(resource.Resource): base_path: str @classmethod def list(cls, session, details=False, **params): """This method is a generator which yields resource objects. This resource object list generator handles pagination and takes query params for response filtering. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param bool details: Whether to return detailed node records :param dict params: These keyword arguments are passed through the :meth:`~openstack.resource.QueryParameter._transpose` method to find if any of them match expected query parameters to be sent in the *params* argument to :meth:`~keystoneauth1.adapter.Adapter.get`. :return: A generator of :class:`openstack.resource.Resource` objects. :raises: :exc:`~openstack.exceptions.InvalidResourceQuery` if query contains invalid params. 
""" base_path = cls.base_path if details: base_path += '/detail' return super().list( session, paginated=True, base_path=base_path, **params ) def comma_separated_list(value): if value is None: return None else: return ','.join(value) def fields_type(value, resource_type): if value is None: return None resource_mapping = { key: value.name for key, value in resource_type.__dict__.items() if isinstance(value, resource.Body) } return comma_separated_list(resource_mapping.get(x, x) for x in value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/_proxy.py0000664000175000017500000022314700000000000022403 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty from openstack.baremetal.v1 import _common from openstack.baremetal.v1 import allocation as _allocation from openstack.baremetal.v1 import chassis as _chassis from openstack.baremetal.v1 import conductor as _conductor from openstack.baremetal.v1 import deploy_templates as _deploytemplates from openstack.baremetal.v1 import driver as _driver from openstack.baremetal.v1 import node as _node from openstack.baremetal.v1 import port as _port from openstack.baremetal.v1 import port_group as _portgroup from openstack.baremetal.v1 import volume_connector as _volumeconnector from openstack.baremetal.v1 import volume_target as _volumetarget from openstack import exceptions from openstack import proxy from openstack import utils class Proxy(proxy.Proxy): retriable_status_codes = _common.RETRIABLE_STATUS_CODES _resource_registry = { "allocation": _allocation.Allocation, "chassis": _chassis.Chassis, "conductor": _conductor.Conductor, "deploy_template": _deploytemplates.DeployTemplate, "driver": _driver.Driver, "node": _node.Node, "port": _port.Port, "port_group": _portgroup.PortGroup, "volume_connector": _volumeconnector.VolumeConnector, "volume_target": _volumetarget.VolumeTarget, } def _get_with_fields(self, resource_type, value, fields=None): """Fetch a bare metal resource. :param resource_type: The type of resource to get. :type resource_type: :class:`~openstack.resource.Resource` :param value: The value to get. Can be either the ID of a resource or a :class:`~openstack.resource.Resource` subclass. :param fields: Limit the resource fields to fetch. 
:returns: The result of the ``fetch`` :rtype: :class:`~openstack.resource.Resource` """ res = self._get_resource(resource_type, value) kwargs = {} if fields: kwargs['fields'] = _common.fields_type(fields, resource_type) return res.fetch( self, error_message="No {resource_type} found for {value}".format( resource_type=resource_type.__name__, value=value ), **kwargs, ) def chassis(self, details=False, **query): """Retrieve a generator of chassis. :param details: A boolean indicating whether the detailed information for every chassis should be returned. :param dict query: Optional query parameters to be sent to restrict the chassis to be returned. Available parameters include: * ``fields``: A list containing one or more fields to be returned in the response. This may lead to some performance gain because other fields of the resource are not refreshed. * ``limit``: Requests at most the specified number of items be returned from the query. * ``marker``: Specifies the ID of the last-seen chassis. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen chassis from the response as the ``marker`` value in a subsequent limited request. * ``sort_dir``: Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. * ``sort_key``: Sorts the response by the this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. :returns: A generator of chassis instances. 
""" return _chassis.Chassis.list(self, details=details, **query) def create_chassis(self, **attrs): """Create a new chassis from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.baremetal.v1.chassis.Chassis`. :returns: The results of chassis creation. :rtype: :class:`~openstack.baremetal.v1.chassis.Chassis`. """ return self._create(_chassis.Chassis, **attrs) def find_chassis(self, name_or_id, ignore_missing=True): """Find a single chassis. :param str name_or_id: The ID of a chassis. :param bool ignore_missing: When set to ``False``, an exception of :class:`~openstack.exceptions.NotFoundException` will be raised when the chassis does not exist. When set to `True``, None will be returned when attempting to find a nonexistent chassis. :returns: One :class:`~openstack.baremetal.v1.chassis.Chassis` object or None. """ return self._find( _chassis.Chassis, name_or_id, ignore_missing=ignore_missing ) def get_chassis(self, chassis, fields=None): """Get a specific chassis. :param chassis: The value can be the ID of a chassis or a :class:`~openstack.baremetal.v1.chassis.Chassis` instance. :param fields: Limit the resource fields to fetch. :returns: One :class:`~openstack.baremetal.v1.chassis.Chassis` :raises: :class:`~openstack.exceptions.NotFoundException` when no chassis matching the name or ID could be found. """ return self._get_with_fields(_chassis.Chassis, chassis, fields=fields) def update_chassis(self, chassis, **attrs): """Update a chassis. :param chassis: Either the ID of a chassis, or an instance of :class:`~openstack.baremetal.v1.chassis.Chassis`. :param dict attrs: The attributes to update on the chassis represented by the ``chassis`` parameter. :returns: The updated chassis. :rtype: :class:`~openstack.baremetal.v1.chassis.Chassis` """ return self._update(_chassis.Chassis, chassis, **attrs) def patch_chassis(self, chassis, patch): """Apply a JSON patch to the chassis. 
:param chassis: The value can be the ID of a chassis or a :class:`~openstack.baremetal.v1.chassis.Chassis` instance. :param patch: JSON patch to apply. :returns: The updated chassis. :rtype: :class:`~openstack.baremetal.v1.chassis.Chassis` """ return self._get_resource(_chassis.Chassis, chassis).patch(self, patch) def delete_chassis(self, chassis, ignore_missing=True): """Delete a chassis. :param chassis: The value can be either the ID of a chassis or a :class:`~openstack.baremetal.v1.chassis.Chassis` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the chassis could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent chassis. :returns: The instance of the chassis which was deleted. :rtype: :class:`~openstack.baremetal.v1.chassis.Chassis`. """ return self._delete( _chassis.Chassis, chassis, ignore_missing=ignore_missing ) def drivers(self, details=False, **query): """Retrieve a generator of drivers. :param bool details: A boolean indicating whether the detailed information for every driver should be returned. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of driver instances. """ # NOTE(dtantsur): details are available starting with API microversion # 1.30. Thus we do not send any value if not needed. if details: query['details'] = True return self._list(_driver.Driver, **query) def get_driver(self, driver): """Get a specific driver. :param driver: The value can be the name of a driver or a :class:`~openstack.baremetal.v1.driver.Driver` instance. :returns: One :class:`~openstack.baremetal.v1.driver.Driver` :raises: :class:`~openstack.exceptions.NotFoundException` when no driver matching the name could be found. """ return self._get(_driver.Driver, driver) def list_driver_vendor_passthru(self, driver): """Get driver's vendor_passthru methods. 
:param driver: The value can be the name of a driver or a :class:`~openstack.baremetal.v1.driver.Driver` instance. :returns: One :dict: of vendor methods with corresponding usages :raises: :class:`~openstack.exceptions.NotFoundException` when no driver matching the name could be found. """ driver = self.get_driver(driver) return driver.list_vendor_passthru(self) def call_driver_vendor_passthru( self, driver, verb: str, method: str, body=None ): """Call driver's vendor_passthru method. :param driver: The value can be the name of a driver or a :class:`~openstack.baremetal.v1.driver.Driver` instance. :param verb: One of GET, POST, PUT, DELETE, depending on the driver and method. :param method: Name of vendor method. :param body: passed to the vendor function as json body. :returns: Server response """ driver = self.get_driver(driver) return driver.call_vendor_passthru(self, verb, method, body) def nodes(self, details=False, **query): """Retrieve a generator of nodes. :param details: A boolean indicating whether the detailed information for every node should be returned. :param dict query: Optional query parameters to be sent to restrict the nodes returned. Available parameters include: * ``associated``: Only return those which are, or are not, associated with an ``instance_id``. * ``conductor_group``: Only return those in the specified ``conductor_group``. * ``driver``: Only return those with the specified ``driver``. * ``fault``: Only return those with the specified fault type. * ``fields``: A list containing one or more fields to be returned in the response. This may lead to some performance gain because other fields of the resource are not refreshed. * ``instance_id``: Only return the node with this specific instance UUID or an empty set if not found. * ``is_maintenance``: Only return those with ``maintenance`` set to ``True`` or ``False``. * ``limit``: Requests at most the specified number of nodes be returned from the query. 
* ``marker``: Specifies the ID of the last-seen node. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen node from the response as the ``marker`` value in a subsequent limited request. * ``provision_state``: Only return those nodes with the specified ``provision_state``. * ``resource_class``: Only return those with the specified ``resource_class``. * ``shard``: Only return nodes matching the supplied shard key. * ``sort_dir``: Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. * ``sort_key``: Sorts the response by the this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query pa rameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. :returns: A generator of :class:`~openstack.baremetal.v1.node.Node` """ return _node.Node.list(self, details=details, **query) def create_node(self, **attrs): """Create a new node from attributes. See :meth:`~openstack.baremetal.v1.node.Node.create` for an explanation of the initial provision state. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.baremetal.v1.node.Node`. :returns: The results of node creation. :rtype: :class:`~openstack.baremetal.v1.node.Node`. """ return self._create(_node.Node, **attrs) def find_node(self, name_or_id, ignore_missing=True): """Find a single node. :param str name_or_id: The name or ID of a node. :param bool ignore_missing: When set to ``False``, an exception of :class:`~openstack.exceptions.NotFoundException` will be raised when the node does not exist. 
When set to `True``, None will be returned when attempting to find a nonexistent node. :returns: One :class:`~openstack.baremetal.v1.node.Node` object or None. """ return self._find( _node.Node, name_or_id, ignore_missing=ignore_missing ) def get_node(self, node, fields=None): """Get a specific node. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param fields: Limit the resource fields to fetch. :returns: One :class:`~openstack.baremetal.v1.node.Node` :raises: :class:`~openstack.exceptions.NotFoundException` when no node matching the name or ID could be found. """ return self._get_with_fields(_node.Node, node, fields=fields) def get_node_inventory(self, node): """Get a specific node's hardware inventory. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :returns: The node inventory :raises: :class:`~openstack.exceptions.NotFoundException` when no inventory could be found. """ res = self._get_resource(_node.Node, node) return res.get_node_inventory(self, node) def update_node(self, node, retry_on_conflict=True, **attrs): """Update a node. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param bool retry_on_conflict: Whether to retry HTTP CONFLICT error. Most of the time it can be retried, since it is caused by the node being locked. However, when setting ``instance_id``, this is a normal code and should not be retried. :param dict attrs: The attributes to update on the node represented by the ``node`` parameter. :returns: The updated node. :rtype: :class:`~openstack.baremetal.v1.node.Node` """ res = self._get_resource(_node.Node, node, **attrs) return res.commit(self, retry_on_conflict=retry_on_conflict) def patch_node( self, node, patch, reset_interfaces=None, retry_on_conflict=True ): """Apply a JSON patch to the node. 
:param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param patch: JSON patch to apply. :param bool reset_interfaces: whether to reset the node hardware interfaces to their defaults. This works only when changing drivers. Added in API microversion 1.45. :param bool retry_on_conflict: Whether to retry HTTP CONFLICT error. Most of the time it can be retried, since it is caused by the node being locked. However, when setting ``instance_id``, this is a normal code and should not be retried. See `Update Node `_ for details. :returns: The updated node. :rtype: :class:`~openstack.baremetal.v1.node.Node` """ res = self._get_resource(_node.Node, node) return res.patch( self, patch, retry_on_conflict=retry_on_conflict, reset_interfaces=reset_interfaces, ) def set_node_provision_state( self, node, target, config_drive=None, clean_steps=None, rescue_password=None, wait=False, timeout=None, deploy_steps=None, ): """Run an action modifying node's provision state. This call is asynchronous, it will return success as soon as the Bare Metal service acknowledges the request. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param target: Provisioning action, e.g. ``active``, ``provide``. See the Bare Metal service documentation for available actions. :param config_drive: Config drive to pass to the node, only valid for ``active` and ``rebuild`` targets. You can use functions from :mod:`openstack.baremetal.configdrive` to build it. :param clean_steps: Clean steps to execute, only valid for ``clean`` target. :param rescue_password: Password for the rescue operation, only valid for ``rescue`` target. :param wait: Whether to wait for the node to get into the expected state. The expected state is determined from a combination of the current provision state and ``target``. 
:param timeout: If ``wait`` is set to ``True``, specifies how much (in seconds) to wait for the expected state to be reached. The value of ``None`` (the default) means no client-side timeout. :param deploy_steps: Deploy steps to execute, only valid for ``active`` and ``rebuild`` target. :returns: The updated :class:`~openstack.baremetal.v1.node.Node` :raises: ValueError if ``config_drive``, ``clean_steps``, ``deploy_steps`` or ``rescue_password`` are provided with an invalid ``target``. """ res = self._get_resource(_node.Node, node) return res.set_provision_state( self, target, config_drive=config_drive, clean_steps=clean_steps, rescue_password=rescue_password, wait=wait, timeout=timeout, deploy_steps=deploy_steps, ) def get_node_boot_device(self, node): """Get node boot device :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :return: The node boot device """ res = self._get_resource(_node.Node, node) return res.get_boot_device(self) def set_node_boot_device(self, node, boot_device, persistent=False): """Set node boot device :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param boot_device: Boot device to assign to the node. :param persistent: If the boot device change is maintained after node reboot :return: The updated :class:`~openstack.baremetal.v1.node.Node` """ res = self._get_resource(_node.Node, node) return res.set_boot_device(self, boot_device, persistent=persistent) def get_node_supported_boot_devices(self, node): """Get supported boot devices for node :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. 
:return: The node boot device """ res = self._get_resource(_node.Node, node) return res.get_supported_boot_devices(self) def set_node_boot_mode(self, node, target): """Make a request to change node's boot mode :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param target: Boot mode to set for node, one of either 'uefi'/'bios'. """ res = self._get_resource(_node.Node, node) return res.set_boot_mode(self, target) def set_node_secure_boot(self, node, target): """Make a request to change node's secure boot state :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param target: Boolean indicating secure boot state to set. True/False corresponding to 'on'/'off' respectively. """ res = self._get_resource(_node.Node, node) return res.set_secure_boot(self, target) def inject_nmi_to_node(self, node): """Inject NMI to node. Injects a non-maskable interrupt (NMI) message to the node. This is used when response time is critical, such as during non-recoverable hardware errors. In addition, virsh inject-nmi is useful for triggering a crashdump in Windows guests. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :return: None """ res = self._get_resource(_node.Node, node) res.inject_nmi(self) def wait_for_nodes_provision_state( self, nodes, expected_state, timeout=None, abort_on_failed_state=True, fail=True, ): """Wait for the nodes to reach the expected state. :param nodes: List of nodes - name, ID or :class:`~openstack.baremetal.v1.node.Node` instance. :param expected_state: The expected provisioning state to reach. :param timeout: If ``wait`` is set to ``True``, specifies how much (in seconds) to wait for the expected state to be reached. The value of ``None`` (the default) means no client-side timeout. 
:param abort_on_failed_state: If ``True`` (the default), abort waiting if any node reaches a failure state which does not match the expected one. Note that the failure state for ``enroll`` -> ``manageable`` transition is ``enroll`` again. :param fail: If set to ``False`` this call will not raise on timeouts and provisioning failures. :return: If `fail` is ``True`` (the default), the list of :class:`~openstack.baremetal.v1.node.Node` instances that reached the requested state. If `fail` is ``False``, a :class:`~openstack.baremetal.v1.node.WaitResult` named tuple. :raises: :class:`~openstack.exceptions.ResourceFailure` if a node reaches an error state and ``abort_on_failed_state`` is ``True``. :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. """ log_nodes = ', '.join( n.id if isinstance(n, _node.Node) else n for n in nodes ) finished = [] failed = [] remaining = nodes try: for count in utils.iterate_timeout( timeout, "Timeout waiting for nodes %(nodes)s to reach " "target state '%(state)s'" % {'nodes': log_nodes, 'state': expected_state}, ): nodes = [self.get_node(n) for n in remaining] remaining = [] for n in nodes: try: if n._check_state_reached( self, expected_state, abort_on_failed_state ): finished.append(n) else: remaining.append(n) except exceptions.ResourceFailure: if fail: raise else: failed.append(n) if not remaining: if fail: return finished else: return _node.WaitResult(finished, failed, []) self.log.debug( 'Still waiting for nodes %(nodes)s to reach state ' '"%(target)s"', { 'nodes': ', '.join(n.id for n in remaining), 'target': expected_state, }, ) except exceptions.ResourceTimeout: if fail: raise else: return _node.WaitResult(finished, failed, remaining) def set_node_power_state(self, node, target, wait=False, timeout=None): """Run an action modifying node's power state. This call is asynchronous, it will return success as soon as the Bare Metal service acknowledges the request. 
:param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param target: Target power state, one of :class:`~openstack.baremetal.v1.node.PowerAction` or a string. :param wait: Whether to wait for the node to get into the expected state. :param timeout: If ``wait`` is set to ``True``, specifies how much (in seconds) to wait for the expected state to be reached. The value of ``None`` (the default) means no client-side timeout. """ self._get_resource(_node.Node, node).set_power_state( self, target, wait=wait, timeout=timeout ) def wait_for_node_power_state(self, node, expected_state, timeout=None): """Wait for the node to reach the power state. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param timeout: How much (in seconds) to wait for the target state to be reached. The value of ``None`` (the default) means no timeout. :returns: The updated :class:`~openstack.baremetal.v1.node.Node` """ res = self._get_resource(_node.Node, node) return res.wait_for_power_state(self, expected_state, timeout=timeout) def wait_for_node_reservation(self, node, timeout=None): """Wait for a lock on the node to be released. Bare metal nodes in ironic have a reservation lock that is used to represent that a conductor has locked the node while performing some sort of action, such as changing configuration as a result of a machine state change. This lock can occur during power syncronization, and prevents updates to objects attached to the node, such as ports. Note that nothing prevents a conductor from acquiring the lock again after this call returns, so it should be treated as best effort. Returns immediately if there is no reservation on the node. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param timeout: How much (in seconds) to wait for the lock to be released. 
The value of ``None`` (the default) means no timeout. :returns: The updated :class:`~openstack.baremetal.v1.node.Node` """ res = self._get_resource(_node.Node, node) return res.wait_for_reservation(self, timeout=timeout) def validate_node(self, node, required=('boot', 'deploy', 'power')): """Validate required information on a node. :param node: The value can be either the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param required: List of interfaces that are required to pass validation. The default value is the list of minimum required interfaces for provisioning. :return: dict mapping interface names to :class:`~openstack.baremetal.v1.node.ValidationResult` objects. :raises: :exc:`~openstack.exceptions.ValidationException` if validation fails for a required interface. """ res = self._get_resource(_node.Node, node) return res.validate(self, required=required) def set_node_maintenance(self, node, reason=None): """Enable maintenance mode on the node. :param node: The value can be either the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param reason: Optional reason for maintenance. :return: This :class:`Node` instance. """ res = self._get_resource(_node.Node, node) return res.set_maintenance(self, reason) def unset_node_maintenance(self, node): """Disable maintenance mode on the node. :param node: The value can be either the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :return: This :class:`Node` instance. """ res = self._get_resource(_node.Node, node) return res.unset_maintenance(self) def delete_node(self, node, ignore_missing=True): """Delete a node. :param node: The value can be either the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the node could not be found. 
When set to ``True``, no exception will be raised when attempting to delete a non-existent node. :returns: The instance of the node which was deleted. :rtype: :class:`~openstack.baremetal.v1.node.Node`. """ return self._delete(_node.Node, node, ignore_missing=ignore_missing) def ports(self, details=False, **query): """Retrieve a generator of ports. :param details: A boolean indicating whether the detailed information for every port should be returned. :param dict query: Optional query parameters to be sent to restrict the ports returned. Available parameters include: * ``address``: Only return ports with the specified physical hardware address, typically a MAC address. * ``driver``: Only return those with the specified ``driver``. * ``fields``: A list containing one or more fields to be returned in the response. This may lead to some performance gain because other fields of the resource are not refreshed. * ``limit``: Requests at most the specified number of ports be returned from the query. * ``marker``: Specifies the ID of the last-seen port. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen port from the response as the ``marker`` value in a subsequent limited request. * ``node``:only return the ones associated with this specific node (name or UUID), or an empty set if not found. * ``node_id``:only return the ones associated with this specific node UUID, or an empty set if not found. * ``portgroup``: only return the ports associated with this specific Portgroup (name or UUID), or an empty set if not found. Added in API microversion 1.24. * ``sort_dir``: Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. 
* ``sort_key``: Sorts the response by the this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. :returns: A generator of port instances. """ return _port.Port.list(self, details=details, **query) def create_port(self, **attrs): """Create a new port from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.baremetal.v1.port.Port`. :returns: The results of port creation. :rtype: :class:`~openstack.baremetal.v1.port.Port`. """ return self._create(_port.Port, **attrs) def find_port(self, name_or_id, ignore_missing=True): """Find a single port. :param str name_or_id: The ID of a port. :param bool ignore_missing: When set to ``False``, an exception of :class:`~openstack.exceptions.NotFoundException` will be raised when the port does not exist. When set to `True``, None will be returned when attempting to find a nonexistent port. :returns: One :class:`~openstack.baremetal.v1.port.Port` object or None. """ return self._find( _port.Port, name_or_id, ignore_missing=ignore_missing ) def get_port(self, port, fields=None): """Get a specific port. :param port: The value can be the ID of a port or a :class:`~openstack.baremetal.v1.port.Port` instance. :param fields: Limit the resource fields to fetch. :returns: One :class:`~openstack.baremetal.v1.port.Port` :raises: :class:`~openstack.exceptions.NotFoundException` when no port matching the name or ID could be found. """ return self._get_with_fields(_port.Port, port, fields=fields) def update_port(self, port, **attrs): """Update a port. :param port: Either the ID of a port or an instance of :class:`~openstack.baremetal.v1.port.Port`. :param dict attrs: The attributes to update on the port represented by the ``port`` parameter. :returns: The updated port. 
:rtype: :class:`~openstack.baremetal.v1.port.Port` """ return self._update(_port.Port, port, **attrs) def patch_port(self, port, patch): """Apply a JSON patch to the port. :param port: The value can be the ID of a port or a :class:`~openstack.baremetal.v1.port.Port` instance. :param patch: JSON patch to apply. :returns: The updated port. :rtype: :class:`~openstack.baremetal.v1.port.Port` """ return self._get_resource(_port.Port, port).patch(self, patch) def delete_port(self, port, ignore_missing=True): """Delete a port. :param port: The value can be either the ID of a port or a :class:`~openstack.baremetal.v1.port.Port` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the port could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent port. :returns: The instance of the port which was deleted. :rtype: :class:`~openstack.baremetal.v1.port.Port`. """ return self._delete(_port.Port, port, ignore_missing=ignore_missing) def port_groups(self, details=False, **query): """Retrieve a generator of port groups. :param details: A boolean indicating whether the detailed information for every port group should be returned. :param dict query: Optional query parameters to be sent to restrict the port groups returned. Available parameters include: * ``address``: Only return portgroups with the specified physical hardware address, typically a MAC address. * ``fields``: A list containing one or more fields to be returned in the response. This may lead to some performance gain because other fields of the resource are not refreshed. * ``limit``: Requests at most the specified number of portgroups returned from the query. * ``marker``: Specifies the ID of the last-seen portgroup. 
Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen portgroup from the response as the ``marker`` value in a subsequent limited request. * ``node``:only return the ones associated with this specific node (name or UUID), or an empty set if not found. * ``sort_dir``: Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. * ``sort_key``: Sorts the response by the this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. :returns: A generator of port group instances. """ return _portgroup.PortGroup.list(self, details=details, **query) def create_port_group(self, **attrs): """Create a new portgroup from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.baremetal.v1.port_group.PortGroup`. :returns: The results of portgroup creation. :rtype: :class:`~openstack.baremetal.v1.port_group.PortGroup`. """ return self._create(_portgroup.PortGroup, **attrs) def find_port_group(self, name_or_id, ignore_missing=True): """Find a single port group. :param str name_or_id: The name or ID of a portgroup. :param bool ignore_missing: When set to ``False``, an exception of :class:`~openstack.exceptions.NotFoundException` will be raised when the port group does not exist. When set to `True``, None will be returned when attempting to find a nonexistent port group. :returns: One :class:`~openstack.baremetal.v1.port_group.PortGroup` object or None. 
""" return self._find( _portgroup.PortGroup, name_or_id, ignore_missing=ignore_missing ) def get_port_group(self, port_group, fields=None): """Get a specific port group. :param port_group: The value can be the name or ID of a chassis or a :class:`~openstack.baremetal.v1.port_group.PortGroup` instance. :param fields: Limit the resource fields to fetch. :returns: One :class:`~openstack.baremetal.v1.port_group.PortGroup` :raises: :class:`~openstack.exceptions.NotFoundException` when no port group matching the name or ID could be found. """ return self._get_with_fields( _portgroup.PortGroup, port_group, fields=fields ) def update_port_group(self, port_group, **attrs): """Update a port group. :param port_group: Either the name or the ID of a port group or an instance of :class:`~openstack.baremetal.v1.port_group.PortGroup`. :param dict attrs: The attributes to update on the port group represented by the ``port_group`` parameter. :returns: The updated port group. :rtype: :class:`~openstack.baremetal.v1.port_group.PortGroup` """ return self._update(_portgroup.PortGroup, port_group, **attrs) def patch_port_group(self, port_group, patch): """Apply a JSON patch to the port_group. :param port_group: The value can be the ID of a port group or a :class:`~openstack.baremetal.v1.port_group.PortGroup` instance. :param patch: JSON patch to apply. :returns: The updated port group. :rtype: :class:`~openstack.baremetal.v1.port_group.PortGroup` """ res = self._get_resource(_portgroup.PortGroup, port_group) return res.patch(self, patch) def delete_port_group(self, port_group, ignore_missing=True): """Delete a port group. :param port_group: The value can be either the name or ID of a port group or a :class:`~openstack.baremetal.v1.port_group.PortGroup` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the port group could not be found. 
When set to ``True``, no exception will be raised when attempting to delete a non-existent port group. :returns: The instance of the port group which was deleted. :rtype: :class:`~openstack.baremetal.v1.port_group.PortGroup`. """ return self._delete( _portgroup.PortGroup, port_group, ignore_missing=ignore_missing ) def attach_vif_to_node( self, node: ty.Union[_node.Node, str], vif_id: str, retry_on_conflict: bool = True, *, port_id: ty.Optional[str] = None, port_group_id: ty.Optional[str] = None, ) -> None: """Attach a VIF to the node. The exact form of the VIF ID depends on the network interface used by the node. In the most common case it is a Network service port (NOT a Bare Metal port) ID. A VIF can only be attached to one node at a time. :param node: The value can be either the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param vif_id: Backend-specific VIF ID. :param retry_on_conflict: Whether to retry HTTP CONFLICT errors. This can happen when either the VIF is already used on a node or the node is locked. Since the latter happens more often, the default value is True. :param port_id: The UUID of the port to attach the VIF to. Only one of port_id or port_group_id can be provided. :param port_group_id: The UUID of the portgroup to attach to. Only one of port_group_id or port_id can be provided. :return: None :raises: :exc:`~openstack.exceptions.NotSupported` if the server does not support the VIF API. :raises: :exc:`~openstack.exceptions.InvalidRequest` if both port_id and port_group_id are provided. """ res = self._get_resource(_node.Node, node) res.attach_vif( self, vif_id=vif_id, retry_on_conflict=retry_on_conflict, port_id=port_id, port_group_id=port_group_id, ) def detach_vif_from_node(self, node, vif_id, ignore_missing=True): """Detach a VIF from the node. The exact form of the VIF ID depends on the network interface used by the node. In the most common case it is a Network service port (NOT a Bare Metal port) ID. 
:param node: The value can be either the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param string vif_id: Backend-specific VIF ID. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the VIF does not exist. Otherwise, ``False`` is returned. :return: ``True`` if the VIF was detached, otherwise ``False``. :raises: :exc:`~openstack.exceptions.NotSupported` if the server does not support the VIF API. """ res = self._get_resource(_node.Node, node) return res.detach_vif(self, vif_id, ignore_missing=ignore_missing) def list_node_vifs(self, node): """List IDs of VIFs attached to the node. The exact form of the VIF ID depends on the network interface used by the node. In the most common case it is a Network service port (NOT a Bare Metal port) ID. :param node: The value can be either the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :return: List of VIF IDs as strings. :raises: :exc:`~openstack.exceptions.NotSupported` if the server does not support the VIF API. """ res = self._get_resource(_node.Node, node) return res.list_vifs(self) def allocations(self, **query): """Retrieve a generator of allocations. :param dict query: Optional query parameters to be sent to restrict the allocation to be returned. Available parameters include: * ``fields``: A list containing one or more fields to be returned in the response. This may lead to some performance gain because other fields of the resource are not refreshed. * ``limit``: Requests at most the specified number of items be returned from the query. * ``marker``: Specifies the ID of the last-seen allocation. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen allocation from the response as the ``marker`` value in a subsequent limited request. * ``sort_dir``: Sorts the response by the requested sort direction. 
A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. * ``sort_key``: Sorts the response by the this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. :returns: A generator of allocation instances. """ return _allocation.Allocation.list(self, **query) def create_allocation(self, **attrs): """Create a new allocation from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.baremetal.v1.allocation.Allocation`. :returns: The results of allocation creation. :rtype: :class:`~openstack.baremetal.v1.allocation.Allocation`. """ return self._create(_allocation.Allocation, **attrs) def get_allocation(self, allocation, fields=None): """Get a specific allocation. :param allocation: The value can be the name or ID of an allocation or a :class:`~openstack.baremetal.v1.allocation.Allocation` instance. :param fields: Limit the resource fields to fetch. :returns: One :class:`~openstack.baremetal.v1.allocation.Allocation` :raises: :class:`~openstack.exceptions.NotFoundException` when no allocation matching the name or ID could be found. """ return self._get_with_fields( _allocation.Allocation, allocation, fields=fields ) def update_allocation(self, allocation, **attrs): """Update an allocation. :param allocation: The value can be the name or ID of an allocation or a :class:`~openstack.baremetal.v1.allocation.Allocation` instance. :param dict attrs: The attributes to update on the allocation represented by the ``allocation`` parameter. :returns: The updated allocation. 
:rtype: :class:`~openstack.baremetal.v1.allocation.Allocation` """ return self._update(_allocation.Allocation, allocation, **attrs) def patch_allocation(self, allocation, patch): """Apply a JSON patch to the allocation. :param allocation: The value can be the name or ID of an allocation or a :class:`~openstack.baremetal.v1.allocation.Allocation` instance. :param patch: JSON patch to apply. :returns: The updated allocation. :rtype: :class:`~openstack.baremetal.v1.allocation.Allocation` """ return self._get_resource(_allocation.Allocation, allocation).patch( self, patch ) def delete_allocation(self, allocation, ignore_missing=True): """Delete an allocation. :param allocation: The value can be the name or ID of an allocation or a :class:`~openstack.baremetal.v1.allocation.Allocation` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the allocation could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent allocation. :returns: The instance of the allocation which was deleted. :rtype: :class:`~openstack.baremetal.v1.allocation.Allocation`. """ return self._delete( _allocation.Allocation, allocation, ignore_missing=ignore_missing ) def wait_for_allocation( self, allocation, timeout=None, ignore_error=False ): """Wait for the allocation to become active. :param allocation: The value can be the name or ID of an allocation or a :class:`~openstack.baremetal.v1.allocation.Allocation` instance. :param timeout: How much (in seconds) to wait for the allocation. The value of ``None`` (the default) means no client-side timeout. :param ignore_error: If ``True``, this call will raise an exception if the allocation reaches the ``error`` state. Otherwise the error state is considered successful and the call returns. :returns: The instance of the allocation. :rtype: :class:`~openstack.baremetal.v1.allocation.Allocation`. 
:raises: :class:`~openstack.exceptions.ResourceFailure` if allocation fails and ``ignore_error`` is ``False``. :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. """ res = self._get_resource(_allocation.Allocation, allocation) return res.wait(self, timeout=timeout, ignore_error=ignore_error) def add_node_trait(self, node, trait): """Add a trait to a node. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param trait: trait to remove from the node. :returns: The updated node """ res = self._get_resource(_node.Node, node) return res.add_trait(self, trait) def remove_node_trait(self, node, trait, ignore_missing=True): """Remove a trait from a node. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param trait: trait to remove from the node. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the trait could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent trait. :returns: The updated :class:`~openstack.baremetal.v1.node.Node` """ res = self._get_resource(_node.Node, node) return res.remove_trait(self, trait, ignore_missing=ignore_missing) def call_node_vendor_passthru(self, node, verb, method, body=None): """Calls vendor_passthru for a node. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param verb: The HTTP verb, one of GET, SET, POST, DELETE. :param method: The method to call using vendor_passthru. :param body: The JSON body in the HTTP call. :returns: The raw response from the method. """ res = self._get_resource(_node.Node, node) return res.call_vendor_passthru(self, verb, method, body) def list_node_vendor_passthru(self, node): """Lists vendor_passthru for a node. 
:param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :returns: A list of vendor_passthru methods for the node. """ res = self._get_resource(_node.Node, node) return res.list_vendor_passthru(self) def get_node_console(self, node): """Get the console for a node. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :returns: Connection information for the console. """ res = self._get_resource(_node.Node, node) return res.get_node_console(self) def enable_node_console(self, node): """Enable the console for a node. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :returns: None """ res = self._get_resource(_node.Node, node) return res.set_console_mode(self, True) def disable_node_console(self, node): """Disable the console for a node. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :returns: None """ res = self._get_resource(_node.Node, node) return res.set_console_mode(self, False) def set_node_traits(self, node, traits): """Set traits for a node. Removes any existing traits and adds the traits passed in to this method. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param traits: list of traits to add to the node. :returns: The updated :class:`~openstack.baremetal.v1.node.Node` """ res = self._get_resource(_node.Node, node) return res.set_traits(self, traits) def list_node_firmware(self, node): """Lists firmware components for a node. :param node: The value can be the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :returns: A list of the node's firmware components. 
""" res = self._get_resource(_node.Node, node) return res.list_firmware(self) def volume_connectors(self, details=False, **query): """Retrieve a generator of volume_connector. :param details: A boolean indicating whether the detailed information for every volume_connector should be returned. :param dict query: Optional query parameters to be sent to restrict the volume_connectors returned. Available parameters include: * ``fields``: A list containing one or more fields to be returned in the response. This may lead to some performance gain because other fields of the resource are not refreshed. * ``limit``: Requests at most the specified number of volume_connector be returned from the query. * ``marker``: Specifies the ID of the last-seen volume_connector. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen volume_connector from the response as the ``marker`` value in subsequent limited request. * ``node``:only return the ones associated with this specific node (name or UUID), or an empty set if not found. * ``sort_dir``:Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. * ``sort_key``: Sorts the response by the this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. :returns: A generator of volume_connector instances. """ if details: query['detail'] = True return _volumeconnector.VolumeConnector.list(self, **query) def create_volume_connector(self, **attrs): """Create a new volume_connector from attributes. 
:param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`. :returns: The results of volume_connector creation. :rtype: :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`. """ return self._create(_volumeconnector.VolumeConnector, **attrs) def find_volume_connector(self, vc_id, ignore_missing=True): """Find a single volume connector. :param str vc_id: The ID of a volume connector. :param bool ignore_missing: When set to ``False``, an exception of :class:`~openstack.exceptions.NotFoundException` will be raised when the volume connector does not exist. When set to `True``, None will be returned when attempting to find a nonexistent volume connector. :returns: One :class:`~openstack.baremetal.v1.volumeconnector.VolumeConnector` object or None. """ return self._find( _volumeconnector.VolumeConnector, vc_id, ignore_missing=ignore_missing, ) def get_volume_connector(self, volume_connector, fields=None): """Get a specific volume_connector. :param volume_connector: The value can be the ID of a volume_connector or a :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector` instance. :param fields: Limit the resource fields to fetch.` :returns: One :class: `~openstack.baremetal.v1.volume_connector.VolumeConnector` :raises: :class:`~openstack.exceptions.NotFoundException` when no volume_connector matching the name or ID could be found.` """ return self._get_with_fields( _volumeconnector.VolumeConnector, volume_connector, fields=fields ) def update_volume_connector(self, volume_connector, **attrs): """Update a volume_connector. :param volume_connector: Either the ID of a volume_connector or an instance of :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`. :param dict attrs: The attributes to update on the volume_connector represented by the ``volume_connector`` parameter. :returns: The updated volume_connector. 
:rtype: :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector` """ return self._update( _volumeconnector.VolumeConnector, volume_connector, **attrs ) def patch_volume_connector(self, volume_connector, patch): """Apply a JSON patch to the volume_connector. :param volume_connector: The value can be the ID of a volume_connector or a :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector` instance. :param patch: JSON patch to apply. :returns: The updated volume_connector. :rtype: :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector.` """ return self._get_resource( _volumeconnector.VolumeConnector, volume_connector ).patch(self, patch) def delete_volume_connector(self, volume_connector, ignore_missing=True): """Delete an volume_connector. :param volume_connector: The value can be either the ID of a volume_connector.VolumeConnector or a :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the volume_connector could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent volume_connector. :returns: The instance of the volume_connector which was deleted. :rtype: :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`. """ return self._delete( _volumeconnector.VolumeConnector, volume_connector, ignore_missing=ignore_missing, ) def volume_targets(self, details=False, **query): """Retrieve a generator of volume_target. :param details: A boolean indicating whether the detailed information for every volume_target should be returned. :param dict query: Optional query parameters to be sent to restrict the volume_targets returned. Available parameters include: * ``fields``: A list containing one or more fields to be returned in the response. This may lead to some performance gain because other fields of the resource are not refreshed. 
* ``limit``: Requests at most the specified number of volume_connector be returned from the query. * ``marker``: Specifies the ID of the last-seen volume_target. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen volume_target from the response as the ``marker`` value in subsequent limited request. * ``node``:only return the ones associated with this specific node (name or UUID), or an empty set if not found. * ``sort_dir``:Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. * ``sort_key``: Sorts the response by the this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. :returns: A generator of volume_target instances. """ if details: query['detail'] = True return _volumetarget.VolumeTarget.list(self, **query) def create_volume_target(self, **attrs): """Create a new volume_target from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.baremetal.v1.volume_target.VolumeTarget`. :returns: The results of volume_target creation. :rtype: :class:`~openstack.baremetal.v1.volume_target.VolumeTarget`. """ return self._create(_volumetarget.VolumeTarget, **attrs) def find_volume_target(self, vt_id, ignore_missing=True): """Find a single volume target. :param str vt_id: The ID of a volume target. :param bool ignore_missing: When set to ``False``, an exception of :class:`~openstack.exceptions.NotFoundException` will be raised when the volume connector does not exist. 
When set to `True``, None will be returned when attempting to find a nonexistent volume target. :returns: One :class:`~openstack.baremetal.v1.volumetarget.VolumeTarget` object or None. """ return self._find( _volumetarget.VolumeTarget, vt_id, ignore_missing=ignore_missing ) def get_volume_target(self, volume_target, fields=None): """Get a specific volume_target. :param volume_target: The value can be the ID of a volume_target or a :class:`~openstack.baremetal.v1.volume_target.VolumeTarget` instance. :param fields: Limit the resource fields to fetch.` :returns: One :class:`~openstack.baremetal.v1.volume_target.VolumeTarget` :raises: :class:`~openstack.exceptions.NotFoundException` when no volume_target matching the name or ID could be found.` """ return self._get_with_fields( _volumetarget.VolumeTarget, volume_target, fields=fields ) def update_volume_target(self, volume_target, **attrs): """Update a volume_target. :param volume_target: Either the ID of a volume_target or an instance of :class:`~openstack.baremetal.v1.volume_target.VolumeTarget`. :param dict attrs: The attributes to update on the volume_target represented by the ``volume_target`` parameter. :returns: The updated volume_target. :rtype: :class:`~openstack.baremetal.v1.volume_target.VolumeTarget` """ return self._update(_volumetarget.VolumeTarget, volume_target, **attrs) def patch_volume_target(self, volume_target, patch): """Apply a JSON patch to the volume_target. :param volume_target: The value can be the ID of a volume_target or a :class:`~openstack.baremetal.v1.volume_target.VolumeTarget` instance. :param patch: JSON patch to apply. :returns: The updated volume_target. :rtype: :class:`~openstack.baremetal.v1.volume_target.VolumeTarget.` """ return self._get_resource( _volumetarget.VolumeTarget, volume_target ).patch(self, patch) def delete_volume_target(self, volume_target, ignore_missing=True): """Delete an volume_target. 
:param volume_target: The value can be either the ID of a volume_target.VolumeTarget or a :class:`~openstack.baremetal.v1.volume_target.VolumeTarget` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the volume_target could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent volume_target. :returns: The instance of the volume_target which was deleted. :rtype: :class:`~openstack.baremetal.v1.volume_target.VolumeTarget`. """ return self._delete( _volumetarget.VolumeTarget, volume_target, ignore_missing=ignore_missing, ) def deploy_templates(self, details=False, **query): """Retrieve a generator of deploy_templates. :param details: A boolean indicating whether the detailed information for every deploy_templates should be returned. :param dict query: Optional query parameters to be sent to restrict the deploy_templates to be returned. :returns: A generator of Deploy templates instances. """ if details: query['detail'] = True return _deploytemplates.DeployTemplate.list(self, **query) def create_deploy_template(self, **attrs): """Create a new deploy_template from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`. :returns: The results of deploy_template creation. :rtype: :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`. """ return self._create(_deploytemplates.DeployTemplate, **attrs) def update_deploy_template(self, deploy_template, **attrs): """Update a deploy_template. :param deploy_template: Either the ID of a deploy_template, or an instance of :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`. :param dict attrs: The attributes to update on the deploy_template represented by the ``deploy_template`` parameter. :returns: The updated deploy_template. 
:rtype: :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate` """ return self._update( _deploytemplates.DeployTemplate, deploy_template, **attrs ) def delete_deploy_template(self, deploy_template, ignore_missing=True): """Delete a deploy_template. :param deploy_template:The value can be either the ID of a deploy_template or a :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate` instance. :param bool ignore_missing: When set to ``False``, an exception:class:`~openstack.exceptions.NotFoundException` will be raised when the deploy_template could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent deploy_template. :returns: The instance of the deploy_template which was deleted. :rtype: :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`. """ return self._delete( _deploytemplates.DeployTemplate, deploy_template, ignore_missing=ignore_missing, ) def get_deploy_template(self, deploy_template, fields=None): """Get a specific deployment template. :param deploy_template: The value can be the name or ID of a deployment template :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate` instance. :param fields: Limit the resource fields to fetch. :returns: One :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate` :raises: :class:`~openstack.exceptions.NotFoundException` when no deployment template matching the name or ID could be found. """ return self._get_with_fields( _deploytemplates.DeployTemplate, deploy_template, fields=fields ) def patch_deploy_template(self, deploy_template, patch): """Apply a JSON patch to the deploy_templates. :param deploy_templates: The value can be the ID of a deploy_template or a :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate` instance. :param patch: JSON patch to apply. :returns: The updated deploy_template. 
:rtype: :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate` """ return self._get_resource( _deploytemplates.DeployTemplate, deploy_template ).patch(self, patch) def conductors(self, details=False, **query): """Retrieve a generator of conductors. :param bool details: A boolean indicating whether the detailed information for every conductor should be returned. :returns: A generator of conductor instances. """ if details: query['details'] = True return _conductor.Conductor.list(self, **query) def get_conductor(self, conductor, fields=None): """Get a specific conductor. :param conductor: The value can be the name of a conductor or a :class:`~openstack.baremetal.v1.conductor.Conductor` instance. :returns: One :class:`~openstack.baremetal.v1.conductor.Conductor` :raises: :class:`~openstack.exceptions.NotFoundException` when no conductor matching the name could be found. """ return self._get_with_fields( _conductor.Conductor, conductor, fields=fields ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/allocation.py0000664000175000017500000001044400000000000023202 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.baremetal.v1 import _common from openstack import exceptions from openstack import resource from openstack import utils class Allocation(_common.Resource): resources_key = 'allocations' base_path = '/allocations' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_patch = True commit_method = 'PATCH' commit_jsonpatch = True _query_mapping = resource.QueryParameters( 'node', 'resource_class', 'state', 'owner', fields={'type': _common.fields_type}, ) # Allocation update is available since 1.57 # Backfilling allocations is available since 1.58 # owner attribute is available since 1.60 _max_microversion = '1.60' #: The candidate nodes for this allocation. candidate_nodes = resource.Body('candidate_nodes', type=list) #: Timestamp at which the allocation was created. created_at = resource.Body('created_at') #: A set of one or more arbitrary metadata key and value pairs. extra = resource.Body('extra', type=dict) #: The UUID for the allocation. id = resource.Body('uuid', alternate_id=True) #: The last error for the allocation. last_error = resource.Body("last_error") #: A list of relative links, including the self and bookmark links. links = resource.Body('links', type=list) #: The name of the allocation. name = resource.Body('name') #: The node UUID or name to create the allocation against, #: bypassing the normal allocation process. node = resource.Body('node') #: UUID of the node this allocation belongs to. node_id = resource.Body('node_uuid') #: The tenant who owns the object owner = resource.Body('owner') #: The requested resource class. resource_class = resource.Body('resource_class') #: The state of the allocation. state = resource.Body('state') #: The requested traits. traits = resource.Body('traits', type=list) #: Timestamp at which the allocation was last updated. 
updated_at = resource.Body('updated_at') def wait(self, session, timeout=None, ignore_error=False): """Wait for the allocation to become active. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param timeout: How much (in seconds) to wait for the allocation. The value of ``None`` (the default) means no client-side timeout. :param ignore_error: If ``True``, this call will raise an exception if the allocation reaches the ``error`` state. Otherwise the error state is considered successful and the call returns. :return: This :class:`Allocation` instance. :raises: :class:`~openstack.exceptions.ResourceFailure` if allocation fails and ``ignore_error`` is ``False``. :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. """ if self.state == 'active': return self for count in utils.iterate_timeout( timeout, "Timeout waiting for the allocation %s" % self.id ): self.fetch(session) if self.state == 'error' and not ignore_error: raise exceptions.ResourceFailure( "Allocation %(allocation)s failed: %(error)s" % {'allocation': self.id, 'error': self.last_error} ) elif self.state != 'allocating': return self session.log.debug( 'Still waiting for the allocation %(allocation)s ' 'to become active, the current state is %(state)s', {'allocation': self.id, 'state': self.state}, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/chassis.py0000664000175000017500000000345400000000000022515 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.baremetal.v1 import _common from openstack import resource class Chassis(_common.Resource): resources_key = 'chassis' base_path = '/chassis' # Specifying fields became possible in 1.8. _max_microversion = '1.8' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_patch = True commit_method = 'PATCH' commit_jsonpatch = True _query_mapping = resource.QueryParameters( fields={'type': _common.fields_type}, ) #: Timestamp at which the chassis was created. created_at = resource.Body('created_at') #: A descriptive text about the service description = resource.Body('description') #: A set of one or more arbitrary metadata key and value pairs. extra = resource.Body('extra') #: The UUID for the chassis id = resource.Body('uuid', alternate_id=True) #: A list of relative links, including the self and bookmark links. links = resource.Body('links', type=list) #: Links to the collection of nodes contained in the chassis nodes = resource.Body('nodes', type=list) #: Timestamp at which the chassis was last updated. updated_at = resource.Body('updated_at') ChassisDetail = Chassis ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/conductor.py0000664000175000017500000000252200000000000023053 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.baremetal.v1 import _common from openstack import resource class Conductor(_common.Resource): resources_key = 'conductors' base_path = '/conductors' # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True allow_patch = False _query_mapping = resource.QueryParameters( 'detail', fields={'type': _common.fields_type}, ) _max_microversion = '1.49' created_at = resource.Body('created_at') updated_at = resource.Body('updated_at') hostname = resource.Body('hostname') conductor_group = resource.Body('conductor_group') alive = resource.Body('alive', type=bool) links = resource.Body('links', type=list) drivers = resource.Body('drivers', type=list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/deploy_templates.py0000664000175000017500000000341600000000000024430 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.baremetal.v1 import _common from openstack import resource class DeployTemplate(_common.Resource): resources_key = 'deploy_templates' base_path = '/deploy_templates' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_patch = True commit_method = 'PATCH' commit_jsonpatch = True _query_mapping = resource.QueryParameters( 'detail', fields={'type': _common.fields_type}, ) # Deploy Templates is available since 1.55 _max_microversion = '1.55' name = resource.Body('name') #: Timestamp at which the deploy_template was created. created_at = resource.Body('created_at') #: A set of one or more arbitrary metadata key and value pairs. extra = resource.Body('extra') #: A list of relative links. Includes the self and bookmark links. links = resource.Body('links', type=list) #: A set of physical information of the deploy_template. steps = resource.Body('steps', type=list) #: Timestamp at which the deploy_template was last updated. updated_at = resource.Body('updated_at') #: The UUID of the resource. id = resource.Body('uuid', alternate_id=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/driver.py0000664000175000017500000002106000000000000022344 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty from openstack.baremetal.v1 import _common from openstack import exceptions from openstack import resource from openstack import utils class Driver(resource.Resource): resources_key = 'drivers' base_path = '/drivers' # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters(details='detail') # The BIOS interface fields introduced in 1.40 (Rocky). # The firmware interface fields introduced in 1.86. _max_microversion = '1.86' #: A list of active hosts that support this driver. hosts = resource.Body('hosts', type=list) #: A list of relative links, including the self and bookmark links. links = resource.Body('links', type=list) #: The name of the driver name = resource.Body('name', alternate_id=True) #: A list of links to driver properties. properties = resource.Body('properties', type=list) # Hardware interface properties grouped together for convenience, # available with detail=True. #: Default BIOS interface implementation. #: Introduced in API microversion 1.40. default_bios_interface = resource.Body("default_bios_interface") #: Default boot interface implementation. #: Introduced in API microversion 1.30. default_boot_interface = resource.Body("default_boot_interface") #: Default console interface implementation. #: Introduced in API microversion 1.30. default_console_interface = resource.Body("default_console_interface") #: Default deploy interface implementation. #: Introduced in API microversion 1.30. default_deploy_interface = resource.Body("default_deploy_interface") #: Default firmware interface implementation. #: Introduced in API microversion 1.86. default_firmware_interface = resource.Body("default_firmware_interface") #: Default inspect interface implementation. #: Introduced in API microversion 1.30. default_inspect_interface = resource.Body("default_inspect_interface") #: Default management interface implementation. 
#: Introduced in API microversion 1.30. default_management_interface = resource.Body( "default_management_interface" ) #: Default network interface implementation. #: Introduced in API microversion 1.30. default_network_interface = resource.Body("default_network_interface") #: Default port interface implementation. #: Introduced in API microversion 1.30. default_power_interface = resource.Body("default_power_interface") #: Default RAID interface implementation. #: Introduced in API microversion 1.30. default_raid_interface = resource.Body("default_raid_interface") #: Default rescue interface implementation. #: Introduced in API microversion 1.38. default_rescue_interface = resource.Body("default_rescue_interface") #: Default storage interface implementation. #: Introduced in API microversion 1.33. default_storage_interface = resource.Body("default_storage_interface") #: Default vendor interface implementation. #: Introduced in API microversion 1.30. default_vendor_interface = resource.Body("default_vendor_interface") #: Enabled BIOS interface implementations. #: Introduced in API microversion 1.40. enabled_bios_interfaces = resource.Body("enabled_bios_interfaces") #: Enabled boot interface implementations. #: Introduced in API microversion 1.30. enabled_boot_interfaces = resource.Body("enabled_boot_interfaces") #: Enabled console interface implementations. #: Introduced in API microversion 1.30. enabled_console_interfaces = resource.Body("enabled_console_interfaces") #: Enabled deploy interface implementations. #: Introduced in API microversion 1.30. enabled_deploy_interfaces = resource.Body("enabled_deploy_interfaces") #: Enabled firmware interface implementations. #: Introduced in API microversion 1.86. enabled_firmware_interfaces = resource.Body("enabled_firmware_interfaces") #: Enabled inspect interface implementations. #: Introduced in API microversion 1.30. 
enabled_inspect_interfaces = resource.Body("enabled_inspect_interfaces") #: Enabled management interface implementations. #: Introduced in API microversion 1.30. enabled_management_interfaces = resource.Body( "enabled_management_interfaces" ) #: Enabled network interface implementations. #: Introduced in API microversion 1.30. enabled_network_interfaces = resource.Body("enabled_network_interfaces") #: Enabled port interface implementations. #: Introduced in API microversion 1.30. enabled_power_interfaces = resource.Body("enabled_power_interfaces") #: Enabled RAID interface implementations. #: Introduced in API microversion 1.30. enabled_raid_interfaces = resource.Body("enabled_raid_interfaces") #: Enabled rescue interface implementations. #: Introduced in API microversion 1.38. enabled_rescue_interfaces = resource.Body("enabled_rescue_interfaces") #: Enabled storage interface implementations. #: Introduced in API microversion 1.33. enabled_storage_interfaces = resource.Body("enabled_storage_interfaces") #: Enabled vendor interface implementations. #: Introduced in API microversion 1.30. enabled_vendor_interfaces = resource.Body("enabled_vendor_interfaces") def list_vendor_passthru(self, session): """Fetch vendor specific methods exposed by driver :param session: The session to use for making this request. :returns: A dict of the available vendor passthru methods for driver. 
Method names keys and corresponding usages in dict form as values Usage dict properties: * ``async``: bool # Is passthru function invoked asynchronously * ``attach``: bool # Is return value attached to response object * ``description``: str # Description of what the method does * ``http_methods``: list # List of HTTP methods supported """ session = self._get_session(session) request = self._prepare_request() request.url = utils.urljoin(request.url, 'vendor_passthru', 'methods') response = session.get(request.url, headers=request.headers) msg = "Failed to list list vendor_passthru methods for {driver_name}" exceptions.raise_from_response( response, error_message=msg.format(driver_name=self.name) ) return response.json() def call_vendor_passthru( self, session, verb: str, method: str, body: ty.Optional[dict] = None ): """Call a vendor specific passthru method Contents of body are params passed to the hardware driver function. Validation happens there. Missing parameters, or excess parameters will cause the request to be rejected :param session: The session to use for making this request. :param method: Vendor passthru method name. :param verb: One of GET, POST, PUT, DELETE, depending on the driver and method. :param body: passed to the vendor function as json body. :raises: :exc:`ValueError` if :data:`verb` is not one of GET, POST, PUT, DELETE :returns: response of method call. 
""" if verb.upper() not in ['GET', 'PUT', 'POST', 'DELETE']: raise ValueError(f'Invalid verb: {verb}') session = self._get_session(session) request = self._prepare_request() request.url = utils.urljoin( request.url, f'vendor_passthru?method={method}' ) call = getattr(session, verb.lower()) response = call( request.url, json=body, headers=request.headers, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = "Failed call to method {method} on driver {driver_name}".format( method=method, driver_name=self.name ) exceptions.raise_from_response(response, error_message=msg) return response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/node.py0000664000175000017500000016425000000000000022007 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import enum import typing as ty from openstack.baremetal.v1 import _common from openstack import exceptions from openstack import resource from openstack import utils class ValidationResult: """Result of a single interface validation. :ivar result: Result of a validation, ``True`` for success, ``False`` for failure, ``None`` for unsupported interface. :ivar reason: If ``result`` is ``False`` or ``None``, explanation of the result. 
""" def __init__(self, result, reason): self.result = result self.reason = reason class PowerAction(enum.Enum): """Mapping from an action to a target power state.""" POWER_ON = 'power on' """Power on the node.""" POWER_OFF = 'power off' """Power off the node (using hard power off).""" REBOOT = 'rebooting' """Reboot the node (using hard power off).""" SOFT_POWER_OFF = 'soft power off' """Power off the node using soft power off.""" SOFT_REBOOT = 'soft rebooting' """Reboot the node using soft power off.""" class WaitResult( collections.namedtuple('WaitResult', ['success', 'failure', 'timeout']) ): """A named tuple representing a result of waiting for several nodes. Each component is a list of :class:`~openstack.baremetal.v1.node.Node` objects: :ivar ~.success: a list of :class:`~openstack.baremetal.v1.node.Node` objects that reached the state. :ivar ~.timeout: a list of :class:`~openstack.baremetal.v1.node.Node` objects that reached timeout. :ivar ~.failure: a list of :class:`~openstack.baremetal.v1.node.Node` objects that hit a failure. """ __slots__ = () class Node(_common.Resource): resources_key = 'nodes' base_path = '/nodes' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_patch = True commit_method = 'PATCH' commit_jsonpatch = True _query_mapping = resource.QueryParameters( 'associated', 'conductor_group', 'driver', 'fault', 'include_children', 'parent_node', 'provision_state', 'resource_class', 'shard', fields={'type': _common.fields_type}, instance_id='instance_uuid', is_maintenance='maintenance', ) # Ability to have a firmware_interface on a node. _max_microversion = '1.87' # Properties #: The UUID of the allocation associated with this node. Added in API #: microversion 1.52. allocation_id = resource.Body("allocation_uuid") #: A string or UUID of the tenant who owns the baremetal node. Added in API #: microversion 1.50. 
owner = resource.Body("owner") #: The current boot mode state (uefi/bios). Added in API microversion 1.75. boot_mode = resource.Body("boot_mode") #: The UUID of the chassis associated wit this node. Can be empty or None. chassis_id = resource.Body("chassis_uuid") #: The current clean step. clean_step = resource.Body("clean_step") #: Hostname of the conductor currently handling this node. Added in API # microversion 1.49. conductor = resource.Body("conductor") #: Conductor group this node is managed by. Added in API microversion 1.46. conductor_group = resource.Body("conductor_group") #: Timestamp at which the node was last updated. created_at = resource.Body("created_at") #: The current deploy step. Added in API microversion 1.44. deploy_step = resource.Body("deploy_step") #: The description of the node. Added in API microversion 1.51. description = resource.Body("description") #: The name of the driver. driver = resource.Body("driver") #: All the metadata required by the driver to manage this node. List of #: fields varies between drivers, and can be retrieved from the #: :class:`openstack.baremetal.v1.driver.Driver` resource. driver_info = resource.Body("driver_info", type=dict) #: Internal metadata set and stored by node's driver. This is read-only. driver_internal_info = resource.Body("driver_internal_info", type=dict) #: A set of one or more arbitrary metadata key and value pairs. extra = resource.Body("extra") #: Fault type that caused the node to enter maintenance mode. #: Introduced in API microversion 1.42. fault = resource.Body("fault") #: The UUID of the node resource. id = resource.Body("uuid", alternate_id=True) #: Information used to customize the deployed image, e.g. size of root #: partition, config drive in the form of base64 encoded string and other #: metadata. instance_info = resource.Body("instance_info") #: UUID of the nova instance associated with this node. 
instance_id = resource.Body("instance_uuid") #: Override enabling of automated cleaning. Added in API microversion 1.47. is_automated_clean_enabled = resource.Body("automated_clean", type=bool) #: Whether console access is enabled on this node. is_console_enabled = resource.Body("console_enabled", type=bool) #: Whether node is currently in "maintenance mode". Nodes put into #: maintenance mode are removed from the available resource pool. is_maintenance = resource.Body("maintenance", type=bool) # Whether the node is protected from undeploying. Added in API microversion # 1.48. is_protected = resource.Body("protected", type=bool) #: Whether the node is marked for retirement. Added in API microversion #: 1.61. is_retired = resource.Body("retired", type=bool) #: Whether the node is currently booted with secure boot turned on. #: Added in API microversion 1.75. is_secure_boot = resource.Body("secure_boot", type=bool) #: Any error from the most recent transaction that started but failed to #: finish. last_error = resource.Body("last_error") #: Field indicating if the node is leased to a specific project. #: Added in API version 1.65 lessee = resource.Body("lessee") #: A list of relative links, including self and bookmark links. links = resource.Body("links", type=list) #: user settable description of the reason why the node was placed into #: maintenance mode. maintenance_reason = resource.Body("maintenance_reason") #: Human readable identifier for the node. May be undefined. Certain words #: are reserved. Added in API microversion 1.5 name = resource.Body("name") #: The node which serves as the parent_node for this node. #: Added in API version 1.83 parent_node = resource.Body("parent_node") #: Links to the collection of ports on this node. ports = resource.Body("ports", type=list) #: Links to the collection of portgroups on this node. Available since #: API microversion 1.24. port_groups = resource.Body("portgroups", type=list) #: The current power state. 
Usually "power on" or "power off", but may be #: "None" if service is unable to determine the power state. power_state = resource.Body("power_state") #: Physical characteristics of the node. Content populated by the service #: during inspection. properties = resource.Body("properties", type=dict) # The reason why this node is protected. Added in API microversion 1.48. protected_reason = resource.Body("protected_reason") #: The current provisioning state of the node. provision_state = resource.Body("provision_state") #: The reason why the node is marked for retirement. Added in API #: microversion 1.61. retired_reason = resource.Body("retired_reason") #: The current RAID configuration of the node. raid_config = resource.Body("raid_config") #: The name of an service conductor host which is holding a lock on this #: node, if a lock is held. reservation = resource.Body("reservation") #: A string to be used by external schedulers to identify this node as a #: unit of a specific type of resource. Added in API microversion 1.21. resource_class = resource.Body("resource_class") #: A string represents the current service step being executed upon. #: Added in API microversion 1.87. service_step = resource.Body("service_step") #: A string indicating the shard this node belongs to. Added in API #: microversion 1,82. shard = resource.Body("shard") #: Links to the collection of states. states = resource.Body("states", type=list) #: The requested state if a provisioning action has been requested. For #: example, ``AVAILABLE``, ``DEPLOYING``, ``DEPLOYWAIT``, ``DEPLOYING``, #: ``ACTIVE`` etc. target_provision_state = resource.Body("target_provision_state") #: The requested state during a state transition. target_power_state = resource.Body("target_power_state") #: The requested RAID configuration of the node which will be applied when #: the node next transitions through the CLEANING state. target_raid_config = resource.Body("target_raid_config") #: Traits of the node. 
Introduced in API microversion 1.37. traits = resource.Body("traits", type=list) #: Timestamp at which the node was last updated. updated_at = resource.Body("updated_at") # Hardware interfaces grouped together for convenience. #: BIOS interface to use when setting BIOS properties of the node. #: Introduced in API microversion 1.40. bios_interface = resource.Body("bios_interface") #: Boot interface to use when configuring boot of the node. #: Introduced in API microversion 1.31. boot_interface = resource.Body("boot_interface") #: Console interface to use when working with serial console. #: Introduced in API microversion 1.31. console_interface = resource.Body("console_interface") #: Deploy interface to use when deploying the node. #: Introduced in API microversion 1.31. deploy_interface = resource.Body("deploy_interface") #: Firmware interface to be used when managing the node. #: Introduced in API microversion 1.86 firmware_interface = resource.Body("firmware_interface") #: Inspect interface to use when inspecting the node. #: Introduced in API microversion 1.31. inspect_interface = resource.Body("inspect_interface") #: Management interface to use for management actions on the node. #: Introduced in API microversion 1.31. management_interface = resource.Body("management_interface") #: Network interface provider to use when plumbing the network connections #: for this node. Introduced in API microversion 1.20. network_interface = resource.Body("network_interface") #: Power interface to use for power actions on the node. #: Introduced in API microversion 1.31. power_interface = resource.Body("power_interface") #: RAID interface to use for configuring RAID on the node. #: Introduced in API microversion 1.31. raid_interface = resource.Body("raid_interface") #: Rescue interface to use for rescuing of the node. #: Introduced in API microversion 1.38. rescue_interface = resource.Body("rescue_interface") #: Storage interface to use when attaching remote storage. 
#: Introduced in API microversion 1.33. storage_interface = resource.Body("storage_interface") #: Vendor interface to use for vendor-specific actions on the node. #: Introduced in API microversion 1.31. vendor_interface = resource.Body("vendor_interface") def _consume_body_attrs(self, attrs): if 'provision_state' in attrs and attrs['provision_state'] is None: # API version 1.1 uses None instead of "available". Make it # consistent. attrs['provision_state'] = 'available' return super()._consume_body_attrs(attrs) def create(self, session, *args, **kwargs): """Create a remote resource based on this instance. The overridden version is capable of handling the populated ``provision_state`` field of one of three values: ``enroll``, ``manageable`` or ``available``. If not provided, the server default is used (``enroll`` in newer versions of Ironic). This call does not cause a node to go through automated cleaning. If you need it, use ``provision_state=manageable`` followed by a call to :meth:`set_provision_state`. Note that Bare Metal API 1.4 is required for ``manageable`` and 1.11 is required for ``enroll``. .. warning:: Using ``provision_state=available`` is only possible with API versions 1.1 to 1.10 and thus is incompatible with setting any fields that appeared after 1.11. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :return: This :class:`Resource` instance. :raises: ValueError if the Node's ``provision_state`` is not one of ``None``, ``enroll``, ``manageable`` or ``available``. :raises: :exc:`~openstack.exceptions.NotSupported` if the ``provision_state`` cannot be reached with any API version supported by the server. """ expected_provision_state = self.provision_state session = self._get_session(session) if expected_provision_state is not None: # Verify that the requested provision state is reachable with # the API version we are going to use. 
try: microversion = _common.STATE_VERSIONS[expected_provision_state] except KeyError: raise ValueError( "Node's provision_state must be one of %s for creation, " "got %s" % ( ', '.join(_common.STATE_VERSIONS), expected_provision_state, ) ) else: error_message = ( "Cannot create a node with initial provision " "state %s" % expected_provision_state ) # Nodes cannot be created as available using new API versions maximum = ( '1.10' if expected_provision_state == 'available' else None ) microversion = self._assert_microversion_for( session, 'create', microversion, maximum=maximum, error_message=error_message, ) else: microversion = None # use the base negotiation # Ironic cannot set provision_state itself, so marking it as unchanged self._clean_body_attrs({'provision_state'}) super().create(session, *args, microversion=microversion, **kwargs) if ( expected_provision_state == 'manageable' and self.provision_state != 'manageable' ): # Manageable is not reachable directly self.set_provision_state(session, 'manage', wait=True) return self def commit(self, session, *args, **kwargs): """Commit the state of the instance to the remote resource. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :return: This :class:`Node` instance. """ # These fields have to be set through separate API. if ( 'maintenance_reason' in self._body.dirty or 'maintenance' in self._body.dirty ): if not self.is_maintenance and self.maintenance_reason: if 'maintenance' in self._body.dirty: self.maintenance_reason = None else: raise ValueError( 'Maintenance reason cannot be set when ' 'maintenance is False' ) if self.is_maintenance: self._do_maintenance_action( session, 'put', {'reason': self.maintenance_reason} ) else: # This corresponds to setting maintenance=False and # maintenance_reason=None in the same request. 
self._do_maintenance_action(session, 'delete') self._clean_body_attrs({'maintenance', 'maintenance_reason'}) if not self.requires_commit: # Other fields are not updated, re-fetch the node to reflect # the new status. return self.fetch(session) return super().commit(session, *args, **kwargs) def set_provision_state( self, session, target, config_drive=None, clean_steps=None, rescue_password=None, wait=False, timeout=None, deploy_steps=None, service_steps=None, ): """Run an action modifying this node's provision state. This call is asynchronous, it will return success as soon as the Bare Metal service acknowledges the request. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param target: Provisioning action, e.g. ``active``, ``provide``. See the Bare Metal service documentation for available actions. :param config_drive: Config drive to pass to the node, only valid for ``active` and ``rebuild`` targets. You can use functions from :mod:`openstack.baremetal.configdrive` to build it. :param clean_steps: Clean steps to execute, only valid for ``clean`` target. :param rescue_password: Password for the rescue operation, only valid for ``rescue`` target. :param wait: Whether to wait for the target state to be reached. :param timeout: Timeout (in seconds) to wait for the target state to be reached. If ``None``, wait without timeout. :param deploy_steps: Deploy steps to execute, only valid for ``active`` and ``rebuild`` target. :param service_steps: Service steps to execute, only valid for ``service`` target. :return: This :class:`Node` instance. :raises: ValueError if ``config_drive``, ``clean_steps``, ``deploy_steps`` or ``rescue_password`` are provided with an invalid ``target``. :raises: :class:`~openstack.exceptions.ResourceFailure` if the node reaches an error state while waiting for the state. :raises: :class:`~openstack.exceptions.ResourceTimeout` if timeout is reached while waiting for the state. 
""" session = self._get_session(session) version = None if target in _common.PROVISIONING_VERSIONS: version = '1.%d' % _common.PROVISIONING_VERSIONS[target] if config_drive: # Some config drive actions require a higher version. if isinstance(config_drive, dict): version = _common.CONFIG_DRIVE_DICT_VERSION elif target == 'rebuild': version = _common.CONFIG_DRIVE_REBUILD_VERSION if deploy_steps: version = _common.DEPLOY_STEPS_VERSION version = self._assert_microversion_for(session, 'commit', version) body = {'target': target} if config_drive: if target not in ('active', 'rebuild'): raise ValueError( 'Config drive can only be provided with ' '"active" and "rebuild" targets' ) if isinstance(config_drive, bytes): try: config_drive = config_drive.decode('utf-8') except UnicodeError: raise ValueError( 'Config drive must be a dictionary or a base64 ' 'encoded string' ) # Not a typo - ironic accepts "configdrive" (without underscore) body['configdrive'] = config_drive if clean_steps is not None: if target != 'clean': raise ValueError( 'Clean steps can only be provided with "clean" target' ) body['clean_steps'] = clean_steps if deploy_steps is not None: if target not in ('active', 'rebuild'): raise ValueError( 'Deploy steps can only be provided with ' '"deploy" and "rebuild" target' ) body['deploy_steps'] = deploy_steps if service_steps is not None: if target != 'service': raise ValueError( 'Service steps can only be provided with ' '"service" target' ) body['service_steps'] = service_steps if rescue_password is not None: if target != 'rescue': raise ValueError( 'Rescue password can only be provided with ' '"rescue" target' ) body['rescue_password'] = rescue_password if wait: try: expected_state = _common.EXPECTED_STATES[target] except KeyError: raise ValueError( 'For target %s the expected state is not ' 'known, cannot wait for it' % target ) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'states', 'provision') response = 
session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = ( "Failed to set provision state for bare metal node {node} " "to {target}".format(node=self.id, target=target) ) exceptions.raise_from_response(response, error_message=msg) if wait: return self.wait_for_provision_state( session, expected_state, timeout=timeout ) else: return self.fetch(session) def wait_for_power_state(self, session, expected_state, timeout=None): """Wait for the node to reach the expected power state. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param expected_state: The expected power state to reach. :param timeout: If ``wait`` is set to ``True``, specifies how much (in seconds) to wait for the expected state to be reached. The value of ``None`` (the default) means no client-side timeout. :return: This :class:`Node` instance. :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. """ for count in utils.iterate_timeout( timeout, "Timeout waiting for node %(node)s to reach " "power state '%(state)s'" % {'node': self.id, 'state': expected_state}, ): self.fetch(session) if self.power_state == expected_state: return self session.log.debug( 'Still waiting for node %(node)s to reach power state ' '"%(target)s", the current state is "%(state)s"', { 'node': self.id, 'target': expected_state, 'state': self.power_state, }, ) def wait_for_provision_state( self, session, expected_state, timeout=None, abort_on_failed_state=True ): """Wait for the node to reach the expected state. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param expected_state: The expected provisioning state to reach. :param timeout: If ``wait`` is set to ``True``, specifies how much (in seconds) to wait for the expected state to be reached. 
The value of ``None`` (the default) means no client-side timeout. :param abort_on_failed_state: If ``True`` (the default), abort waiting if the node reaches a failure state which does not match the expected one. Note that the failure state for ``enroll`` -> ``manageable`` transition is ``enroll`` again. :return: This :class:`Node` instance. :raises: :class:`~openstack.exceptions.ResourceFailure` if the node reaches an error state and ``abort_on_failed_state`` is ``True``. :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. """ for count in utils.iterate_timeout( timeout, "Timeout waiting for node %(node)s to reach " "target state '%(state)s'" % {'node': self.id, 'state': expected_state}, ): self.fetch(session) if self._check_state_reached( session, expected_state, abort_on_failed_state ): return self session.log.debug( 'Still waiting for node %(node)s to reach state ' '"%(target)s", the current state is "%(state)s"', { 'node': self.id, 'target': expected_state, 'state': self.provision_state, }, ) def wait_for_reservation(self, session, timeout=None): """Wait for a lock on the node to be released. Bare metal nodes in ironic have a reservation lock that is used to represent that a conductor has locked the node while performing some sort of action, such as changing configuration as a result of a machine state change. This lock can occur during power syncronization, and prevents updates to objects attached to the node, such as ports. Note that nothing prevents a conductor from acquiring the lock again after this call returns, so it should be treated as best effort. Returns immediately if there is no reservation on the node. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param timeout: How much (in seconds) to wait for the lock to be released. The value of ``None`` (the default) means no timeout. :return: This :class:`Node` instance. 
""" if self.reservation is None: return self for count in utils.iterate_timeout( timeout, "Timeout waiting for the lock to be released on node %s" % self.id, ): self.fetch(session) if self.reservation is None: return self session.log.debug( 'Still waiting for the lock to be released on node ' '%(node)s, currently locked by conductor %(host)s', {'node': self.id, 'host': self.reservation}, ) def _check_state_reached( self, session, expected_state, abort_on_failed_state=True ): """Wait for the node to reach the expected state. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param expected_state: The expected provisioning state to reach. :param abort_on_failed_state: If ``True`` (the default), abort waiting if the node reaches a failure state which does not match the expected one. Note that the failure state for ``enroll`` -> ``manageable`` transition is ``enroll`` again. :return: ``True`` if the target state is reached :raises: :class:`~openstack.exceptions.ResourceFailure` if the node reaches an error state and ``abort_on_failed_state`` is ``True``. 
""" # NOTE(dtantsur): microversion 1.2 changed None to available if self.provision_state == expected_state or ( expected_state == 'available' and self.provision_state is None ): return True elif not abort_on_failed_state: return False if ( self.provision_state.endswith(' failed') or self.provision_state == 'error' ): raise exceptions.ResourceFailure( "Node %(node)s reached failure state \"%(state)s\"; " "the last error is %(error)s" % { 'node': self.id, 'state': self.provision_state, 'error': self.last_error, } ) # Special case: a failure state for "manage" transition can be # "enroll" elif ( expected_state == 'manageable' and self.provision_state == 'enroll' and self.last_error ): raise exceptions.ResourceFailure( "Node %(node)s could not reach state manageable: " "failed to verify management credentials; " "the last error is %(error)s" % {'node': self.id, 'error': self.last_error} ) def inject_nmi(self, session): """Inject NMI. :param session: The session to use for making this request. :return: None """ session = self._get_session(session) version = self._assert_microversion_for( session, 'commit', _common.INJECT_NMI_VERSION, ) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'management', 'inject_nmi') response = session.put( request.url, json={}, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = f"Failed to inject NMI to node {self.id}" exceptions.raise_from_response(response, error_message=msg) def set_power_state(self, session, target, wait=False, timeout=None): """Run an action modifying this node's power state. This call is asynchronous, it will return success as soon as the Bare Metal service acknowledges the request. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param target: Target power state, as a :class:`PowerAction` or a string. 
:param wait: Whether to wait for the expected power state to be reached. :param timeout: Timeout (in seconds) to wait for the target state to be reached. If ``None``, wait without timeout. """ if isinstance(target, PowerAction): target = target.value if wait: try: expected = _common.EXPECTED_POWER_STATES[target] except KeyError: raise ValueError( "Cannot use target power state %s with wait, " "the expected state is not known" % target ) session = self._get_session(session) if target.startswith("soft "): version = '1.27' else: version = None version = self._assert_microversion_for(session, 'commit', version) # TODO(dtantsur): server timeout support body = {'target': target} request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'states', 'power') response = session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = ( "Failed to set power state for bare metal node {node} " "to {target}".format(node=self.id, target=target) ) exceptions.raise_from_response(response, error_message=msg) if wait: self.wait_for_power_state(session, expected, timeout=timeout) def attach_vif( self, session, vif_id: str, retry_on_conflict: bool = True, *, port_id: ty.Optional[str] = None, port_group_id: ty.Optional[str] = None, ) -> None: """Attach a VIF to the node. The exact form of the VIF ID depends on the network interface used by the node. In the most common case it is a Network service port (NOT a Bare Metal port) ID. A VIF can only be attached to one node at a time. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param vif_id: Backend-specific VIF ID. :param retry_on_conflict: Whether to retry HTTP CONFLICT errors. This can happen when either the VIF is already used on a node or the node is locked. Since the latter happens more often, the default value is True. 
:param port_id: The UUID of the port to attach the VIF to. Only one of port_id or port_group_id can be provided. :param port_group_id: The UUID of the portgroup to attach to. Only one of port_group_id or port_id can be provided. :return: None :raises: :exc:`~openstack.exceptions.NotSupported` if the server does not support the VIF API. :raises: :exc:`~openstack.exceptions.InvalidRequest` if both port_id and port_group_id are provided. """ if port_id and port_group_id: msg = ( 'Only one of vif_port_id and vif_portgroup_id can be provided' ) raise exceptions.InvalidRequest(msg) session = self._get_session(session) if port_id or port_group_id: required_version = _common.VIF_OPTIONAL_PARAMS_VERSION else: required_version = _common.VIF_VERSION version = self._assert_microversion_for( session, 'commit', required_version, error_message=("Cannot use VIF attachment API"), ) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'vifs') body = {'id': vif_id} if port_id: body['port_uuid'] = port_id elif port_group_id: body['portgroup_uuid'] = port_group_id retriable_status_codes = _common.RETRIABLE_STATUS_CODES if not retry_on_conflict: retriable_status_codes = list(set(retriable_status_codes) - {409}) response = session.post( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=retriable_status_codes, ) msg = "Failed to attach VIF {vif} to bare metal node {node}".format( node=self.id, vif=vif_id ) exceptions.raise_from_response(response, error_message=msg) def detach_vif(self, session, vif_id, ignore_missing=True): """Detach a VIF from the node. The exact form of the VIF ID depends on the network interface used by the node. In the most common case it is a Network service port (NOT a Bare Metal port) ID. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param string vif_id: Backend-specific VIF ID. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the VIF does not exist. Otherwise, ``False`` is returned. :return: ``True`` if the VIF was detached, otherwise ``False``. :raises: :exc:`~openstack.exceptions.NotSupported` if the server does not support the VIF API. """ session = self._get_session(session) version = self._assert_microversion_for( session, 'commit', _common.VIF_VERSION, error_message=("Cannot use VIF attachment API"), ) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'vifs', vif_id) response = session.delete( request.url, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) if ignore_missing and response.status_code == 400: session.log.debug( 'VIF %(vif)s was already removed from node %(node)s', {'vif': vif_id, 'node': self.id}, ) return False msg = "Failed to detach VIF {vif} from bare metal node {node}".format( node=self.id, vif=vif_id ) exceptions.raise_from_response(response, error_message=msg) return True def list_vifs(self, session): """List IDs of VIFs attached to the node. The exact form of the VIF ID depends on the network interface used by the node. In the most common case it is a Network service port (NOT a Bare Metal port) ID. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :return: List of VIF IDs as strings. :raises: :exc:`~openstack.exceptions.NotSupported` if the server does not support the VIF API. 
""" session = self._get_session(session) version = self._assert_microversion_for( session, 'fetch', _common.VIF_VERSION, error_message=("Cannot use VIF attachment API"), ) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'vifs') response = session.get( request.url, headers=request.headers, microversion=version ) msg = "Failed to list VIFs attached to bare metal node {node}".format( node=self.id ) exceptions.raise_from_response(response, error_message=msg) return [vif['id'] for vif in response.json()['vifs']] def validate(self, session, required=('boot', 'deploy', 'power')): """Validate required information on the node. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param required: List of interfaces that are required to pass validation. The default value is the list of minimum required interfaces for provisioning. :return: dict mapping interface names to :class:`ValidationResult` objects. :raises: :exc:`~openstack.exceptions.ValidationException` if validation fails for a required interface. 
""" session = self._get_session(session) version = self._get_microversion(session, action='fetch') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'validate') response = session.get( request.url, headers=request.headers, microversion=version ) msg = f"Failed to validate node {self.id}" exceptions.raise_from_response(response, error_message=msg) result = response.json() if required: failed = [ '{} ({})'.format(key, value.get('reason', 'no reason')) for key, value in result.items() if key in required and not value.get('result') ] if failed: raise exceptions.ValidationException( 'Validation failed for required interfaces of node {node}:' ' {failures}'.format( node=self.id, failures=', '.join(failed) ) ) return { key: ValidationResult(value.get('result'), value.get('reason')) for key, value in result.items() } def set_maintenance(self, session, reason=None): """Enable maintenance mode on the node. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param reason: Optional reason for maintenance. :return: This :class:`Node` instance. """ self._do_maintenance_action(session, 'put', {'reason': reason}) return self.fetch(session) def unset_maintenance(self, session): """Disable maintenance mode on the node. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :return: This :class:`Node` instance. 
""" self._do_maintenance_action(session, 'delete') return self.fetch(session) def _do_maintenance_action(self, session, verb, body=None): session = self._get_session(session) version = self._get_microversion(session, action='commit') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'maintenance') response = getattr(session, verb)( request.url, json=body, headers=request.headers, microversion=version, ) msg = "Failed to change maintenance mode for node {node}".format( node=self.id ) exceptions.raise_from_response(response, error_message=msg) def get_boot_device(self, session): """Get node boot device. :param session: The session to use for making this request. :returns: The HTTP response. """ session = self._get_session(session) version = self._get_microversion(session, action='fetch') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'management', 'boot_device') response = session.get( request.url, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = "Failed to get boot device for node {node}".format( node=self.id, ) exceptions.raise_from_response(response, error_message=msg) return response.json() def set_boot_device(self, session, boot_device, persistent=False): """Set node boot device :param session: The session to use for making this request. :param boot_device: Boot device to assign to the node. 
:param persistent: If the boot device change is maintained after node reboot :returns: ``None`` """ session = self._get_session(session) version = self._get_microversion(session, action='commit') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'management', 'boot_device') body = {'boot_device': boot_device, 'persistent': persistent} response = session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = f"Failed to set boot device for node {self.id}" exceptions.raise_from_response(response, error_message=msg) def get_supported_boot_devices(self, session): """Get supported boot devices for the node. :param session: The session to use for making this request. :returns: The HTTP response. """ session = self._get_session(session) version = self._get_microversion(session, action='fetch') request = self._prepare_request(requires_id=True) request.url = utils.urljoin( request.url, 'management', 'boot_device', 'supported', ) response = session.get( request.url, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = "Failed to get supported boot devices for node {node}".format( node=self.id, ) exceptions.raise_from_response(response, error_message=msg) return response.json() def set_boot_mode(self, session, target): """Make a request to change node's boot mode This call is asynchronous, it will return success as soon as the Bare Metal service acknowledges the request. :param session: The session to use for making this request. :param target: Boot mode to set for node, one of either 'uefi'/'bios'. :returns: ``None`` :raises: ValueError if ``target`` is not one of 'uefi or 'bios'. 
""" session = self._get_session(session) version = utils.pick_microversion( session, _common.CHANGE_BOOT_MODE_VERSION ) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'states', 'boot_mode') if target not in ('uefi', 'bios'): raise ValueError( "Unrecognized boot mode %s." "Boot mode should be one of 'uefi' or 'bios'." % target ) body = {'target': target} response = session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = f"Failed to change boot mode for node {self.id}" exceptions.raise_from_response(response, error_message=msg) def set_secure_boot(self, session, target): """Make a request to change node's secure boot state This call is asynchronous, it will return success as soon as the Bare Metal service acknowledges the request. :param session: The session to use for making this request. :param bool target: Boolean indicating secure boot state to set. True/False corresponding to 'on'/'off' respectively. :returns: ``None`` :raises: ValueError if ``target`` is not boolean. """ session = self._get_session(session) version = utils.pick_microversion( session, _common.CHANGE_BOOT_MODE_VERSION ) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'states', 'secure_boot') if not isinstance(target, bool): raise ValueError( "Invalid target %s. It should be True or False " "corresponding to secure boot state 'on' or 'off'" % target ) body = {'target': target} response = session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = "Failed to change secure boot state for {node}".format( node=self.id ) exceptions.raise_from_response(response, error_message=msg) def add_trait(self, session, trait): """Add a trait to the node. :param session: The session to use for making this request. 
:param trait: The trait to add to the node. :returns: ``None`` """ session = self._get_session(session) version = utils.pick_microversion(session, '1.37') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'traits', trait) response = session.put( request.url, json=None, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = "Failed to add trait {trait} for node {node}".format( trait=trait, node=self.id ) exceptions.raise_from_response(response, error_message=msg) self.traits = list(set(self.traits or ()) | {trait}) def remove_trait(self, session, trait, ignore_missing=True): """Remove a trait from the node. :param session: The session to use for making this request. :param trait: The trait to remove from the node. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the trait does not exist. Otherwise, ``False`` is returned. :returns bool: True on success removing the trait. False when the trait does not exist already. """ session = self._get_session(session) version = utils.pick_microversion(session, '1.37') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'traits', trait) response = session.delete( request.url, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) if ignore_missing and response.status_code == 400: session.log.debug( 'Trait %(trait)s was already removed from node %(node)s', {'trait': trait, 'node': self.id}, ) return False msg = "Failed to remove trait {trait} from bare metal node {node}" exceptions.raise_from_response( response, error_message=msg.format(node=self.id, trait=trait), ) if self.traits: self.traits = list(set(self.traits) - {trait}) return True def set_traits(self, session, traits): """Set traits for the node. Removes any existing traits and adds the traits passed in to this method. 
:param session: The session to use for making this request. :param traits: list of traits to add to the node. :returns: ``None`` """ session = self._get_session(session) version = utils.pick_microversion(session, '1.37') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'traits') body = {'traits': traits} response = session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = f"Failed to set traits for node {self.id}" exceptions.raise_from_response(response, error_message=msg) self.traits = traits def call_vendor_passthru(self, session, verb, method, body=None): """Call a vendor passthru method. :param session: The session to use for making this request. :param verb: The HTTP verb, one of GET, SET, POST, DELETE. :param method: The method to call using vendor_passthru. :param body: The JSON body in the HTTP call. :returns: The HTTP response. """ session = self._get_session(session) version = self._get_microversion(session, action='commit') request = self._prepare_request(requires_id=True) request.url = utils.urljoin( request.url, f'vendor_passthru?method={method}' ) call = getattr(session, verb.lower()) response = call( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = ( "Failed to call vendor_passthru for node {node}, verb {verb}" " and method {method}".format( node=self.id, verb=verb, method=method ) ) exceptions.raise_from_response(response, error_message=msg) return response def list_vendor_passthru(self, session): """List vendor passthru methods for the node. :param session: The session to use for making this request. :returns: The HTTP response. 
""" session = self._get_session(session) version = self._get_microversion(session, action='fetch') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'vendor_passthru/methods') response = session.get( request.url, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = "Failed to list vendor_passthru methods for node {node}".format( node=self.id ) exceptions.raise_from_response(response, error_message=msg) return response.json() def get_console(self, session): """Get the node console. :param session: The session to use for making this request. :returns: The HTTP response. """ session = self._get_session(session) version = self._get_microversion(session, action='fetch') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'states', 'console') response = session.get( request.url, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = "Failed to get console for node {node}".format( node=self.id, ) exceptions.raise_from_response(response, error_message=msg) return response.json() def set_console_mode(self, session, enabled): """Set the node console mode. :param session: The session to use for making this request. :param enabled: Whether the console should be enabled or not. :return: ``None`` """ session = self._get_session(session) version = self._get_microversion(session, action='commit') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'states', 'console') if not isinstance(enabled, bool): raise ValueError( "Invalid enabled %s. 
It should be True or False " "corresponding to console enabled or disabled" % enabled ) body = {'enabled': enabled} response = session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = "Failed to change console mode for {node}".format( node=self.id, ) exceptions.raise_from_response(response, error_message=msg) # TODO(stephenfin): Drop 'node_id' and use 'self.id' instead or convert to # a classmethod def get_node_inventory(self, session, node_id): """Get a node's inventory. :param session: The session to use for making this request. :param node_id: The ID of the node. :returns: The HTTP response. """ session = self._get_session(session) version = self._get_microversion(session, action='fetch') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'inventory') response = session.get( request.url, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = "Failed to get inventory for node {node}".format( node=node_id, ) exceptions.raise_from_response(response, error_message=msg) return response.json() def list_firmware(self, session): """List firmware components associated with the node. :param session: The session to use for making this request. :returns: The HTTP response. 
    def patch(
        self,
        session,
        patch=None,
        prepend_key=True,
        has_body=True,
        retry_on_conflict=None,
        base_path=None,
        reset_interfaces=None,
    ):
        """Patch the node, optionally resetting its hardware interfaces.

        Overrides the base ``patch`` only when ``reset_interfaces`` is
        requested, since that requires adding a query parameter (and a
        minimum microversion) to the PATCH request.

        :param session: The session to use for making this request.
        :param patch: Optional JSON patch (list of operations) to apply on
            top of any dirty body attributes.
        :param prepend_key: Passed through to request preparation.
        :param has_body: Whether a response body is expected.
        :param retry_on_conflict: Whether to retry HTTP CONFLICT errors.
        :param base_path: Optional override of the resource base path.
        :param reset_interfaces: If not ``None``, sent as the
            ``reset_interfaces`` query parameter; requires the microversion
            in ``_common.RESET_INTERFACES_VERSION``.
        :return: This :class:`Node` instance.
        """
        if reset_interfaces is not None:
            # The id cannot be dirty for a commit
            self._body._dirty.discard("id")

            # Only try to update if we actually have anything to commit.
            if not patch and not self.requires_commit:
                return self

            if not self.allow_patch:
                raise exceptions.MethodNotSupported(self, "patch")

            session = self._get_session(session)
            # reset_interfaces needs a minimum microversion; negotiate it.
            microversion = self._assert_microversion_for(
                session, 'commit', _common.RESET_INTERFACES_VERSION
            )
            params = [('reset_interfaces', reset_interfaces)]

            request = self._prepare_request(
                requires_id=True,
                prepend_key=prepend_key,
                base_path=base_path,
                patch=True,
                params=params,
            )

            # Append the explicit JSON patch after any auto-generated one.
            if patch:
                request.body += self._convert_patch(patch)

            return self._commit(
                session,
                request,
                'PATCH',
                microversion,
                has_body=has_body,
                retry_on_conflict=retry_on_conflict,
            )
        else:
            # No interface reset requested: the stock implementation works.
            return super().patch(
                session, patch=patch, retry_on_conflict=retry_on_conflict
            )
class Port(_common.Resource):
    """A bare metal port (a NIC on a node), ironic ``/ports`` resource."""

    resources_key = 'ports'
    base_path = '/ports'

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True
    allow_patch = True

    commit_method = 'PATCH'
    commit_jsonpatch = True

    _query_mapping = resource.QueryParameters(
        'address',
        'node',
        'portgroup',
        'shard',
        fields={'type': _common.fields_type},
        node_id='node_uuid',
    )

    # The physical_network field introduced in 1.34
    # The is_smartnic field added in 1.53
    # Query filter by shard added in 1.82
    # The name field added in 1.88
    _max_microversion = '1.88'

    #: The physical hardware address of the network port, typically the
    #: hardware MAC address.
    address = resource.Body('address')
    #: Timestamp at which the port was created.
    created_at = resource.Body('created_at')
    #: A set of one or more arbitrary metadata key and value pairs.
    extra = resource.Body('extra')
    #: The UUID of the port
    id = resource.Body('uuid', alternate_id=True)
    #: Internal metadata set and stored by the port. This field is read-only.
    #: Added in API microversion 1.18.
    internal_info = resource.Body('internal_info')
    #: Whether PXE is enabled on the port. Added in API microversion 1.19.
    is_pxe_enabled = resource.Body('pxe_enabled', type=bool)
    #: Whether the port is a Smart NIC port. Added in API microversion 1.53.
    is_smartnic = resource.Body('is_smartnic', type=bool)
    #: A list of relative links, including the self and bookmark links.
    links = resource.Body('links', type=list)
    #: The port binding profile. If specified, must contain ``switch_id`` and
    #: ``port_id`` fields. ``switch_info`` field is an optional string field
    #: to be used to store vendor specific information. Added in API
    #: microversion 1.19.
    local_link_connection = resource.Body('local_link_connection')
    #: The name of the port
    name = resource.Body('name')
    #: The UUID of node this port belongs to
    node_id = resource.Body('node_uuid')
    #: The name of physical network this port is attached to.
    #: Added in API microversion 1.34.
    physical_network = resource.Body('physical_network')
    #: The UUID of PortGroup this port belongs to. Added in API microversion
    #: 1.24.
    port_group_id = resource.Body('portgroup_uuid')
    #: Timestamp at which the port was last updated.
    updated_at = resource.Body('updated_at')
from openstack.baremetal.v1 import _common from openstack import resource class PortGroup(_common.Resource): resources_key = 'portgroups' base_path = '/portgroups' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_patch = True commit_method = 'PATCH' commit_jsonpatch = True _query_mapping = resource.QueryParameters( 'node', 'address', fields={'type': _common.fields_type}, ) # The mode and properties field introduced in 1.26. _max_microversion = '1.26' #: The physical hardware address of the portgroup, typically the hardware #: MAC address. Added in API microversion 1.23. address = resource.Body('address') #: Timestamp at which the portgroup was created. created_at = resource.Body('created_at') #: A set of one or more arbitrary metadata key and value pairs. extra = resource.Body('extra', type=dict) #: The name of the portgroup name = resource.Body('name') #: The UUID for the portgroup id = resource.Body('uuid', alternate_id=True) #: Internal metadaa set and stored by the portgroup. internal_info = resource.Body('internal_info') #: Whether ports that are members of this portgroup can be used as #: standalone ports. Added in API microversion 1.23. is_standalone_ports_supported = resource.Body( 'standalone_ports_supported', type=bool ) #: A list of relative links, including the self and bookmark links. links = resource.Body('links', type=list) #: Port bonding mode. Added in API microversion 1.26. mode = resource.Body('mode') #: UUID of the node this portgroup belongs to. node_id = resource.Body('node_uuid') #: A list of links to the collection of ports belonging to this portgroup. #: Added in API microversion 1.24. ports = resource.Body('ports') #: Port group properties. Added in API microversion 1.26. properties = resource.Body('properties', type=dict) #: Timestamp at which the portgroup was last updated. 
updated_at = resource.Body('updated_at') PortGroupDetail = PortGroup ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/volume_connector.py0000664000175000017500000000366000000000000024440 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.baremetal.v1 import _common from openstack import resource class VolumeConnector(_common.Resource): resources_key = 'connectors' base_path = '/volume/connectors' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_patch = True commit_method = 'PATCH' commit_jsonpatch = True _query_mapping = resource.QueryParameters( 'node', 'detail', fields={'type': _common.fields_type}, ) # Volume Connectors is available since 1.32 _max_microversion = '1.32' #: The identifier of Volume connector and this field depends on the "type" # of the volume_connector connector_id = resource.Body('connector_id') #: Timestamp at which the port was created. created_at = resource.Body('created_at') #: A set of one or more arbitrary metadata key and value pairs. extra = resource.Body('extra') #: A list of relative links, including the self and bookmark links. 
links = resource.Body('links', type=list) #: The UUID of node this port belongs to node_id = resource.Body('node_uuid') #: The types of Volume connector type = resource.Body('type') #: Timestamp at which the port was last updated. updated_at = resource.Body('updated_at') #: The UUID of the port id = resource.Body('uuid', alternate_id=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/v1/volume_target.py0000664000175000017500000000415300000000000023732 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.baremetal.v1 import _common from openstack import resource class VolumeTarget(_common.Resource): resources_key = 'targets' base_path = '/volume/targets' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_patch = True commit_method = 'PATCH' commit_jsonpatch = True _query_mapping = resource.QueryParameters( 'node', 'detail', fields={'type': _common.fields_type}, ) # Volume Targets is available since 1.32 _max_microversion = '1.32' #: The boot index of the Volume target. “0” indicates that this volume is # used as a boot volume. boot_index = resource.Body('boot_index') #: Timestamp at which the port was created. created_at = resource.Body('created_at') #: A set of one or more arbitrary metadata key and value pairs. extra = resource.Body('extra') #: A list of relative links. 
Includes the self and bookmark links. links = resource.Body('links', type=list) #: The UUID of the Node this resource belongs to. node_id = resource.Body('node_uuid') #: A set of physical information of the volume. properties = resource.Body('properties') #: Timestamp at which the port was last updated. updated_at = resource.Body('updated_at') #: The UUID of the resource. id = resource.Body('uuid', alternate_id=True) #: The identifier of the volume. volume_id = resource.Body('volume_id') #: The type of Volume target. volume_type = resource.Body('volume_type') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal/version.py0000664000175000017500000000154000000000000022211 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # Capabilities allow_list = True # Attributes links = resource.Body('links') status = resource.Body('status') updated = resource.Body('updated') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2213018 openstacksdk-4.0.0/openstack/baremetal_introspection/0000775000175000017500000000000000000000000023132 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal_introspection/__init__.py0000664000175000017500000000000000000000000025231 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal_introspection/baremetal_introspection_service.py0000664000175000017500000000150000000000000032134 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.baremetal_introspection.v1 import _proxy from openstack import service_description class BaremetalIntrospectionService(service_description.ServiceDescription): """The bare metal introspection service.""" supported_versions = { '1': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2213018 openstacksdk-4.0.0/openstack/baremetal_introspection/v1/0000775000175000017500000000000000000000000023460 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal_introspection/v1/__init__.py0000664000175000017500000000000000000000000025557 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal_introspection/v1/_proxy.py0000664000175000017500000002413200000000000025354 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import _log from openstack.baremetal.v1 import node as _node from openstack.baremetal_introspection.v1 import introspection as _introspect from openstack.baremetal_introspection.v1 import ( introspection_rule as _introspection_rule, ) from openstack import exceptions from openstack import proxy _logger = _log.setup_logging('openstack') class Proxy(proxy.Proxy): _resource_registry = { "introspection": _introspect.Introspection, "introspection_rule": _introspection_rule.IntrospectionRule, } def introspections(self, **query): """Retrieve a generator of introspection records. :param dict query: Optional query parameters to be sent to restrict the records to be returned. Available parameters include: * ``fields``: A list containing one or more fields to be returned in the response. This may lead to some performance gain because other fields of the resource are not refreshed. * ``limit``: Requests at most the specified number of items be returned from the query. * ``marker``: Specifies the ID of the last-seen introspection. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen introspection from the response as the ``marker`` value in a subsequent limited request. * ``sort_dir``: Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. * ``sort_key``: Sorts the response by the this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. 
:returns: A generator of :class:`~.introspection.Introspection` objects """ return _introspect.Introspection.list(self, **query) def start_introspection(self, node, manage_boot=None): """Create a new introspection from attributes. :param node: The value can be either the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param bool manage_boot: Whether to manage boot parameters for the node. Defaults to the server default (which is `True`). :returns: :class:`~.introspection.Introspection` instance. """ node = self._get_resource(_node.Node, node) res = _introspect.Introspection.new( connection=self._get_connection(), id=node.id ) kwargs = {} if manage_boot is not None: kwargs['manage_boot'] = manage_boot return res.create(self, **kwargs) def get_introspection(self, introspection): """Get a specific introspection. :param introspection: The value can be the name or ID of an introspection (matching bare metal node name or ID) or an :class:`~.introspection.Introspection` instance. :returns: :class:`~.introspection.Introspection` instance. :raises: :class:`~openstack.exceptions.NotFoundException` when no introspection matching the name or ID could be found. """ return self._get(_introspect.Introspection, introspection) def get_introspection_data(self, introspection, processed=True): """Get introspection data. :param introspection: The value can be the name or ID of an introspection (matching bare metal node name or ID) or an :class:`~.introspection.Introspection` instance. :param processed: Whether to fetch the final processed data (the default) or the raw unprocessed data as received from the ramdisk. :returns: introspection data from the most recent successful run. :rtype: dict """ res = self._get_resource(_introspect.Introspection, introspection) return res.get_data(self, processed=processed) def abort_introspection(self, introspection, ignore_missing=True): """Abort an introspection. 
Note that the introspection is not aborted immediately, you may use `wait_for_introspection` with `ignore_error=True`. :param introspection: The value can be the name or ID of an introspection (matching bare metal node name or ID) or an :class:`~.introspection.Introspection` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the introspection could not be found. When set to ``True``, no exception will be raised when attempting to abort a non-existent introspection. :returns: nothing """ res = self._get_resource(_introspect.Introspection, introspection) try: res.abort(self) except exceptions.NotFoundException: if not ignore_missing: raise def wait_for_introspection( self, introspection, timeout=None, ignore_error=False, ): """Wait for the introspection to finish. :param introspection: The value can be the name or ID of an introspection (matching bare metal node name or ID) or an :class:`~.introspection.Introspection` instance. :param timeout: How much (in seconds) to wait for the introspection. The value of ``None`` (the default) means no client-side timeout. :param ignore_error: If ``True``, this call will raise an exception if the introspection reaches the ``error`` state. Otherwise the error state is considered successful and the call returns. :returns: :class:`~.introspection.Introspection` instance. :raises: :class:`~openstack.exceptions.ResourceFailure` if introspection fails and ``ignore_error`` is ``False``. :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. """ res = self._get_resource(_introspect.Introspection, introspection) return res.wait(self, timeout=timeout, ignore_error=ignore_error) def create_introspection_rule(self, **attrs): """Create a new introspection rules from attributes. 
:param dict attrs: Keyword arguments which will be used to create a :class:`~.introspection_rule.IntrospectionRule`, comprised of the properties on the IntrospectionRule class. :returns: :class:`~.introspection_rule.IntrospectionRule` instance. """ return self._create(_introspection_rule.IntrospectionRule, **attrs) def delete_introspection_rule( self, introspection_rule, ignore_missing=True, ): """Delete an introspection rule. :param introspection_rule: The value can be either the ID of an introspection rule or a :class:`~.introspection_rule.IntrospectionRule` instance. :param bool ignore_missing: When set to ``False``, an exception:class:`~openstack.exceptions.NotFoundException` will be raised when the introspection rule could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent introspection rule. :returns: ``None`` """ self._delete( _introspection_rule.IntrospectionRule, introspection_rule, ignore_missing=ignore_missing, ) def get_introspection_rule(self, introspection_rule): """Get a specific introspection rule. :param introspection_rule: The value can be the name or ID of an introspection rule or a :class:`~.introspection_rule.IntrospectionRule` instance. :returns: :class:`~.introspection_rule.IntrospectionRule` instance. :raises: :class:`~openstack.exceptions.NotFoundException` when no introspection rule matching the name or ID could be found. """ return self._get( _introspection_rule.IntrospectionRule, introspection_rule, ) def introspection_rules(self, **query): """Retrieve a generator of introspection rules. :param dict query: Optional query parameters to be sent to restrict the records to be returned. Available parameters include: * ``uuid``: The UUID of the Ironic Inspector rule. * ``limit``: List of a logic statementd or operations in rules, that can be evaluated as True or False. * ``actions``: List of operations that will be performed if conditions of this rule are fulfilled. 
* ``description``: Rule human-readable description. * ``scope``: Scope of an introspection rule. If set, the rule is only applied to nodes that have matching inspection_scope property. :returns: A generator of :class:`~.introspection_rule.IntrospectionRule` objects """ return self._list(_introspection_rule.IntrospectionRule, **query) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal_introspection/v1/introspection.py0000664000175000017500000001315100000000000026733 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import _log from openstack.baremetal.v1 import _common from openstack import exceptions from openstack import resource from openstack import utils _logger = _log.setup_logging('openstack') class Introspection(resource.Resource): resources_key = 'introspection' base_path = '/introspection' # capabilities allow_create = True allow_fetch = True allow_commit = False allow_delete = True allow_list = True # created via POST with ID create_method = 'POST' create_requires_id = True create_returns_body = False #: Timestamp at which the introspection was finished. finished_at = resource.Body('finished_at') #: The last error message (if any). error = resource.Body('error') #: The UUID of the introspection (matches the node UUID). id = resource.Body('uuid', alternate_id=True) #: Whether introspection is finished. 
is_finished = resource.Body('finished', type=bool) #: A list of relative links, including the self and bookmark links. links = resource.Body('links', type=list) #: Timestamp at which the introspection was started. started_at = resource.Body('started_at') #: The current introspection state. state = resource.Body('state') def abort(self, session): """Abort introspection. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` """ if self.is_finished: return session = self._get_session(session) version = self._get_microversion(session, action='delete') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'abort') response = session.post( request.url, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) msg = f"Failed to abort introspection for node {self.id}" exceptions.raise_from_response(response, error_message=msg) def get_data(self, session, processed=True): """Get introspection data. Note that the introspection data format is not stable and can vary from environment to environment. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param processed: Whether to fetch the final processed data (the default) or the raw unprocessed data as received from the ramdisk. :type processed: bool :returns: introspection data from the most recent successful run. 
:rtype: dict """ session = self._get_session(session) version = ( self._get_microversion(session, action='fetch') if processed else '1.17' ) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'data') if not processed: request.url = utils.urljoin(request.url, 'unprocessed') response = session.get( request.url, headers=request.headers, microversion=version ) msg = "Failed to fetch introspection data for node {id}".format( id=self.id ) exceptions.raise_from_response(response, error_message=msg) return response.json() def wait(self, session, timeout=None, ignore_error=False): """Wait for the node to reach the expected state. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param timeout: How much (in seconds) to wait for the introspection. The value of ``None`` (the default) means no client-side timeout. :param ignore_error: If ``True``, this call will raise an exception if the introspection reaches the ``error`` state. Otherwise the error state is considered successful and the call returns. :return: This :class:`Introspection` instance. :raises: :class:`~openstack.exceptions.ResourceFailure` if introspection fails and ``ignore_error`` is ``False``. :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. 
""" if self._check_state(ignore_error): return self for count in utils.iterate_timeout( timeout, "Timeout waiting for introspection on node %s" % self.id ): self.fetch(session) if self._check_state(ignore_error): return self _logger.debug( 'Still waiting for introspection of node %(node)s, ' 'the current state is "%(state)s"', {'node': self.id, 'state': self.state}, ) def _check_state(self, ignore_error): if self.state == 'error' and not ignore_error: raise exceptions.ResourceFailure( "Introspection of node %(node)s failed: %(error)s" % {'node': self.id, 'error': self.error} ) else: return self.is_finished ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/baremetal_introspection/v1/introspection_rule.py0000664000175000017500000000303700000000000027764 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.baremetal.v1 import _common from openstack import resource class IntrospectionRule(_common.Resource): resources_key = 'rules' base_path = '/rules' # capabilities allow_create = True allow_fetch = True allow_commit = False allow_delete = True allow_list = True # created via POST with ID create_method = 'POST' create_requires_id = True #: The UUID of the resource. 
id = resource.Body('uuid', alternate_id=True) #: List of a logic statementd or operations in rules conditions = resource.Body('conditions', type=list) #: List of operations that will be performed if conditions of this rule #: are fulfilled. actions = resource.Body('actions', type=list) #: Rule human-readable description description = resource.Body('description') #: Scope of an introspection rule scope = resource.Body('scope') #: A list of relative links, including the self and bookmark links. links = resource.Body('links', type=list) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2213018 openstacksdk-4.0.0/openstack/block_storage/0000775000175000017500000000000000000000000021034 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/__init__.py0000664000175000017500000000000000000000000023133 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/_base_proxy.py0000664000175000017500000000400300000000000023715 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from openstack import exceptions from openstack import proxy class BaseBlockStorageProxy(proxy.Proxy, metaclass=abc.ABCMeta): def create_image( self, name, volume, allow_duplicates, container_format, disk_format, wait, timeout, ): if not disk_format: disk_format = self._connection.config.config['image_format'] if not container_format: # https://docs.openstack.org/image-guide/image-formats.html container_format = 'bare' if 'id' in volume: volume_id = volume['id'] else: volume_obj = self.get_volume(volume) if not volume_obj: raise exceptions.SDKException( "Volume {volume} given to create_image could" " not be found".format(volume=volume) ) volume_id = volume_obj['id'] data = self.post( f'/volumes/{volume_id}/action', json={ 'os-volume_upload_image': { 'force': allow_duplicates, 'image_name': name, 'container_format': container_format, 'disk_format': disk_format, } }, ) response = self._connection._get_and_munchify( 'os-volume_upload_image', data ) return self._connection.image._existing_image(id=response['image_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/block_storage_service.py0000664000175000017500000000161200000000000025744 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.block_storage.v2 import _proxy as _v2_proxy from openstack.block_storage.v3 import _proxy as _v3_proxy from openstack import service_description class BlockStorageService(service_description.ServiceDescription): """The block storage service.""" supported_versions = { '3': _v3_proxy.Proxy, '2': _v2_proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2253036 openstacksdk-4.0.0/openstack/block_storage/v2/0000775000175000017500000000000000000000000021363 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/__init__.py0000664000175000017500000000000000000000000023462 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/_proxy.py0000664000175000017500000011767100000000000023272 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import warnings from openstack.block_storage import _base_proxy from openstack.block_storage.v2 import backup as _backup from openstack.block_storage.v2 import capabilities as _capabilities from openstack.block_storage.v2 import extension as _extension from openstack.block_storage.v2 import limits as _limits from openstack.block_storage.v2 import quota_class_set as _quota_class_set from openstack.block_storage.v2 import quota_set as _quota_set from openstack.block_storage.v2 import snapshot as _snapshot from openstack.block_storage.v2 import stats as _stats from openstack.block_storage.v2 import type as _type from openstack.block_storage.v2 import volume as _volume from openstack.identity.v3 import project as _project from openstack import resource from openstack import warnings as os_warnings class Proxy(_base_proxy.BaseBlockStorageProxy): # ====== SNAPSHOTS ====== def get_snapshot(self, snapshot): """Get a single snapshot :param snapshot: The value can be the ID of a snapshot or a :class:`~openstack.block_storage.v2.snapshot.Snapshot` instance. :returns: One :class:`~openstack.block_storage.v2.snapshot.Snapshot` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_snapshot.Snapshot, snapshot) def find_snapshot( self, name_or_id, ignore_missing=True, *, details=True, all_projects=False, ): """Find a single snapshot :param snapshot: The name or ID a snapshot :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the snapshot does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param bool details: When set to ``False``, an :class:`~openstack.block_storage.v2.snapshot.Snapshot` object will be returned. The default, ``True``, will cause an :class:`~openstack.block_storage.v2.snapshot.SnapshotDetail` object to be returned. 
:param bool all_projects: When set to ``True``, search for snapshot by name across all projects. Note that this will likely result in a higher chance of duplicates. Admin-only by default. :returns: One :class:`~openstack.block_storage.v2.snapshot.Snapshot`, one :class:`~openstack.block_storage.v2.snapshot.SnapshotDetail` object, or None. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ query = {} if all_projects: query['all_projects'] = True list_base_path = '/snapshots/detail' if details else None return self._find( _snapshot.Snapshot, name_or_id, ignore_missing=ignore_missing, list_base_path=list_base_path, **query, ) def snapshots(self, *, details=True, all_projects=False, **query): """Retrieve a generator of snapshots :param bool details: When set to ``False`` :class:`~openstack.block_storage.v2.snapshot.Snapshot` objects will be returned. The default, ``True``, will cause :class:`~openstack.block_storage.v2.snapshot.SnapshotDetail` objects to be returned. :param bool all_projects: When set to ``True``, list snapshots from all projects. Admin-only by default. :param kwargs query: Optional query parameters to be sent to limit the snapshots being returned. Available parameters include: * name: Name of the snapshot as a string. * volume_id: volume id of a snapshot. * status: Value of the status of the snapshot so that you can filter on "available" for example. :returns: A generator of snapshot objects. """ if all_projects: query['all_projects'] = True base_path = '/snapshots/detail' if details else None return self._list(_snapshot.Snapshot, base_path=base_path, **query) def create_snapshot(self, **attrs): """Create a new snapshot from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v2.snapshot.Snapshot`, comprised of the properties on the Snapshot class. 
:returns: The results of snapshot creation :rtype: :class:`~openstack.block_storage.v2.snapshot.Snapshot` """ return self._create(_snapshot.Snapshot, **attrs) def delete_snapshot(self, snapshot, ignore_missing=True): """Delete a snapshot :param snapshot: The value can be either the ID of a snapshot or a :class:`~openstack.block_storage.v2.snapshot.Snapshot` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the snapshot does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent snapshot. :returns: ``None`` """ self._delete( _snapshot.Snapshot, snapshot, ignore_missing=ignore_missing ) # ====== SNAPSHOT ACTIONS ====== def reset_snapshot(self, snapshot, status): """Reset status of the snapshot :param snapshot: The value can be either the ID of a backup or a :class:`~openstack.block_storage.v2.snapshot.Snapshot` instance. :param str status: New snapshot status :returns: None """ snapshot = self._get_resource(_snapshot.Snapshot, snapshot) snapshot.reset(self, status) # ====== TYPES ====== def get_type(self, type): """Get a single type :param type: The value can be the ID of a type or a :class:`~openstack.block_storage.v2.type.Type` instance. :returns: One :class:`~openstack.block_storage.v2.type.Type` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_type.Type, type) def find_type(self, name_or_id, ignore_missing=True): """Find a single volume type :param snapshot: The name or ID a volume type :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the volume type does not exist. :returns: One :class:`~openstack.block_storage.v2.type.Type` :raises: :class:`~openstack.exceptions.ResourceNotFound` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. 
""" return self._find( _type.Type, name_or_id, ignore_missing=ignore_missing, ) def types(self, **query): """Retrieve a generator of volume types :returns: A generator of volume type objects. """ return self._list(_type.Type, **query) def create_type(self, **attrs): """Create a new type from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v2.type.Type`, comprised of the properties on the Type class. :returns: The results of type creation :rtype: :class:`~openstack.block_storage.v2.type.Type` """ return self._create(_type.Type, **attrs) def delete_type(self, type, ignore_missing=True): """Delete a type :param type: The value can be either the ID of a type or a :class:`~openstack.block_storage.v2.type.Type` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the type does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent type. :returns: ``None`` """ self._delete(_type.Type, type, ignore_missing=ignore_missing) def get_type_access(self, type): """Lists project IDs that have access to private volume type. :param type: The value can be either the ID of a type or a :class:`~openstack.block_storage.v2.type.Type` instance. :returns: List of dictionaries describing projects that have access to the specified type """ res = self._get_resource(_type.Type, type) return res.get_private_access(self) def add_type_access(self, type, project_id): """Adds private volume type access to a project. :param type: The value can be either the ID of a type or a :class:`~openstack.block_storage.v2.type.Type` instance. :param str project_id: The ID of the project. Volume Type access to be added to this project ID. 
:returns: ``None`` """ res = self._get_resource(_type.Type, type) return res.add_private_access(self, project_id) def remove_type_access(self, type, project_id): """Remove private volume type access from a project. :param type: The value can be either the ID of a type or a :class:`~openstack.block_storage.v2.type.Type` instance. :param str project_id: The ID of the project. Volume Type access to be removed to this project ID. :returns: ``None`` """ res = self._get_resource(_type.Type, type) return res.remove_private_access(self, project_id) # ====== VOLUMES ====== def get_volume(self, volume): """Get a single volume :param volume: The value can be the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :returns: One :class:`~openstack.block_storage.v2.volume.Volume` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_volume.Volume, volume) def find_volume( self, name_or_id, ignore_missing=True, *, details=True, all_projects=False, ): """Find a single volume :param volume: The name or ID a volume :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the volume does not exist. :param bool details: When set to ``False`` no extended attributes will be returned. The default, ``True``, will cause an object with additional attributes to be returned. :param bool all_projects: When set to ``True``, search for volume by name across all projects. Note that this will likely result in a higher chance of duplicates. Admin-only by default. :returns: One :class:`~openstack.block_storage.v2.volume.Volume` or None. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. 
""" query = {} if all_projects: query['all_projects'] = True list_base_path = '/volumes/detail' if details else None return self._find( _volume.Volume, name_or_id, ignore_missing=ignore_missing, list_base_path=list_base_path, **query, ) def volumes(self, *, details=True, all_projects=False, **query): """Retrieve a generator of volumes :param bool details: When set to ``False`` no extended attributes will be returned. The default, ``True``, will cause objects with additional attributes to be returned. :param bool all_projects: When set to ``True``, list volumes from all projects. Admin-only by default. :param kwargs query: Optional query parameters to be sent to limit the volumes being returned. Available parameters include: * name: Name of the volume as a string. * status: Value of the status of the volume so that you can filter on "available" for example. :returns: A generator of volume objects. """ if all_projects: query['all_projects'] = True base_path = '/volumes/detail' if details else None return self._list(_volume.Volume, base_path=base_path, **query) def create_volume(self, **attrs): """Create a new volume from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v2.volume.Volume`, comprised of the properties on the Volume class. :returns: The results of volume creation :rtype: :class:`~openstack.block_storage.v2.volume.Volume` """ return self._create(_volume.Volume, **attrs) def delete_volume(self, volume, ignore_missing=True, force=False): """Delete a volume :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the volume does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent volume. :param bool force: Whether to try forcing volume deletion. 
:returns: ``None`` """ if not force: self._delete(_volume.Volume, volume, ignore_missing=ignore_missing) else: volume = self._get_resource(_volume.Volume, volume) volume.force_delete(self) # ====== VOLUME ACTIONS ====== def extend_volume(self, volume, size): """Extend a volume :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :param size: New volume size :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.extend(self, size) def set_volume_readonly(self, volume, readonly=True): """Set a volume's read-only flag. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :param bool readonly: Whether the volume should be a read-only volume or not. :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.set_readonly(self, readonly) def retype_volume(self, volume, new_type, migration_policy="never"): """Retype the volume. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :param new_type: The new volume type that volume is changed with. The value can be either the ID of the volume type or a :class:`~openstack.block_storage.v2.type.Type` instance. :param str migration_policy: Specify if the volume should be migrated when it is re-typed. Possible values are on-demand or never. Default: never. :returns: None """ volume = self._get_resource(_volume.Volume, volume) type_id = resource.Resource._get_id(new_type) volume.retype(self, type_id, migration_policy) def set_volume_bootable_status(self, volume, bootable): """Set bootable status of the volume. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :param bool bootable: Specifies whether the volume should be bootable or not. 
:returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.set_bootable_status(self, bootable) def set_volume_image_metadata(self, volume, **metadata): """Update image metadata for a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume`. :param kwargs metadata: Key/value pairs to be updated in the volume's image metadata. No other metadata is modified by this call. :returns: None """ volume = self._get_resource(_volume.Volume, volume) return volume.set_image_metadata(self, metadata=metadata) def delete_volume_image_metadata(self, volume, keys=None): """Delete metadata for a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume`. :param list keys: The keys to delete. If left empty complete metadata will be removed. :returns: None """ volume = self._get_resource(_volume.Volume, volume) if keys is not None: for key in keys: volume.delete_image_metadata_item(self, key) else: volume.delete_image_metadata(self) def reset_volume_status( self, volume, status=None, attach_status=None, migration_status=None ): """Reset volume statuses. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :param str status: The new volume status. :param str attach_status: The new volume attach status. :param str migration_status: The new volume migration status (admin only). :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.reset_status(self, status, attach_status, migration_status) def attach_volume(self, volume, mountpoint, instance=None, host_name=None): """Attaches a volume to a server. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :param str mountpoint: The attaching mount point. :param str instance: The UUID of the attaching instance. :param str host_name: The name of the attaching host. 
:returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.attach(self, mountpoint, instance, host_name) def detach_volume(self, volume, attachment, force=False, connector=None): """Detaches a volume from a server. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :param str attachment: The ID of the attachment. :param bool force: Whether to force volume detach (Rolls back an unsuccessful detach operation after you disconnect the volume.) :param dict connector: The connector object. :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.detach(self, attachment, force, connector) def unmanage_volume(self, volume): """Removes a volume from Block Storage management without removing the back-end storage object that is associated with it. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.unmanage(self) def migrate_volume( self, volume, host=None, force_host_copy=False, lock_volume=False ): """Migrates a volume to the specified host. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :param str host: The target host for the volume migration. Host format is host@backend. :param bool force_host_copy: If false (the default), rely on the volume backend driver to perform the migration, which might be optimized. If true, or the volume driver fails to migrate the volume itself, a generic host-based migration is performed. :param bool lock_volume: If true, migrating an available volume will change its status to maintenance preventing other operations from being performed on the volume such as attach, detach, retype, etc. 
:returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.migrate(self, host, force_host_copy, lock_volume) def complete_volume_migration(self, volume, new_volume, error=False): """Complete the migration of a volume. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume` instance. :param str new_volume: The UUID of the new volume. :param bool error: Used to indicate if an error has occured elsewhere that requires clean up. :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.complete_migration(self, new_volume, error) # ====== BACKEND POOLS ====== def backend_pools(self, **query): """Returns a generator of cinder Back-end storage pools :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns A generator of cinder Back-end storage pools objects """ return self._list(_stats.Pools, **query) # ====== BACKUPS ====== def backups(self, details=True, **query): """Retrieve a generator of backups :param bool details: When set to ``False`` no additional details will be returned. The default, ``True``, will cause objects with additional attributes to be returned. :param dict query: Optional query parameters to be sent to limit the resources being returned: * offset: pagination marker * limit: pagination limit * sort_key: Sorts by an attribute. A valid value is name, status, container_format, disk_format, size, id, created_at, or updated_at. Default is created_at. The API uses the natural sorting direction of the sort_key attribute value. * sort_dir: Sorts by one or more sets of attribute and sort direction combinations. If you omit the sort direction in a set, default is desc. :returns: A generator of backup objects. 
""" base_path = '/backups/detail' if details else None return self._list(_backup.Backup, base_path=base_path, **query) def get_backup(self, backup): """Get a backup :param backup: The value can be the ID of a backup or a :class:`~openstack.block_storage.v2.backup.Backup` instance. :returns: Backup instance :rtype: :class:`~openstack.block_storage.v2.backup.Backup` """ return self._get(_backup.Backup, backup) def find_backup(self, name_or_id, ignore_missing=True, *, details=True): """Find a single backup :param snapshot: The name or ID a backup :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the backup does not exist. :param bool details: When set to ``False`` no additional details will be returned. The default, ``True``, will cause objects with additional attributes to be returned. :returns: One :class:`~openstack.block_storage.v2.backup.Backup` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ list_base_path = '/backups/detail' if details else None return self._find( _backup.Backup, name_or_id, ignore_missing=ignore_missing, list_base_path=list_base_path, ) def create_backup(self, **attrs): """Create a new Backup from attributes with native API :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v2.backup.Backup` comprised of the properties on the Backup class. 
:returns: The results of Backup creation :rtype: :class:`~openstack.block_storage.v2.backup.Backup` """ return self._create(_backup.Backup, **attrs) def delete_backup(self, backup, ignore_missing=True, force=False): """Delete a CloudBackup :param backup: The value can be the ID of a backup or a :class:`~openstack.block_storage.v2.backup.Backup` instance :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone. :param bool force: Whether to try forcing backup deletion :returns: ``None`` """ if not force: self._delete(_backup.Backup, backup, ignore_missing=ignore_missing) else: backup = self._get_resource(_backup.Backup, backup) backup.force_delete(self) # ====== BACKUP ACTIONS ====== def restore_backup(self, backup, volume_id, name): """Restore a Backup to volume :param backup: The value can be the ID of a backup or a :class:`~openstack.block_storage.v2.backup.Backup` instance :param volume_id: The ID of the volume to restore the backup to. :param name: The name for new volume creation to restore. :returns: Updated backup instance :rtype: :class:`~openstack.block_storage.v2.backup.Backup` """ backup = self._get_resource(_backup.Backup, backup) return backup.restore(self, volume_id=volume_id, name=name) def reset_backup(self, backup, status): """Reset status of the backup :param backup: The value can be either the ID of a backup or a :class:`~openstack.block_storage.v2.backup.Backup` instance. :param str status: New backup status :returns: None """ backup = self._get_resource(_backup.Backup, backup) backup.reset(self, status) # ====== LIMITS ====== def get_limits(self, project=None): """Retrieves limits :param project: A project to get limits for. The value can be either the ID of a project or an :class:`~openstack.identity.v2.project.Project` instance. 
:returns: A Limits object, including both :class:`~openstack.block_storage.v2.limits.AbsoluteLimit` and :class:`~openstack.block_storage.v2.limits.RateLimit` :rtype: :class:`~openstack.block_storage.v2.limits.Limits` """ params = {} if project: params['project_id'] = resource.Resource._get_id(project) return self._get(_limits.Limits, requires_id=False, **params) # ====== CAPABILITIES ====== def get_capabilities(self, host): """Get a backend's capabilites :param host: Specified backend to obtain volume stats and properties. :returns: One :class: `~openstack.block_storage.v2.capabilites.Capabilities` instance. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_capabilities.Capabilities, host) # ====== QUOTA CLASS SETS ====== def get_quota_class_set(self, quota_class_set='default'): """Get a single quota class set Only one quota class is permitted, ``default``. :param quota_class_set: The value can be the ID of a quota class set (only ``default`` is supported) or a :class:`~openstack.block_storage.v2.quota_class_set.QuotaClassSet` instance. :returns: One :class:`~openstack.block_storage.v2.quota_class_set.QuotaClassSet` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_quota_class_set.QuotaClassSet, quota_class_set) def update_quota_class_set(self, quota_class_set, **attrs): """Update a QuotaClassSet. Only one quota class is permitted, ``default``. :param quota_class_set: Either the ID of a quota class set (only ``default`` is supported) or a :class:`~openstack.block_storage.v2.quota_class_set.QuotaClassSet` instance. :param attrs: The attributes to update on the QuotaClassSet represented by ``quota_class_set``. 
:returns: The updated QuotaSet :rtype: :class:`~openstack.block_storage.v2.quota_set.QuotaSet` """ return self._update( _quota_class_set.QuotaClassSet, quota_class_set, **attrs ) # ====== QUOTA SETS ====== def get_quota_set(self, project, usage=False, **query): """Show QuotaSet information for the project :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be retrieved :param bool usage: When set to ``True`` quota usage and reservations would be filled. :param dict query: Additional query parameters to use. :returns: One :class:`~openstack.block_storage.v2.quota_set.QuotaSet` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ project = self._get_resource(_project.Project, project) res = self._get_resource( _quota_set.QuotaSet, None, project_id=project.id ) return res.fetch(self, usage=usage, **query) def get_quota_set_defaults(self, project): """Show QuotaSet defaults for the project :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be retrieved :returns: One :class:`~openstack.block_storage.v2.quota_set.QuotaSet` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ project = self._get_resource(_project.Project, project) res = self._get_resource( _quota_set.QuotaSet, None, project_id=project.id ) return res.fetch(self, base_path='/os-quota-sets/defaults') def revert_quota_set(self, project, **query): """Reset Quota for the project/user. :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be resetted. :param dict query: Additional parameters to be used. 
:returns: ``None`` """ project = self._get_resource(_project.Project, project) res = self._get_resource( _quota_set.QuotaSet, None, project_id=project.id ) if not query: query = {} return res.delete(self, **query) def update_quota_set(self, project, **attrs): """Update a QuotaSet. :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be reset. :param attrs: The attributes to update on the QuotaSet represented by ``quota_set``. :returns: The updated QuotaSet :rtype: :class:`~openstack.block_storage.v3.quota_set.QuotaSet` """ if 'project_id' in attrs or isinstance(project, _quota_set.QuotaSet): warnings.warn( "The signature of 'update_quota_set' has changed and it " "now expects a Project as the first argument, in line " "with the other quota set methods.", os_warnings.RemovedInSDK50Warning, ) if isinstance(project, _quota_set.QuotaSet): attrs['project_id'] = project.project_id # cinder doesn't support any query parameters so we simply pop # these if 'query' in attrs: attrs.pop('params') else: project = self._get_resource(_project.Project, project) attrs['project_id'] = project.id return self._update(_quota_set.QuotaSet, None, **attrs) # ====== VOLUME METADATA ====== def get_volume_metadata(self, volume): """Return a dictionary of metadata for a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume`. :returns: A :class:`~openstack.block_storage.v2.volume.Volume` with the volume's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.block_storage.v2.volume.Volume` """ volume = self._get_resource(_volume.Volume, volume) return volume.fetch_metadata(self) def set_volume_metadata(self, volume, **metadata): """Update metadata for a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume`. :param kwargs metadata: Key/value pairs to be updated in the volume's metadata. 
No other metadata is modified by this call. All keys and values are stored as Unicode. :returns: A :class:`~openstack.block_storage.v2.volume.Volume` with the volume's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.block_storage.v2.volume.Volume` """ volume = self._get_resource(_volume.Volume, volume) return volume.set_metadata(self, metadata=metadata) def delete_volume_metadata(self, volume, keys=None): """Delete metadata for a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v2.volume.Volume`. :param list keys: The keys to delete. If left empty complete metadata will be removed. :rtype: ``None`` """ volume = self._get_resource(_volume.Volume, volume) if keys is not None: for key in keys: volume.delete_metadata_item(self, key) else: volume.delete_metadata(self) # ====== SNAPSHOT METADATA ====== def get_snapshot_metadata(self, snapshot): """Return a dictionary of metadata for a snapshot :param snapshot: Either the ID of a snapshot or a :class:`~openstack.block_storage.v2.snapshot.Snapshot`. :returns: A :class:`~openstack.block_storage.v2.snapshot.Snapshot` with the snapshot's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.block_storage.v2.snapshot.Snapshot` """ snapshot = self._get_resource(_snapshot.Snapshot, snapshot) return snapshot.fetch_metadata(self) def set_snapshot_metadata(self, snapshot, **metadata): """Update metadata for a snapshot :param snapshot: Either the ID of a snapshot or a :class:`~openstack.block_storage.v2.snapshot.Snapshot`. :param kwargs metadata: Key/value pairs to be updated in the snapshot's metadata. No other metadata is modified by this call. All keys and values are stored as Unicode. :returns: A :class:`~openstack.block_storage.v2.snapshot.Snapshot` with the snapshot's metadata. All keys and values are Unicode text. 
:rtype: :class:`~openstack.block_storage.v2.snapshot.Snapshot` """ snapshot = self._get_resource(_snapshot.Snapshot, snapshot) return snapshot.set_metadata(self, metadata=metadata) def delete_snapshot_metadata(self, snapshot, keys=None): """Delete metadata for a snapshot :param snapshot: Either the ID of a snapshot or a :class:`~openstack.block_storage.v2.snapshot.Snapshot`. :param list keys: The keys to delete. If left empty complete metadata will be removed. :rtype: ``None`` """ snapshot = self._get_resource(_snapshot.Snapshot, snapshot) if keys is not None: for key in keys: snapshot.delete_metadata_item(self, key) else: snapshot.delete_metadata(self) # ====== EXTENSIONS ====== def extensions(self): """Return a generator of extensions :returns: A generator of extension :rtype: :class:`~openstack.block_storage.v2.extension.Extension` """ return self._list(_extension.Extension) # ====== UTILS ====== def wait_for_status( self, res, status='available', failures=None, interval=2, wait=120, callback=None, ): """Wait for a resource to be in a particular status. :param res: The resource to wait on to reach the specified status. The resource must have a ``status`` attribute. :type resource: A :class:`~openstack.resource.Resource` object. :param status: Desired status. :param failures: Statuses that would be interpreted as failures. :type failures: :py:class:`list` :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :param callback: A callback function. This will be called with a single value, progress. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to the desired status failed to occur in specified seconds. :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource has transited to one of the failure statuses. 
:raises: :class:`~AttributeError` if the resource does not have a ``status`` attribute. """ failures = ['error'] if failures is None else failures return resource.wait_for_status( self, res, status, failures, interval, wait, callback=callback, ) def wait_for_delete(self, res, interval=2, wait=120, callback=None): """Wait for a resource to be deleted. :param res: The resource to wait on to be deleted. :type resource: A :class:`~openstack.resource.Resource` object. :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :param callback: A callback function. This will be called with a single value, progress. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to delete failed to occur in the specified seconds. """ return resource.wait_for_delete( self, res, interval, wait, callback=callback, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/backup.py0000664000175000017500000001720400000000000023206 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty from openstack import exceptions from openstack import resource from openstack import utils class Backup(resource.Resource): """Volume Backup""" resource_key = "backup" resources_key = "backups" base_path = "/backups" _query_mapping = resource.QueryParameters( 'all_tenants', 'limit', 'marker', 'project_id', 'name', 'status', 'volume_id', 'sort_key', 'sort_dir', ) # capabilities allow_fetch = True allow_create = True allow_delete = True allow_list = True allow_get = True #: Properties #: backup availability zone availability_zone = resource.Body("availability_zone") #: The container backup in container = resource.Body("container") #: The date and time when the resource was created. created_at = resource.Body("created_at") #: data timestamp #: The time when the data on the volume was first saved. #: If it is a backup from volume, it will be the same as created_at #: for a backup. If it is a backup from a snapshot, #: it will be the same as created_at for the snapshot. data_timestamp = resource.Body('data_timestamp') #: backup description description = resource.Body("description") #: Backup fail reason fail_reason = resource.Body("fail_reason") #: Force backup force = resource.Body("force", type=bool) #: has_dependent_backups #: If this value is true, there are other backups depending on this backup. has_dependent_backups = resource.Body('has_dependent_backups', type=bool) #: Indicates whether the backup mode is incremental. #: If this value is true, the backup mode is incremental. #: If this value is false, the backup mode is full. is_incremental = resource.Body("is_incremental", type=bool) #: A list of links associated with this volume. *Type: list* links = resource.Body("links", type=list) #: backup name name = resource.Body("name") #: backup object count object_count = resource.Body("object_count", type=int) #: The size of the volume, in gibibytes (GiB). size = resource.Body("size", type=int) #: The UUID of the source volume snapshot. 
snapshot_id = resource.Body("snapshot_id") #: backup status #: values: creating, available, deleting, error, restoring, error_restoring status = resource.Body("status") #: The date and time when the resource was updated. updated_at = resource.Body("updated_at") #: The UUID of the volume. volume_id = resource.Body("volume_id") def create(self, session, prepend_key=True, base_path=None, **params): """Create a remote resource based on this instance. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param prepend_key: A boolean indicating whether the resource_key should be prepended in a resource creation request. Default to True. :param str base_path: Base part of the URI for creating resources, if different from :data:`~openstack.resource.Resource.base_path`. :param dict params: Additional params to pass. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_create` is not set to ``True``. 
""" if not self.allow_create: raise exceptions.MethodNotSupported(self, "create") session = self._get_session(session) microversion = self._get_microversion(session, action='create') requires_id = ( self.create_requires_id if self.create_requires_id is not None else self.create_method == 'PUT' ) if self.create_exclude_id_from_body: self._body._dirty.discard("id") if self.create_method == 'POST': request = self._prepare_request( requires_id=requires_id, prepend_key=prepend_key, base_path=base_path, ) # NOTE(gtema) this is a funny example of when attribute # is called "incremental" on create, "is_incremental" on get # and use of "alias" or "aka" is not working for such conflict, # since our preferred attr name is exactly "is_incremental" body = request.body if 'is_incremental' in body['backup']: body['backup']['incremental'] = body['backup'].pop( 'is_incremental' ) response = session.post( request.url, json=request.body, headers=request.headers, microversion=microversion, params=params, ) else: # Just for safety of the implementation (since PUT removed) raise exceptions.ResourceFailure( "Invalid create method: %s" % self.create_method ) has_body = ( self.has_body if self.create_returns_body is None else self.create_returns_body ) self.microversion = microversion self._translate_response(response, has_body=has_body) # direct comparision to False since we need to rule out None if self.has_body and self.create_returns_body is False: # fetch the body if it's required but not returned by create return self.fetch(session) return self def _action(self, session, body, microversion=None): """Preform backup actions given the message body.""" url = utils.urljoin(self.base_path, self.id, 'action') resp = session.post( url, json=body, microversion=self._max_microversion ) exceptions.raise_from_response(resp) return resp def restore(self, session, volume_id=None, name=None): """Restore current backup to volume :param session: openstack session :param volume_id: The ID of the 
volume to restore the backup to. :param name: The name for new volume creation to restore. :return: Updated backup instance """ url = utils.urljoin(self.base_path, self.id, "restore") body: ty.Dict[str, ty.Dict] = {'restore': {}} if volume_id: body['restore']['volume_id'] = volume_id if name: body['restore']['name'] = name if not (volume_id or name): raise exceptions.SDKException( 'Either of `name` or `volume_id` must be specified.' ) response = session.post(url, json=body) self._translate_response(response, has_body=False) return self def force_delete(self, session): """Force backup deletion""" body = {'os-force_delete': None} self._action(session, body) def reset(self, session, status): """Reset the status of the backup""" body = {'os-reset_status': {'status': status}} self._action(session, body) BackupDetail = Backup ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/capabilities.py0000664000175000017500000000351000000000000024365 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Capabilities(resource.Resource): base_path = "/capabilities" # Capabilities allow_fetch = True #: Properties #: The capabilities description description = resource.Body("description") #: The name of volume backend capabilities. display_name = resource.Body("display_name") #: The driver version. 
driver_version = resource.Body("driver_version") #: The storage namespace, such as OS::Storage::Capabilities::foo. namespace = resource.Body("namespace") #: The name of the storage pool. pool_name = resource.Body("pool_name") #: The backend volume capabilites list, which consists of cinder #: standard capabilities and vendor unique properties. properties = resource.Body("properties", type=dict) #: A list of volume backends used to replicate volumes on this backend. replication_targets = resource.Body("replication_targets", type=list) #: The storage backend for the backend volume. storage_protocol = resource.Body("storage_protocol") #: The name of the vendor. vendor_name = resource.Body("vendor_name") #: The volume type access. visibility = resource.Body("visibility") #: The name of the back-end volume. volume_backend_name = resource.Body("volume_backend_name") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/extension.py0000664000175000017500000000246100000000000023754 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Extension(resource.Resource): resources_key = "extensions" base_path = "/extensions" # Capabilities allow_list = True #: Properties #: The alias for the extension. alias = resource.Body('alias', type=str) #: The extension description. 
description = resource.Body('description', type=str) #: Links pertaining to this extension. links = resource.Body('links', type=list) #: The name of this extension. name = resource.Body('name') #: A URL pointing to the namespace for this extension. namespace = resource.Body('namespace') #: The date and time when the resource was updated. #: The date and time stamp format is ISO 8601. updated_at = resource.Body('updated', type=str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/limits.py0000664000175000017500000000626600000000000023250 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class AbsoluteLimit(resource.Resource): #: Properties #: The maximum total amount of backups, in gibibytes (GiB). max_total_backup_gigabytes = resource.Body( "maxTotalBackupGigabytes", type=int ) #: The maximum number of backups. max_total_backups = resource.Body("maxTotalBackups", type=int) #: The maximum number of snapshots. max_total_snapshots = resource.Body("maxTotalSnapshots", type=int) #: The maximum total amount of volumes, in gibibytes (GiB). max_total_volume_gigabytes = resource.Body( "maxTotalVolumeGigabytes", type=int ) #: The maximum number of volumes. max_total_volumes = resource.Body("maxTotalVolumes", type=int) #: The total number of backups gibibytes (GiB) used. 
total_backup_gigabytes_used = resource.Body( "totalBackupGigabytesUsed", type=int ) #: The total number of backups used. total_backups_used = resource.Body("totalBackupsUsed", type=int) #: The total number of gibibytes (GiB) used. total_gigabytes_used = resource.Body("totalGigabytesUsed", type=int) #: The total number of snapshots used. total_snapshots_used = resource.Body("totalSnapshotsUsed", type=int) #: The total number of volumes used. total_volumes_used = resource.Body("totalVolumesUsed", type=int) class RateLimit(resource.Resource): #: Properties #: Rate limits next availabe time. next_available = resource.Body("next-available") #: Integer for rate limits remaining. remaining = resource.Body("remaining", type=int) #: Unit of measurement for the value parameter. unit = resource.Body("unit") #: Integer number of requests which can be made. value = resource.Body("value", type=int) #: An HTTP verb (POST, PUT, etc.). verb = resource.Body("verb") class RateLimits(resource.Resource): #: Properties #: A list of the specific limits that apply to the ``regex`` and ``uri``. limits = resource.Body("limit", type=list, list_type=RateLimit) #: A regex representing which routes this rate limit applies to. regex = resource.Body("regex") #: A URI representing which routes this rate limit applies to. uri = resource.Body("uri") class Limits(resource.Resource): resource_key = "limits" base_path = "/limits" # capabilities allow_fetch = True #: Properties #: An absolute limits object. absolute = resource.Body("absolute", type=AbsoluteLimit) #: Rate-limit volume copy bandwidth, used to mitigate #: slow down of data access from the instances. 
rate = resource.Body("rate", type=list, list_type=RateLimits) # legacy alias Limit = Limits ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/quota_class_set.py0000664000175000017500000000317600000000000025135 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class QuotaClassSet(resource.Resource): resource_key = 'quota_class_set' base_path = '/os-quota-class-sets' # Capabilities allow_fetch = True allow_commit = True # Properties #: The size (GB) of backups that are allowed for each project. backup_gigabytes = resource.Body('backup_gigabytes', type=int) #: The number of backups that are allowed for each project. backups = resource.Body('backups', type=int) #: The size (GB) of volumes and snapshots that are allowed for each #: project. gigabytes = resource.Body('gigabytes', type=int) #: The number of groups that are allowed for each project. groups = resource.Body('groups', type=int) #: The size (GB) of volumes in request that are allowed for each volume. per_volume_gigabytes = resource.Body('per_volume_gigabytes', type=int) #: The number of snapshots that are allowed for each project. snapshots = resource.Body('snapshots', type=int) #: The number of volumes that are allowed for each project. 
volumes = resource.Body('volumes', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/quota_set.py0000664000175000017500000000302100000000000023735 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import quota_set from openstack import resource class QuotaSet(quota_set.QuotaSet): #: Properties #: The size (GB) of backups that are allowed for each project. backup_gigabytes = resource.Body('backup_gigabytes', type=int) #: The number of backups that are allowed for each project. backups = resource.Body('backups', type=int) #: The size (GB) of volumes and snapshots that are allowed for each #: project. gigabytes = resource.Body('gigabytes', type=int) #: The number of groups that are allowed for each project. groups = resource.Body('groups', type=int) #: The size (GB) of volumes in request that are allowed for each volume. per_volume_gigabytes = resource.Body('per_volume_gigabytes', type=int) #: The number of snapshots that are allowed for each project. snapshots = resource.Body('snapshots', type=int) #: The number of volumes that are allowed for each project. 
volumes = resource.Body('volumes', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/snapshot.py0000664000175000017500000000471000000000000023576 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import metadata from openstack import exceptions from openstack import format from openstack import resource from openstack import utils class Snapshot(resource.Resource, metadata.MetadataMixin): resource_key = "snapshot" resources_key = "snapshots" base_path = "/snapshots" _query_mapping = resource.QueryParameters( 'name', 'status', 'volume_id', all_projects='all_tenants' ) # capabilities allow_fetch = True allow_create = True allow_delete = True allow_commit = True allow_list = True # Properties #: The timestamp of this snapshot creation. created_at = resource.Body("created_at") #: Description of snapshot. Default is None. description = resource.Body("description") #: Indicate whether to create snapshot, even if the volume is attached. #: Default is ``False``. *Type: bool* is_forced = resource.Body("force", type=format.BoolStr) #: The size of the volume, in GBs. size = resource.Body("size", type=int) #: The current status of this snapshot. Potential values are creating, #: available, deleting, error, and error_deleting. status = resource.Body("status") #: The date and time when the resource was updated. 
updated_at = resource.Body("updated_at") #: The ID of the volume this snapshot was taken of. volume_id = resource.Body("volume_id") def _action(self, session, body, microversion=None): """Preform backup actions given the message body.""" url = utils.urljoin(self.base_path, self.id, 'action') resp = session.post( url, json=body, microversion=self._max_microversion ) exceptions.raise_from_response(resp) return resp def reset(self, session, status): """Reset the status of the snapshot.""" body = {'os-reset_status': {'status': status}} self._action(session, body) SnapshotDetail = Snapshot ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/stats.py0000664000175000017500000000201300000000000023067 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Pools(resource.Resource): resource_key = "" resources_key = "pools" base_path = "/scheduler-stats/get_pools?detail=True" # capabilities allow_fetch = False allow_create = False allow_delete = False allow_list = True # Properties #: The Cinder name for the pool name = resource.Body("name") #: returns a dict with information about the pool capabilities = resource.Body("capabilities", type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/type.py0000664000175000017500000000504100000000000022716 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class Type(resource.Resource): resource_key = "volume_type" resources_key = "volume_types" base_path = "/types" # capabilities allow_fetch = True allow_create = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters("is_public") # Properties #: A dict of extra specifications. "capabilities" is a usual key. extra_specs = resource.Body("extra_specs", type=dict) #: a private volume-type. *Type: bool* is_public = resource.Body('os-volume-type-access:is_public', type=bool) def get_private_access(self, session): """List projects with private access to the volume type. :param session: The session to use for making this request. 
:returns: The volume type access response. """ url = utils.urljoin(self.base_path, self.id, "os-volume-type-access") resp = session.get(url) exceptions.raise_from_response(resp) return resp.json().get("volume_type_access", []) def add_private_access(self, session, project_id): """Add project access from the volume type. :param session: The session to use for making this request. :param project_id: The project to add access for. """ url = utils.urljoin(self.base_path, self.id, "action") body = {"addProjectAccess": {"project": project_id}} resp = session.post(url, json=body) exceptions.raise_from_response(resp) def remove_private_access(self, session, project_id): """Remove project access from the volume type. :param session: The session to use for making this request. :param project_id: The project to remove access for. """ url = utils.urljoin(self.base_path, self.id, "action") body = {"removeProjectAccess": {"project": project_id}} resp = session.post(url, json=body) exceptions.raise_from_response(resp) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v2/volume.py0000664000175000017500000002040700000000000023247 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty from openstack.common import metadata from openstack import format from openstack import resource from openstack import utils class Volume(resource.Resource, metadata.MetadataMixin): resource_key = "volume" resources_key = "volumes" base_path = "/volumes" _query_mapping = resource.QueryParameters( 'name', 'status', 'project_id', all_projects='all_tenants' ) # capabilities allow_fetch = True allow_create = True allow_delete = True allow_commit = True allow_list = True # Properties #: TODO(briancurtin): This is currently undocumented in the API. attachments = resource.Body("attachments") #: The availability zone. availability_zone = resource.Body("availability_zone") #: ID of the consistency group. consistency_group_id = resource.Body("consistencygroup_id") #: The timestamp of this volume creation. created_at = resource.Body("created_at") #: The date and time when the resource was updated. updated_at = resource.Body("updated_at") #: The volume description. description = resource.Body("description") #: Extended replication status on this volume. extended_replication_status = resource.Body( "os-volume-replication:extended_status" ) #: The volume's current back-end. host = resource.Body("os-vol-host-attr:host") #: The ID of the image from which you want to create the volume. #: Required to create a bootable volume. image_id = resource.Body("imageRef") #: Enables or disables the bootable attribute. You can boot an #: instance from a bootable volume. *Type: bool* is_bootable = resource.Body("bootable", type=format.BoolStr) #: ``True`` if this volume is encrypted, ``False`` if not. #: *Type: bool* is_encrypted = resource.Body("encrypted", type=format.BoolStr) #: The volume ID that this volume's name on the back-end is based on. migration_id = resource.Body("os-vol-mig-status-attr:name_id") #: The status of this volume's migration (None means that a migration #: is not currently in progress). 
migration_status = resource.Body("os-vol-mig-status-attr:migstat") #: The project ID associated with current back-end. project_id = resource.Body("os-vol-tenant-attr:tenant_id") #: Data set by the replication driver replication_driver_data = resource.Body( "os-volume-replication:driver_data" ) #: Status of replication on this volume. replication_status = resource.Body("replication_status") #: Scheduler hints for the volume scheduler_hints = resource.Body('OS-SCH-HNT:scheduler_hints', type=dict) #: The size of the volume, in GBs. *Type: int* size = resource.Body("size", type=int) #: To create a volume from an existing snapshot, specify the ID of #: the existing volume snapshot. If specified, the volume is created #: in same availability zone and with same size of the snapshot. snapshot_id = resource.Body("snapshot_id") #: To create a volume from an existing volume, specify the ID of #: the existing volume. If specified, the volume is created with #: same size of the source volume. source_volume_id = resource.Body("source_volid") #: One of the following values: creating, available, attaching, in-use #: deleting, error, error_deleting, backing-up, restoring-backup, #: error_restoring. For details on these statuses, see the #: Block Storage API documentation. status = resource.Body("status") #: The user ID associated with the volume user_id = resource.Body("user_id") #: One or more metadata key and value pairs about image volume_image_metadata = resource.Body("volume_image_metadata") #: The name of the associated volume type. volume_type = resource.Body("volume_type") def _action(self, session, body): """Preform volume actions given the message body.""" # NOTE: This is using Volume.base_path instead of self.base_path # as both Volume and VolumeDetail instances can be acted on, but # the URL used is sans any additional /detail/ part. 
url = utils.urljoin(Volume.base_path, self.id, 'action') return session.post(url, json=body, microversion=None) def extend(self, session, size): """Extend a volume size.""" body = {'os-extend': {'new_size': size}} self._action(session, body) def set_bootable_status(self, session, bootable=True): """Set volume bootable status flag""" body = {'os-set_bootable': {'bootable': bootable}} self._action(session, body) def set_readonly(self, session, readonly): """Set volume readonly flag""" body = {'os-update_readonly_flag': {'readonly': readonly}} self._action(session, body) def set_image_metadata(self, session, metadata): """Sets image metadata key-value pairs on the volume""" body = {'os-set_image_metadata': metadata} self._action(session, body) def delete_image_metadata(self, session): """Remove all image metadata from the volume""" for key in self.metadata: body = {'os-unset_image_metadata': key} self._action(session, body) def delete_image_metadata_item(self, session, key): """Remove a single image metadata from the volume""" body = {'os-unset_image_metadata': key} self._action(session, body) def reset_status( self, session, status=None, attach_status=None, migration_status=None ): """Reset volume statuses (admin operation)""" body: ty.Dict[str, ty.Dict[str, str]] = {'os-reset_status': {}} if status: body['os-reset_status']['status'] = status if attach_status: body['os-reset_status']['attach_status'] = attach_status if migration_status: body['os-reset_status']['migration_status'] = migration_status self._action(session, body) def attach(self, session, mountpoint, instance): """Attach volume to server""" body = { 'os-attach': {'mountpoint': mountpoint, 'instance_uuid': instance} } self._action(session, body) def detach(self, session, attachment, force=False): """Detach volume from server""" if not force: body = {'os-detach': {'attachment_id': attachment}} if force: body = {'os-force_detach': {'attachment_id': attachment}} self._action(session, body) def unmanage(self, 
session): """Unmanage volume""" body = {'os-unmanage': None} self._action(session, body) def retype(self, session, new_type, migration_policy=None): """Change volume type""" body = {'os-retype': {'new_type': new_type}} if migration_policy: body['os-retype']['migration_policy'] = migration_policy self._action(session, body) def migrate( self, session, host=None, force_host_copy=False, lock_volume=False ): """Migrate volume""" req = dict() if host is not None: req['host'] = host if force_host_copy: req['force_host_copy'] = force_host_copy if lock_volume: req['lock_volume'] = lock_volume body = {'os-migrate_volume': req} self._action(session, body) def complete_migration(self, session, new_volume_id, error=False): """Complete volume migration""" body = { 'os-migrate_volume_completion': { 'new_volume': new_volume_id, 'error': error, } } self._action(session, body) def force_delete(self, session): """Force volume deletion""" body = {'os-force_delete': None} self._action(session, body) VolumeDetail = Volume ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2293055 openstacksdk-4.0.0/openstack/block_storage/v3/0000775000175000017500000000000000000000000021364 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/__init__.py0000664000175000017500000000000000000000000023463 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/_proxy.py0000664000175000017500000026051100000000000023263 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import typing as ty import warnings from openstack.block_storage import _base_proxy from openstack.block_storage.v3 import attachment as _attachment from openstack.block_storage.v3 import availability_zone from openstack.block_storage.v3 import backup as _backup from openstack.block_storage.v3 import block_storage_summary as _summary from openstack.block_storage.v3 import capabilities as _capabilities from openstack.block_storage.v3 import extension as _extension from openstack.block_storage.v3 import group as _group from openstack.block_storage.v3 import group_snapshot as _group_snapshot from openstack.block_storage.v3 import group_type as _group_type from openstack.block_storage.v3 import limits as _limits from openstack.block_storage.v3 import quota_class_set as _quota_class_set from openstack.block_storage.v3 import quota_set as _quota_set from openstack.block_storage.v3 import resource_filter as _resource_filter from openstack.block_storage.v3 import service as _service from openstack.block_storage.v3 import snapshot as _snapshot from openstack.block_storage.v3 import stats as _stats from openstack.block_storage.v3 import transfer as _transfer from openstack.block_storage.v3 import type as _type from openstack.block_storage.v3 import volume as _volume from openstack import exceptions from openstack.identity.v3 import project as _project from openstack import resource from openstack import utils from openstack import warnings as os_warnings class Proxy(_base_proxy.BaseBlockStorageProxy): _resource_registry = { "availability_zone": 
availability_zone.AvailabilityZone, "attachment": _attachment.Attachment, "backup": _backup.Backup, "capabilities": _capabilities.Capabilities, "extension": _extension.Extension, "group": _group.Group, "group_snapshot": _group_snapshot.GroupSnapshot, "group_type": _group_type.GroupType, "limits": _limits.Limits, "quota_set": _quota_set.QuotaSet, "resource_filter": _resource_filter.ResourceFilter, "snapshot": _snapshot.Snapshot, "stats_pools": _stats.Pools, "summary": _summary.BlockStorageSummary, "transfer": _transfer.Transfer, "type": _type.Type, "volume": _volume.Volume, } # ====== SNAPSHOTS ====== def get_snapshot(self, snapshot): """Get a single snapshot :param snapshot: The value can be the ID of a snapshot or a :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance. :returns: One :class:`~openstack.block_storage.v3.snapshot.Snapshot` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_snapshot.Snapshot, snapshot) def find_snapshot( self, name_or_id, ignore_missing=True, *, details=True, all_projects=False, ): """Find a single snapshot :param snapshot: The name or ID a snapshot :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the snapshot does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param bool details: When set to ``False`` :class: `~openstack.block_storage.v3.snapshot.Snapshot` objects will be returned. The default, ``True``, will cause more attributes to be returned. :param bool all_projects: When set to ``True``, search for snapshot by name across all projects. Note that this will likely result in a higher chance of duplicates. Admin-only by default. :returns: One :class:`~openstack.block_storage.v3.snapshot.Snapshot` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
:raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ query = {} if all_projects: query['all_projects'] = True list_base_path = '/snapshots/detail' if details else None return self._find( _snapshot.Snapshot, name_or_id, ignore_missing=ignore_missing, list_base_path=list_base_path, **query, ) def snapshots(self, *, details=True, all_projects=False, **query): """Retrieve a generator of snapshots :param bool details: When set to ``False`` :class: `~openstack.block_storage.v3.snapshot.Snapshot` objects will be returned. The default, ``True``, will cause more attributes to be returned. :param bool all_projects: When set to ``True``, list snapshots from all projects. Admin-only by default. :param kwargs query: Optional query parameters to be sent to limit the snapshots being returned. Available parameters include: * name: Name of the snapshot as a string. * project_id: Filter the snapshots by project. * volume_id: volume id of a snapshot. * status: Value of the status of the snapshot so that you can filter on "available" for example. :returns: A generator of snapshot objects. """ if all_projects: query['all_projects'] = True base_path = '/snapshots/detail' if details else None return self._list(_snapshot.Snapshot, base_path=base_path, **query) def create_snapshot(self, **attrs): """Create a new snapshot from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.snapshot.Snapshot`, comprised of the properties on the Snapshot class. :returns: The results of snapshot creation :rtype: :class:`~openstack.block_storage.v3.snapshot.Snapshot` """ return self._create(_snapshot.Snapshot, **attrs) def update_snapshot(self, snapshot, **attrs): """Update a snapshot :param snapshot: Either the ID of a snapshot or a :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance. :param dict attrs: The attributes to update on the snapshot. 
:returns: The updated snapshot :rtype: :class:`~openstack.block_storage.v3.snapshot.Snapshot` """ return self._update(_snapshot.Snapshot, snapshot, **attrs) def delete_snapshot(self, snapshot, ignore_missing=True, force=False): """Delete a snapshot :param snapshot: The value can be either the ID of a snapshot or a :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the snapshot does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent snapshot. :param bool force: Whether to try forcing snapshot deletion. :returns: ``None`` """ if not force: self._delete( _snapshot.Snapshot, snapshot, ignore_missing=ignore_missing ) else: snapshot = self._get_resource(_snapshot.Snapshot, snapshot) snapshot.force_delete(self) def get_snapshot_metadata(self, snapshot): """Return a dictionary of metadata for a snapshot :param snapshot: Either the ID of a snapshot or a :class:`~openstack.block_storage.v3.snapshot.Snapshot`. :returns: A :class:`~openstack.block_storage.v3.snapshot.Snapshot` with the snapshot's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.block_storage.v3.snapshot.Snapshot` """ snapshot = self._get_resource(_snapshot.Snapshot, snapshot) return snapshot.fetch_metadata(self) def set_snapshot_metadata(self, snapshot, **metadata): """Update metadata for a snapshot :param snapshot: Either the ID of a snapshot or a :class:`~openstack.block_storage.v3.snapshot.Snapshot`. :param kwargs metadata: Key/value pairs to be updated in the snapshot's metadata. No other metadata is modified by this call. All keys and values are stored as Unicode. :returns: A :class:`~openstack.block_storage.v3.snapshot.Snapshot` with the snapshot's metadata. All keys and values are Unicode text. 
:rtype: :class:`~openstack.block_storage.v3.snapshot.Snapshot` """ snapshot = self._get_resource(_snapshot.Snapshot, snapshot) return snapshot.set_metadata(self, metadata=metadata) def delete_snapshot_metadata(self, snapshot, keys=None): """Delete metadata for a snapshot :param snapshot: Either the ID of a snapshot or a :class:`~openstack.block_storage.v3.snapshot.Snapshot`. :param list keys: The keys to delete. If left empty complete metadata will be removed. :rtype: ``None`` """ snapshot = self._get_resource(_snapshot.Snapshot, snapshot) if keys is not None: for key in keys: snapshot.delete_metadata_item(self, key) else: snapshot.delete_metadata(self) # ====== SNAPSHOT ACTIONS ====== def reset_snapshot(self, snapshot, status): """Reset status of the snapshot :param snapshot: The value can be either the ID of a backup or a :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance. :param str status: New snapshot status :returns: None """ snapshot = self._get_resource(_snapshot.Snapshot, snapshot) snapshot.reset(self, status) def set_snapshot_status(self, snapshot, status, progress=None): """Update fields related to the status of a snapshot. :param snapshot: The value can be either the ID of a backup or a :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance. :param str status: New snapshot status :param str progress: A percentage value for snapshot build progress. :returns: None """ snapshot = self._get_resource(_snapshot.Snapshot, snapshot) snapshot.set_status(self, status, progress) def manage_snapshot(self, **attrs): """Creates a snapshot by using existing storage rather than allocating new storage. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.snapshot.Snapshot`, comprised of the properties on the Snapshot class. 
        :returns: The results of snapshot creation
        :rtype: :class:`~openstack.block_storage.v3.snapshot.Snapshot`
        """
        return _snapshot.Snapshot.manage(self, **attrs)

    def unmanage_snapshot(self, snapshot):
        """Unmanage a snapshot from block storage provisioning.

        :param snapshot: Either the ID of a snapshot or a
            :class:`~openstack.block_storage.v3.snapshot.Snapshot`.

        :returns: None
        """
        snapshot_obj = self._get_resource(_snapshot.Snapshot, snapshot)
        snapshot_obj.unmanage(self)

    # ====== TYPES ======
    def get_type(self, type):
        """Get a single type

        :param type: The value can be the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.

        :returns: One :class:`~openstack.block_storage.v3.type.Type`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        return self._get(_type.Type, type)

    def find_type(self, name_or_id, ignore_missing=True):
        """Find a single volume type

        :param name_or_id: The name or ID of a volume type
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the volume type does not exist.

        :returns: One :class:`~openstack.block_storage.v3.type.Type`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple
            resources are found.
        """
        return self._find(
            _type.Type,
            name_or_id,
            ignore_missing=ignore_missing,
        )

    def types(self, **query):
        """Retrieve a generator of volume types

        :returns: A generator of volume type objects.
        """
        return self._list(_type.Type, **query)

    def create_type(self, **attrs):
        """Create a new type from attributes

        :param dict attrs: Keyword arguments which will be used to create a
            :class:`~openstack.block_storage.v3.type.Type`, comprised of the
            properties on the Type class.
:returns: The results of type creation :rtype: :class:`~openstack.block_storage.v3.type.Type` """ return self._create(_type.Type, **attrs) def delete_type(self, type, ignore_missing=True): """Delete a type :param type: The value can be either the ID of a type or a :class:`~openstack.block_storage.v3.type.Type` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the type does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent type. :returns: ``None`` """ self._delete(_type.Type, type, ignore_missing=ignore_missing) def update_type(self, type, **attrs): """Update a type :param type: The value can be either the ID of a type or a :class:`~openstack.block_storage.v3.type.Type` instance. :param dict attrs: The attributes to update on the type :returns: The updated type :rtype: :class:`~openstack.block_storage.v3.type.Type` """ return self._update(_type.Type, type, **attrs) def update_type_extra_specs(self, type, **attrs): """Update the extra_specs for a type :param type: The value can be either the ID of a type or a :class:`~openstack.block_storage.v3.type.Type` instance. :param dict attrs: The extra spec attributes to update on the type :returns: A dict containing updated extra_specs """ res = self._get_resource(_type.Type, type) extra_specs = res.set_extra_specs(self, **attrs) result = _type.Type.existing(id=res.id, extra_specs=extra_specs) return result def delete_type_extra_specs(self, type, keys): """Delete the extra_specs for a type Note: This method will do a HTTP DELETE request for every key in keys. :param type: The value can be either the ID of a type or a :class:`~openstack.block_storage.v3.type.Type` instance. 
        :param keys: The keys to delete

        :returns: ``None``
        """
        res = self._get_resource(_type.Type, type)
        return res.delete_extra_specs(self, keys)

    def get_type_access(self, type):
        """Lists project IDs that have access to private volume type.

        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.

        :returns: List of dictionaries describing projects that have access to
            the specified type
        """
        res = self._get_resource(_type.Type, type)
        return res.get_private_access(self)

    def add_type_access(self, type, project_id):
        """Adds private volume type access to a project.

        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.
        :param str project_id: The ID of the project. Volume Type access to
            be added to this project ID.

        :returns: ``None``
        """
        res = self._get_resource(_type.Type, type)
        return res.add_private_access(self, project_id)

    def remove_type_access(self, type, project_id):
        """Remove private volume type access from a project.

        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.
        :param str project_id: The ID of the project. Volume Type access to
            be removed from this project ID.

        :returns: ``None``
        """
        res = self._get_resource(_type.Type, type)
        return res.remove_private_access(self, project_id)

    def get_type_encryption(self, volume_type_id):
        """Get the encryption details of a volume type

        :param volume_type_id: The value can be the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.

        :returns: One :class:`~openstack.block_storage.v3.type.TypeEncryption`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
""" volume_type = self._get_resource(_type.Type, volume_type_id) return self._get( _type.TypeEncryption, volume_type_id=volume_type.id, requires_id=False, ) def create_type_encryption(self, volume_type, **attrs): """Create new type encryption from attributes :param volume_type: The value can be the ID of a type or a :class:`~openstack.block_storage.v3.type.Type` instance. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.type.TypeEncryption`, comprised of the properties on the TypeEncryption class. :returns: The results of type encryption creation :rtype: :class:`~openstack.block_storage.v3.type.TypeEncryption` """ volume_type = self._get_resource(_type.Type, volume_type) return self._create( _type.TypeEncryption, volume_type_id=volume_type.id, **attrs ) def delete_type_encryption( self, encryption=None, volume_type=None, ignore_missing=True ): """Delete type encryption attributes :param encryption: The value can be None or a :class:`~openstack.block_storage.v3.type.TypeEncryption` instance. If encryption_id is None then volume_type_id must be specified. :param volume_type: The value can be the ID of a type or a :class:`~openstack.block_storage.v3.type.Type` instance. Required if encryption_id is None. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the type does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent type. :returns: ``None`` """ if volume_type: volume_type = self._get_resource(_type.Type, volume_type) encryption = self._get( _type.TypeEncryption, volume_type_id=volume_type.id, requires_id=False, ) self._delete( _type.TypeEncryption, encryption, ignore_missing=ignore_missing ) def update_type_encryption( self, encryption=None, volume_type=None, **attrs, ): """Update a type :param encryption: The value can be None or a :class:`~openstack.block_storage.v3.type.TypeEncryption` instance. 
If this is ``None`` then ``volume_type_id`` must be specified. :param volume_type: The value can be the ID of a type or a :class:`~openstack.block_storage.v3.type.Type` instance. Required if ``encryption_id`` is None. :param dict attrs: The attributes to update on the type encryption. :returns: The updated type encryption :rtype: :class:`~openstack.block_storage.v3.type.TypeEncryption` """ if volume_type: volume_type = self._get_resource(_type.Type, volume_type) encryption = self._get( _type.TypeEncryption, volume_type_id=volume_type.id, requires_id=False, ) return self._update(_type.TypeEncryption, encryption, **attrs) # ====== VOLUMES ====== def get_volume(self, volume): """Get a single volume :param volume: The value can be the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :returns: One :class:`~openstack.block_storage.v3.volume.Volume` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_volume.Volume, volume) def find_volume( self, name_or_id, ignore_missing=True, *, details=True, all_projects=False, ): """Find a single volume :param snapshot: The name or ID a volume :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the volume does not exist. :param bool details: When set to ``False`` no extended attributes will be returned. The default, ``True``, will cause objects with additional attributes to be returned. :param bool all_projects: When set to ``True``, search for volume by name across all projects. Note that this will likely result in a higher chance of duplicates. Admin-only by default. :returns: One :class:`~openstack.block_storage.v3.volume.Volume` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. 
""" query = {} if all_projects: query['all_projects'] = True list_base_path = '/volumes/detail' if details else None return self._find( _volume.Volume, name_or_id, ignore_missing=ignore_missing, list_base_path=list_base_path, **query, ) def volumes(self, *, details=True, all_projects=False, **query): """Retrieve a generator of volumes :param bool details: When set to ``False`` no extended attributes will be returned. The default, ``True``, will cause objects with additional attributes to be returned. :param bool all_projects: When set to ``True``, list volumes from all projects. Admin-only by default. :param kwargs query: Optional query parameters to be sent to limit the volumes being returned. Available parameters include: * name: Name of the volume as a string. * status: Value of the status of the volume so that you can filter on "available" for example. :returns: A generator of volume objects. """ if all_projects: query['all_projects'] = True base_path = '/volumes/detail' if details else None return self._list(_volume.Volume, base_path=base_path, **query) def create_volume(self, **attrs): """Create a new volume from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.volume.Volume`, comprised of the properties on the Volume class. :returns: The results of volume creation :rtype: :class:`~openstack.block_storage.v3.volume.Volume` """ return self._create(_volume.Volume, **attrs) def delete_volume(self, volume, ignore_missing=True, force=False): """Delete a volume :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the volume does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent volume. :param bool force: Whether to try forcing volume deletion. 
:returns: ``None`` """ if not force: self._delete(_volume.Volume, volume, ignore_missing=ignore_missing) else: volume = self._get_resource(_volume.Volume, volume) volume.force_delete(self) def update_volume(self, volume, **attrs): """Update a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param dict attrs: The attributes to update on the volume. :returns: The updated volume :rtype: :class:`~openstack.block_storage.v3.volume.Volume` """ return self._update(_volume.Volume, volume, **attrs) def get_volume_metadata(self, volume): """Return a dictionary of metadata for a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume`. :returns: A :class:`~openstack.block_storage.v3.volume.Volume` with the volume's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.block_storage.v3.volume.Volume` """ volume = self._get_resource(_volume.Volume, volume) return volume.fetch_metadata(self) def set_volume_metadata(self, volume, **metadata): """Update metadata for a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume`. :param kwargs metadata: Key/value pairs to be updated in the volume's metadata. No other metadata is modified by this call. All keys and values are stored as Unicode. :returns: A :class:`~openstack.block_storage.v3.volume.Volume` with the volume's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.block_storage.v3.volume.Volume` """ volume = self._get_resource(_volume.Volume, volume) return volume.set_metadata(self, metadata=metadata) def delete_volume_metadata(self, volume, keys=None): """Delete metadata for a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume`. :param list keys: The keys to delete. If left empty complete metadata will be removed. 
:rtype: ``None`` """ volume = self._get_resource(_volume.Volume, volume) if keys is not None: for key in keys: volume.delete_metadata_item(self, key) else: volume.delete_metadata(self) def summary(self, all_projects): """Get Volumes Summary This method returns the volumes summary in the deployment. :param all_projects: Whether to return the summary of all projects or not. :returns: One :class: `~openstack.block_storage.v3.block_storage_summary.Summary` instance. """ res = self._get(_summary.BlockStorageSummary, requires_id=False) return res.fetch( self, requires_id=False, resource_response_key='volume-summary', all_projects=all_projects, ) # ====== VOLUME ACTIONS ====== def extend_volume(self, volume, size): """Extend a volume :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param size: New volume size :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.extend(self, size) def set_volume_readonly(self, volume, readonly=True): """Set a volume's read-only flag. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param bool readonly: Whether the volume should be a read-only volume or not. :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.set_readonly(self, readonly) def retype_volume(self, volume, new_type, migration_policy="never"): """Retype the volume. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param new_type: The new volume type that volume is changed with. The value can be either the ID of the volume type or a :class:`~openstack.block_storage.v3.type.Type` instance. :param str migration_policy: Specify if the volume should be migrated when it is re-typed. Possible values are on-demand or never. Default: never. 
:returns: None """ volume = self._get_resource(_volume.Volume, volume) type_id = resource.Resource._get_id(new_type) volume.retype(self, type_id, migration_policy) def set_volume_bootable_status(self, volume, bootable): """Set bootable status of the volume. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param bool bootable: Specifies whether the volume should be bootable or not. :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.set_bootable_status(self, bootable) def set_volume_image_metadata(self, volume, **metadata): """Update image metadata for a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume`. :param kwargs metadata: Key/value pairs to be updated in the volume's image metadata. No other metadata is modified by this call. :returns: None """ volume = self._get_resource(_volume.Volume, volume) return volume.set_image_metadata(self, metadata=metadata) def delete_volume_image_metadata(self, volume, keys=None): """Delete metadata for a volume :param volume: Either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume`. :param list keys: The keys to delete. If left empty complete metadata will be removed. :returns: None """ volume = self._get_resource(_volume.Volume, volume) if keys is not None: for key in keys: volume.delete_image_metadata_item(self, key) else: volume.delete_image_metadata(self) def reset_volume_status( self, volume, status=None, attach_status=None, migration_status=None ): """Reset volume statuses. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param str status: The new volume status. :param str attach_status: The new volume attach status. :param str migration_status: The new volume migration status (admin only). 
:returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.reset_status(self, status, attach_status, migration_status) def revert_volume_to_snapshot(self, volume, snapshot): """Revert a volume to its latest snapshot. This method only support reverting a detached volume, and the volume status must be available. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param snapshot: The value can be either the ID of a snapshot or a :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance. :returns: None """ volume = self._get_resource(_volume.Volume, volume) snapshot = self._get_resource(_snapshot.Snapshot, snapshot) volume.revert_to_snapshot(self, snapshot.id) def attach_volume(self, volume, mountpoint, instance=None, host_name=None): """Attaches a volume to a server. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param str mountpoint: The attaching mount point. :param str instance: The UUID of the attaching instance. :param str host_name: The name of the attaching host. :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.attach(self, mountpoint, instance, host_name) def detach_volume(self, volume, attachment, force=False, connector=None): """Detaches a volume from a server. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param str attachment: The ID of the attachment. :param bool force: Whether to force volume detach (Rolls back an unsuccessful detach operation after you disconnect the volume.) :param dict connector: The connector object. :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.detach(self, attachment, force, connector) def manage_volume(self, **attrs): """Creates a volume by using existing storage rather than allocating new storage. 
:param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.volume.Volume`, comprised of the properties on the Volume class. :returns: The results of volume creation :rtype: :class:`~openstack.block_storage.v3.volume.Volume` """ return _volume.Volume.manage(self, **attrs) def unmanage_volume(self, volume): """Removes a volume from Block Storage management without removing the back-end storage object that is associated with it. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.unmanage(self) def migrate_volume( self, volume, host=None, force_host_copy=False, lock_volume=False, cluster=None, ): """Migrates a volume to the specified host. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param str host: The target host for the volume migration. Host format is host@backend. :param bool force_host_copy: If false (the default), rely on the volume backend driver to perform the migration, which might be optimized. If true, or the volume driver fails to migrate the volume itself, a generic host-based migration is performed. :param bool lock_volume: If true, migrating an available volume will change its status to maintenance preventing other operations from being performed on the volume such as attach, detach, retype, etc. :param str cluster: The target cluster for the volume migration. Cluster format is cluster@backend. Starting with microversion 3.16, either cluster or host must be specified. If host is specified and is part of a cluster, the cluster is used as the target for the migration. 
        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        volume.migrate(self, host, force_host_copy, lock_volume, cluster)

    def complete_volume_migration(self, volume, new_volume, error=False):
        """Complete the migration of a volume.

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param str new_volume: The UUID of the new volume.
        :param bool error: Used to indicate if an error has occurred elsewhere
            that requires clean up.

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        volume.complete_migration(self, new_volume, error)

    def upload_volume_to_image(
        self,
        volume,
        image_name,
        force=False,
        disk_format=None,
        container_format=None,
        visibility=None,
        protected=None,
    ):
        """Uploads the specified volume to image service.

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param str image_name: The name for the new image.
        :param bool force: Enables or disables upload of a volume that is
            attached to an instance.
        :param str disk_format: Disk format for the new image.
        :param str container_format: Container format for the new image.
        :param str visibility: The visibility property of the new image.
        :param str protected: Whether the new image is protected.

        :returns: dictionary describing the image.
        """
        volume = self._get_resource(_volume.Volume, volume)
        return volume.upload_to_image(
            self,
            image_name,
            force=force,
            disk_format=disk_format,
            container_format=container_format,
            visibility=visibility,
            protected=protected,
        )

    def reserve_volume(self, volume):
        """Mark volume as reserved.

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.

        :returns: None"""
        volume = self._get_resource(_volume.Volume, volume)
        volume.reserve(self)

    def unreserve_volume(self, volume):
        """Unmark volume as reserved.
:param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :returns: None""" volume = self._get_resource(_volume.Volume, volume) volume.unreserve(self) def begin_volume_detaching(self, volume): """Update volume status to 'detaching'. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :returns: None""" volume = self._get_resource(_volume.Volume, volume) volume.begin_detaching(self) def abort_volume_detaching(self, volume): """Update volume status to 'in-use'. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :returns: None""" volume = self._get_resource(_volume.Volume, volume) volume.abort_detaching(self) def init_volume_attachment(self, volume, connector): """Initialize volume attachment. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param dict connector: The connector object. :returns: Dictionary containing the modified connector object""" volume = self._get_resource(_volume.Volume, volume) return volume.init_attachment(self, connector) def terminate_volume_attachment(self, volume, connector): """Update volume status to 'in-use'. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param dict connector: The connector object. :returns: None """ volume = self._get_resource(_volume.Volume, volume) volume.terminate_attachment(self, connector) # ====== ATTACHMENTS ====== def create_attachment(self, volume, **attrs): """Create a new attachment This is an internal API and should only be called by services consuming volume attachments like nova, glance, ironic etc. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. 
:param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.attachment.Attachment` comprised of the properties on the Attachment class like connector, instance_id, mode etc. :returns: The results of attachment creation :rtype: :class:`~openstack.block_storage.v3.attachment.Attachment` """ volume_id = resource.Resource._get_id(volume) return self._create( _attachment.Attachment, volume_id=volume_id, **attrs ) def get_attachment(self, attachment): """Get a single volume This is an internal API and should only be called by services consuming volume attachments like nova, glance, ironic etc. :param attachment: The value can be the ID of an attachment or a :class:`~attachment.Attachment` instance. :returns: One :class:`~attachment.Attachment` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_attachment.Attachment, attachment) def attachments(self, **query): """Returns a generator of attachments. This is an internal API and should only be called by services consuming volume attachments like nova, glance, ironic etc. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of attachment objects. """ return self._list(_attachment.Attachment, **query) def delete_attachment(self, attachment, ignore_missing=True): """Delete an attachment This is an internal API and should only be called by services consuming volume attachments like nova, glance, ironic etc. :param type: The value can be either the ID of a attachment or a :class:`~openstack.block_storage.v3.attachment.Attachment` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the attachment does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent attachment. 
:returns: ``None`` """ self._delete( _attachment.Attachment, attachment, ignore_missing=ignore_missing, ) def update_attachment(self, attachment, **attrs): """Update an attachment This is an internal API and should only be called by services consuming volume attachments like nova, glance, ironic etc. :param attachment: The value can be the ID of an attachment or a :class:`~openstack.block_storage.v3.attachment.Attachment` instance. :param dict attrs: Keyword arguments which will be used to update a :class:`~openstack.block_storage.v3.attachment.Attachment` comprised of the properties on the Attachment class :returns: The updated attachment :rtype: :class:`~openstack.volume.v3.attachment.Attachment` """ return self._update(_attachment.Attachment, attachment, **attrs) def complete_attachment(self, attachment): """Complete an attachment This is an internal API and should only be called by services consuming volume attachments like nova, glance, ironic etc. :param attachment: The value can be the ID of an attachment or a :class:`~openstack.block_storage.v3.attachment.Attachment` instance. :returns: ``None`` :rtype: :class:`~openstack.volume.v3.attachment.Attachment` """ attachment_obj = self._get_resource(_attachment.Attachment, attachment) return attachment_obj.complete(self) # ====== BACKEND POOLS ====== def backend_pools(self, **query): """Returns a generator of cinder Back-end storage pools :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns A generator of cinder Back-end storage pools objects """ return self._list(_stats.Pools, **query) # ====== BACKUPS ====== def backups(self, *, details=True, **query): """Retrieve a generator of backups :param bool details: When set to ``False`` no additional details will be returned. The default, ``True``, will cause objects with additional attributes to be returned. 
        :param dict query: Optional query parameters to be sent to limit the
            resources being returned:

            * offset: pagination marker
            * limit: pagination limit
            * sort_key: Sorts by an attribute. A valid value is name, status,
              container_format, disk_format, size, id, created_at, or
              updated_at. Default is created_at. The API uses the natural
              sorting direction of the sort_key attribute value.
            * sort_dir: Sorts by one or more sets of attribute and sort
              direction combinations. If you omit the sort direction in a
              set, default is desc.
            * project_id: Project ID to query backups for.

        :returns: A generator of backup objects.
        """
        base_path = '/backups/detail' if details else None
        return self._list(_backup.Backup, base_path=base_path, **query)

    def get_backup(self, backup):
        """Get a backup

        :param backup: The value can be the ID of a backup
            or a :class:`~openstack.block_storage.v3.backup.Backup`
            instance.

        :returns: Backup instance
        :rtype: :class:`~openstack.block_storage.v3.backup.Backup`
        """
        return self._get(_backup.Backup, backup)

    def find_backup(self, name_or_id, ignore_missing=True, *, details=True):
        """Find a single backup

        :param name_or_id: The name or ID of a backup.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the backup does not exist.
        :param bool details: When set to ``False`` no additional details will
            be returned. The default, ``True``, will cause objects with
            additional attributes to be returned.

        :returns: One :class:`~openstack.block_storage.v3.backup.Backup`
        :raises: :class:`~openstack.exceptions.NotFoundException` when no
            resource can be found.
        :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple
            resources are found.
""" list_base_path = '/backups/detail' if details else None return self._find( _backup.Backup, name_or_id, ignore_missing=ignore_missing, list_base_path=list_base_path, ) def create_backup(self, **attrs): """Create a new Backup from attributes with native API :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.backup.Backup` comprised of the properties on the Backup class. :returns: The results of Backup creation :rtype: :class:`~openstack.block_storage.v3.backup.Backup` """ return self._create(_backup.Backup, **attrs) def delete_backup(self, backup, ignore_missing=True, force=False): """Delete a CloudBackup :param backup: The value can be the ID of a backup or a :class:`~openstack.block_storage.v3.backup.Backup` instance :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone. :param bool force: Whether to try forcing backup deletion :returns: ``None`` """ if not force: self._delete(_backup.Backup, backup, ignore_missing=ignore_missing) else: backup = self._get_resource(_backup.Backup, backup) backup.force_delete(self) # ====== BACKUP ACTIONS ====== def restore_backup(self, backup, volume_id=None, name=None): """Restore a Backup to volume :param backup: The value can be the ID of a backup or a :class:`~openstack.block_storage.v3.backup.Backup` instance :param volume_id: The ID of the volume to restore the backup to. :param name: The name for new volume creation to restore. 
:returns: Updated backup instance :rtype: :class:`~openstack.block_storage.v3.backup.Backup` """ backup = self._get_resource(_backup.Backup, backup) return backup.restore(self, volume_id=volume_id, name=name) def reset_backup(self, backup, status): """Reset status of the backup :param backup: The value can be either the ID of a backup or a :class:`~openstack.block_storage.v3.backup.Backup` instance. :param str status: New backup status :returns: None """ backup = self._get_resource(_backup.Backup, backup) backup.reset(self, status) # ====== LIMITS ====== def get_limits(self, project=None): """Retrieves limits :param project: A project to get limits for. The value can be either the ID of a project or an :class:`~openstack.identity.v3.project.Project` instance. :returns: A Limits object, including both :class:`~openstack.block_storage.v3.limits.AbsoluteLimit` and :class:`~openstack.block_storage.v3.limits.RateLimit` :rtype: :class:`~openstack.block_storage.v3.limits.Limits` """ params = {} if project: params['project_id'] = resource.Resource._get_id(project) # we don't use Proxy._get since that doesn't allow passing arbitrary # query string parameters res = self._get_resource(_limits.Limits, None) return res.fetch(self, requires_id=False, **params) # ====== CAPABILITIES ====== def get_capabilities(self, host): """Get a backend's capabilites :param host: Specified backend to obtain volume stats and properties. :returns: One :class: `~openstack.block_storage.v3.capabilites.Capabilities` instance. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_capabilities.Capabilities, host) # ====== GROUPS ====== def get_group(self, group_id, **attrs): """Get a group :param group_id: The ID of the group to get. :param dict attrs: Optional query parameters to be sent to limit the resources being returned. :returns: A Group instance. 
:rtype: :class:`~openstack.block_storage.v3.group` """ return self._get(_group.Group, group_id, **attrs) def find_group(self, name_or_id, ignore_missing=True, *, details=True): """Find a single group :param name_or_id: The name or ID of a group. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the group snapshot does not exist. :param bool details: When set to ``False``, no additional details will be returned. The default, ``True``, will cause additional details to be returned. :returns: One :class:`~openstack.block_storage.v3.group.Group` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ list_base_path = '/groups/detail' if details else None return self._find( _group.Group, name_or_id, ignore_missing=ignore_missing, list_base_path=list_base_path, ) def groups(self, *, details=True, **query): """Retrieve a generator of groups :param bool details: When set to ``False``, no additional details will be returned. The default, ``True``, will cause additional details to be returned. :param dict query: Optional query parameters to be sent to limit the resources being returned: * all_tenants: Shows details for all project. * sort: Comma-separated list of sort keys and optional sort directions. * limit: Returns a number of items up to the limit value. * offset: Used in conjunction with limit to return a slice of items. Specifies where to start in the list. * marker: The ID of the last-seen item. * list_volume: Show volume ids in this group. * detailed: If True, will list groups with details. * search_opts: Search options. :returns: A generator of group objects. 
""" base_path = '/groups/detail' if details else '/groups' return self._list(_group.Group, base_path=base_path, **query) def create_group(self, **attrs): """Create a new group from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.group.Group` comprised of the properties on the Group class. :returns: The results of group creation. :rtype: :class:`~openstack.block_storage.v3.group.Group`. """ return self._create(_group.Group, **attrs) def create_group_from_source(self, **attrs): """Creates a new group from source :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.group.Group` comprised of the properties on the Group class. :returns: The results of group creation. :rtype: :class:`~openstack.block_storage.v3.group.Group`. """ return _group.Group.create_from_source(self, **attrs) def reset_group_state(self, group, status): """Reset group status :param group: The :class:`~openstack.block_storage.v3.group.Group` to set the state. :param status: The status for a group. :returns: ``None`` """ res = self._get_resource(_group.Group, group) return res.reset_status(self, status) def delete_group(self, group, delete_volumes=False): """Delete a group :param group: The :class:`~openstack.block_storage.v3.group.Group` to delete. :param bool delete_volumes: When set to ``True``, volumes in group will be deleted. :returns: ``None``. """ res = self._get_resource(_group.Group, group) res.delete(self, delete_volumes=delete_volumes) def update_group(self, group, **attrs): """Update a group :param group: The value can be the ID of a group or a :class:`~openstack.block_storage.v3.group.Group` instance. :param dict attrs: The attributes to update on the group. 
:returns: The updated group :rtype: :class:`~openstack.volume.v3.group.Group` """ return self._update(_group.Group, group, **attrs) # ====== AVAILABILITY ZONES ====== def availability_zones(self): """Return a generator of availability zones :returns: A generator of availability zone :rtype: :class:`~openstack.block_storage.v3.availability_zone.AvailabilityZone` """ return self._list(availability_zone.AvailabilityZone) # ====== GROUP SNAPSHOT ====== def get_group_snapshot(self, group_snapshot_id): """Get a group snapshot :param group_snapshot_id: The ID of the group snapshot to get. :returns: A GroupSnapshot instance. :rtype: :class:`~openstack.block_storage.v3.group_snapshot` """ return self._get(_group_snapshot.GroupSnapshot, group_snapshot_id) def find_group_snapshot( self, name_or_id, ignore_missing=True, *, details=True, ): """Find a single group snapshot :param name_or_id: The name or ID of a group snapshot. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the group snapshot does not exist. :param bool details: When set to ``False``, no additional details will be returned. The default, ``True``, will cause additional details to be returned. :returns: One :class:`~openstack.block_storage.v3.group_snapshot` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ list_base_path = '/group_snapshots/detail' if details else None return self._find( _group_snapshot.GroupSnapshot, name_or_id, ignore_missing=ignore_missing, list_base_path=list_base_path, ) def group_snapshots(self, *, details=True, **query): """Retrieve a generator of group snapshots :param bool details: When ``True``, returns :class:`~openstack.block_storage.v3.group_snapshot.GroupSnapshot` objects with additional attributes filled. 
:param kwargs query: Optional query parameters to be sent to limit the group snapshots being returned. :returns: A generator of group snapshtos. """ base_path = '/group_snapshots/detail' if details else None return self._list( _group_snapshot.GroupSnapshot, base_path=base_path, **query, ) def create_group_snapshot(self, **attrs): """Create a group snapshot :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.group_snapshot.GroupSnapshot` comprised of the properties on the GroupSnapshot class. :returns: The results of group snapshot creation. :rtype: :class:`~openstack.block_storage.v3.group_snapshot`. """ return self._create(_group_snapshot.GroupSnapshot, **attrs) def reset_group_snapshot_state(self, group_snapshot, state): """Reset group snapshot status :param group_snapshot: The :class:`~openstack.block_storage.v3.group_snapshot.GroupSnapshot` to set the state. :param state: The state of the group snapshot to be set. :returns: None """ resource = self._get_resource( _group_snapshot.GroupSnapshot, group_snapshot ) resource.reset_state(self, state) def delete_group_snapshot(self, group_snapshot, ignore_missing=True): """Delete a group snapshot :param group_snapshot: The :class:`~openstack.block_storage.v3. group_snapshot.GroupSnapshot` to delete. :returns: None """ self._delete( _group_snapshot.GroupSnapshot, group_snapshot, ignore_missing=ignore_missing, ) # ====== GROUP TYPE ====== def get_group_type(self, group_type): """Get a specific group type :param group_type: The value can be the ID of a group type or a :class:`~openstack.block_storage.v3.group_type.GroupType` instance. :returns: One :class: `~openstack.block_storage.v3.group_type.GroupType` instance. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get(_group_type.GroupType, group_type) def find_group_type(self, name_or_id, ignore_missing=True): """Find a single group type :param name_or_id: The name or ID of a group type. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the group type does not exist. :returns: One :class:`~openstack.block_storage.v3.group_type.GroupType` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ return self._find( _group_type.GroupType, name_or_id, ignore_missing=ignore_missing, ) def group_types(self, **query): """Retrive a generator of group types :param dict query: Optional query parameters to be sent to limit the resources being returned: * sort: Comma-separated list of sort keys and optional sort directions in the form of [:]. A valid direction is asc (ascending) or desc (descending). * limit: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. * offset: Used in conjunction with limit to return a slice of items. Is where to start in the list. * marker: The ID of the last-seen item. :returns: A generator of group type objects. """ return self._list(_group_type.GroupType, **query) def create_group_type(self, **attrs): """Create a group type :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.group_type.GroupType` comprised of the properties on the GroupType class. :returns: The results of group type creation. :rtype: :class:`~openstack.block_storage.v3.group_type.GroupTye`. 
""" return self._create(_group_type.GroupType, **attrs) def delete_group_type(self, group_type, ignore_missing=True): """Delete a group type :param group_type: The value can be the ID of a group type or a :class:`~openstack.block_storage.v3.group_type.GroupType` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone. :returns: None """ self._delete( _group_type.GroupType, group_type, ignore_missing=ignore_missing ) def update_group_type(self, group_type, **attrs): """Update a group_type :param group_type: The value can be the ID of a group type or a :class:`~openstack.block_storage.v3.group_type.GroupType` instance. :param dict attrs: The attributes to update on the group type. :returns: The updated group type. :rtype: :class:`~openstack.block_storage.v3.group_type.GroupType` """ return self._update(_group_type.GroupType, group_type, **attrs) def fetch_group_type_group_specs(self, group_type): """Lists group specs of a group type. :param group_type: Either the ID of a group type or a :class:`~openstack.block_storage.v3.group_type.GroupType` instance. :returns: One :class:`~openstack.block_storage.v3.group_type.GroupType` """ group_type = self._get_resource(_group_type.GroupType, group_type) return group_type.fetch_group_specs(self) def create_group_type_group_specs(self, group_type, group_specs): """Create group specs for a group type. :param group_type: Either the ID of a group type or a :class:`~openstack.block_storage.v3.group_type.GroupType` instance. 
:param dict group_specs: dict of extra specs :returns: One :class:`~openstack.block_storage.v3.group_type.GroupType` """ group_type = self._get_resource(_group_type.GroupType, group_type) return group_type.create_group_specs(self, specs=group_specs) def get_group_type_group_specs_property(self, group_type, prop): """Retrieve a group spec property for a group type. :param group_type: Either the ID of a group type or a :class:`~openstack.block_storage.v3.group_type.GroupType` instance. :param str prop: Property name. :returns: String value of the requested property. """ group_type = self._get_resource(_group_type.GroupType, group_type) return group_type.get_group_specs_property(self, prop) def update_group_type_group_specs_property(self, group_type, prop, val): """Update a group spec property for a group type. :param group_type: Either the ID of a group type or a :class:`~openstack.block_storage.v3.group_type.GroupType` instance. :param str prop: Property name. :param str val: Property value. :returns: String value of the requested property. """ group_type = self._get_resource(_group_type.GroupType, group_type) return group_type.update_group_specs_property(self, prop, val) def delete_group_type_group_specs_property(self, group_type, prop): """Delete a group spec property from a group type. :param group_type: Either the ID of a group type or a :class:`~openstack.block_storage.v3.group_type.GroupType` instance. :param str prop: Property name. :returns: None """ group_type = self._get_resource(_group_type.GroupType, group_type) return group_type.delete_group_specs_property(self, prop) # ====== QUOTA CLASS SETS ====== def get_quota_class_set(self, quota_class_set='default'): """Get a single quota class set Only one quota class is permitted, ``default``. :param quota_class_set: The value can be the ID of a quota class set (only ``default`` is supported) or a :class:`~openstack.block_storage.v3.quota_class_set.QuotaClassSet` instance. 
:returns: One :class:`~openstack.block_storage.v3.quota_class_set.QuotaClassSet` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_quota_class_set.QuotaClassSet, quota_class_set) def update_quota_class_set(self, quota_class_set, **attrs): """Update a QuotaClassSet. Only one quota class is permitted, ``default``. :param quota_class_set: Either the ID of a quota class set (only ``default`` is supported) or a :param attrs: The attributes to update on the QuotaClassSet represented by ``quota_class_set``. :returns: The updated QuotaSet :rtype: :class:`~openstack.block_storage.v3.quota_set.QuotaSet` """ return self._update( _quota_class_set.QuotaClassSet, quota_class_set, **attrs ) # ====== QUOTA SETS ====== def get_quota_set(self, project, usage=False, **query): """Show QuotaSet information for the project :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be retrieved :param bool usage: When set to ``True`` quota usage and reservations would be filled. :param dict query: Additional query parameters to use. :returns: One :class:`~openstack.block_storage.v3.quota_set.QuotaSet` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ project = self._get_resource(_project.Project, project) res = self._get_resource( _quota_set.QuotaSet, None, project_id=project.id ) return res.fetch(self, usage=usage, **query) def get_quota_set_defaults(self, project): """Show QuotaSet defaults for the project :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be retrieved :returns: One :class:`~openstack.block_storage.v3.quota_set.QuotaSet` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" project = self._get_resource(_project.Project, project) res = self._get_resource( _quota_set.QuotaSet, None, project_id=project.id ) return res.fetch(self, base_path='/os-quota-sets/defaults') def revert_quota_set(self, project, **query): """Reset Quota for the project/user. :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be resetted. :param dict query: Additional parameters to be used. :returns: ``None`` """ project = self._get_resource(_project.Project, project) res = self._get_resource( _quota_set.QuotaSet, None, project_id=project.id ) return res.delete(self, **query) def update_quota_set(self, project, **attrs): """Update a QuotaSet. :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be reset. :param attrs: The attributes to update on the QuotaSet represented by ``quota_set``. :returns: The updated QuotaSet :rtype: :class:`~openstack.block_storage.v3.quota_set.QuotaSet` """ if 'project_id' in attrs or isinstance(project, _quota_set.QuotaSet): warnings.warn( "The signature of 'update_quota_set' has changed and it " "now expects a Project as the first argument, in line " "with the other quota set methods.", os_warnings.RemovedInSDK50Warning, ) if isinstance(project, _quota_set.QuotaSet): attrs['project_id'] = project.project_id # cinder doesn't support any query parameters so we simply pop # these if 'query' in attrs: attrs.pop('params') else: project = self._get_resource(_project.Project, project) attrs['project_id'] = project.id return self._update(_quota_set.QuotaSet, None, **attrs) # ====== SERVICES ====== @ty.overload def find_service( self, name_or_id: str, ignore_missing: ty.Literal[True] = True, **query, ) -> ty.Optional[_service.Service]: ... @ty.overload def find_service( self, name_or_id: str, ignore_missing: ty.Literal[False], **query, ) -> _service.Service: ... 
# excuse the duplication here: it's mypy's fault # https://github.com/python/mypy/issues/14764 @ty.overload def find_service( self, name_or_id: str, ignore_missing: bool, **query, ) -> ty.Optional[_service.Service]: ... def find_service( self, name_or_id: str, ignore_missing: bool = True, **query, ) -> ty.Optional[_service.Service]: """Find a single service :param name_or_id: The name or ID of a service :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Additional attributes like 'host' :returns: One: class:`~openstack.block_storage.v3.service.Service` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ return self._find( _service.Service, name_or_id, ignore_missing=ignore_missing, **query, ) def services( self, **query: ty.Any, ) -> ty.Generator[_service.Service, None, None]: """Return a generator of service :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of Service objects :rtype: class: `~openstack.block_storage.v3.service.Service` """ return self._list(_service.Service, **query) def enable_service( self, service: ty.Union[str, _service.Service], ) -> _service.Service: """Enable a service :param service: Either the ID of a service or a :class:`~openstack.block_storage.v3.service.Service` instance. 
:returns: Updated service instance :rtype: class: `~openstack.block_storage.v3.service.Service` """ service_obj = self._get_resource(_service.Service, service) return service_obj.enable(self) def disable_service( self, service: ty.Union[str, _service.Service], *, reason: ty.Optional[str] = None, ) -> _service.Service: """Disable a service :param service: Either the ID of a service or a :class:`~openstack.block_storage.v3.service.Service` instance :param str reason: The reason to disable a service :returns: Updated service instance :rtype: class: `~openstack.block_storage.v3.service.Service` """ service_obj = self._get_resource(_service.Service, service) return service_obj.disable(self, reason=reason) def thaw_service( self, service: ty.Union[str, _service.Service], ) -> _service.Service: """Thaw a service :param service: Either the ID of a service or a :class:`~openstack.block_storage.v3.service.Service` instance :returns: Updated service instance :rtype: class: `~openstack.block_storage.v3.service.Service` """ service_obj = self._get_resource(_service.Service, service) return service_obj.thaw(self) def freeze_service( self, service: ty.Union[str, _service.Service], ) -> _service.Service: """Freeze a service :param service: Either the ID of a service or a :class:`~openstack.block_storage.v3.service.Service` instance :returns: Updated service instance :rtype: class: `~openstack.block_storage.v3.service.Service` """ service_obj = self._get_resource(_service.Service, service) return service_obj.freeze(self) def failover_service( self, service: ty.Union[str, _service.Service], *, cluster: ty.Optional[str] = None, backend_id: ty.Optional[str] = None, ) -> _service.Service: """Failover a service Only applies to replicating cinder-volume services. 
:param service: Either the ID of a service or a :class:`~openstack.block_storage.v3.service.Service` instance :returns: Updated service instance :rtype: class: `~openstack.block_storage.v3.service.Service` """ service_obj = self._get_resource(_service.Service, service) return service_obj.failover( self, cluster=cluster, backend_id=backend_id ) # ====== RESOURCE FILTERS ====== def resource_filters(self, **query): """Retrieve a generator of resource filters :returns: A generator of resource filters. """ return self._list(_resource_filter.ResourceFilter, **query) # ====== EXTENSIONS ====== def extensions(self): """Return a generator of extensions :returns: A generator of extension :rtype: :class:`~openstack.block_storage.v3.extension.Extension` """ return self._list(_extension.Extension) # ===== TRANFERS ===== def create_transfer(self, **attrs): """Create a new Transfer record :param volume_id: The value is ID of the volume. :param name: The value is name of the transfer :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.block_storage.v3.transfer.Transfer` comprised of the properties on the Transfer class. :returns: The results of Transfer creation :rtype: :class:`~openstack.block_storage.v3.transfer.Transfer` """ return self._create(_transfer.Transfer, **attrs) def delete_transfer(self, transfer, ignore_missing=True): """Delete a volume transfer :param transfer: The value can be either the ID of a transfer or a :class:`~openstack.block_storage.v3.transfer.Transfer`` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the transfer does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent transfer. 
:returns: ``None`` """ self._delete( _transfer.Transfer, transfer, ignore_missing=ignore_missing, ) def find_transfer(self, name_or_id, ignore_missing=True): """Find a single transfer :param name_or_id: The name or ID a transfer :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the volume transfer does not exist. :returns: One :class:`~openstack.block_storage.v3.transfer.Transfer` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ return self._find( _transfer.Transfer, name_or_id, ignore_missing=ignore_missing, ) def get_transfer(self, transfer): """Get a single transfer :param transfer: The value can be the ID of a transfer or a :class:`~openstack.block_storage.v3.transfer.Transfer` instance. :returns: One :class:`~openstack.block_storage.v3.transfer.Transfer` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_transfer.Transfer, transfer) def transfers(self, *, details=True, all_projects=False, **query): """Retrieve a generator of transfers :param bool details: When set to ``False`` no extended attributes will be returned. The default, ``True``, will cause objects with additional attributes to be returned. :param bool all_projects: When set to ``True``, list transfers from all projects. Admin-only by default. :param kwargs query: Optional query parameters to be sent to limit the transfers being returned. :returns: A generator of transfer objects. 
""" if all_projects: query['all_projects'] = True base_path = '/volume-transfers' if not utils.supports_microversion(self, '3.55'): base_path = '/os-volume-transfer' if details: base_path = utils.urljoin(base_path, 'detail') return self._list(_transfer.Transfer, base_path=base_path, **query) def accept_transfer(self, transfer_id, auth_key): """Accept a Transfer :param transfer_id: The value can be the ID of a transfer or a :class:`~openstack.block_storage.v3.transfer.Transfer` instance. :param auth_key: The key to authenticate volume transfer. :returns: The results of Transfer creation :rtype: :class:`~openstack.block_storage.v3.transfer.Transfer` """ transfer = self._get_resource(_transfer.Transfer, transfer_id) return transfer.accept(self, auth_key=auth_key) # ====== UTILS ====== def wait_for_status( self, res, status='available', failures=None, interval=2, wait=120, callback=None, ): """Wait for a resource to be in a particular status. :param res: The resource to wait on to reach the specified status. The resource must have a ``status`` attribute. :type resource: A :class:`~openstack.resource.Resource` object. :param str status: Desired status. :param list failures: Statuses that would be interpreted as failures. :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :param callback: A callback function. This will be called with a single value, progress. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to the desired status failed to occur in specified seconds. :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource has transited to one of the failure statuses. :raises: :class:`~AttributeError` if the resource does not have a ``status`` attribute. 
""" failures = ['error'] if failures is None else failures return resource.wait_for_status( self, res, status, failures, interval, wait, callback=callback, ) def wait_for_delete(self, res, interval=2, wait=120, callback=None): """Wait for a resource to be deleted. :param res: The resource to wait on to be deleted. :type resource: A :class:`~openstack.resource.Resource` object. :param int interval: Number of seconds to wait before two consecutive checks. Default to 2. :param int wait: Maximum number of seconds to wait before the change. Default to 120. :param callback: A callback function. This will be called with a single value, progress. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to delete failed to occur in the specified seconds. """ return resource.wait_for_delete( self, res, interval, wait, callback=callback, ) def _get_cleanup_dependencies(self): return {'block_storage': {'before': []}} def _service_cleanup( self, dry_run=True, client_status_queue=None, identified_resources=None, filters=None, resource_evaluation_fn=None, skip_resources=None, ): # It is not possible to delete backup if there are dependent backups. # In order to be able to do cleanup those is required to have multiple # iterations (first clean up backups with has no dependent backups, and # in next iterations there should be no backups with dependencies # remaining. Logically we can have also failures, therefore it is # required to limit amount of iterations we do (currently pick 10). In # dry_run all those iterations are doing not what we want, therefore # only iterate in a real cleanup mode. 
if not self.should_skip_resource_cleanup("backup", skip_resources): if dry_run: # Just iterate and evaluate backups in dry_run mode for obj in self.backups(details=False): need_delete = self._service_cleanup_del_res( self.delete_backup, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) else: # Set initial iterations conditions need_backup_iteration = True max_iterations = 10 while need_backup_iteration and max_iterations > 0: # Reset iteration controls need_backup_iteration = False max_iterations -= 1 backups = [] # To increase success chance sort backups by age, dependent # backups are logically younger. for obj in self.backups( details=True, sort_key='created_at', sort_dir='desc' ): if not obj.has_dependent_backups: # If no dependent backups - go with it need_delete = self._service_cleanup_del_res( self.delete_backup, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not dry_run and need_delete: backups.append(obj) else: # Otherwise we need another iteration need_backup_iteration = True # Before proceeding need to wait for backups to be deleted for obj in backups: try: self.wait_for_delete(obj) except exceptions.SDKException: # Well, did our best, still try further pass if not self.should_skip_resource_cleanup("snapshot", skip_resources): snapshots = [] for obj in self.snapshots(details=False): need_delete = self._service_cleanup_del_res( self.delete_snapshot, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not dry_run and need_delete: snapshots.append(obj) # Before deleting volumes need to wait for snapshots to be deleted for obj in snapshots: try: self.wait_for_delete(obj) except 
exceptions.SDKException: # Well, did our best, still try further pass if not self.should_skip_resource_cleanup("volume", skip_resources): for obj in self.volumes(details=True): self._service_cleanup_del_res( self.delete_volume, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/attachment.py0000664000175000017500000000654600000000000024101 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from openstack import exceptions from openstack import resource from openstack import utils class Attachment(resource.Resource): resource_key = "attachment" resources_key = "attachments" base_path = "/attachments" # capabilities allow_create = True allow_delete = True allow_commit = True allow_list = True allow_get = True allow_fetch = True _max_microversion = "3.54" # Properties #: The ID of the attachment. id = resource.Body("id") #: The status of the attachment. status = resource.Body("status") #: The UUID of the attaching instance. instance = resource.Body("instance") #: The UUID of the volume which the attachment belongs to. volume_id = resource.Body("volume_id") #: The time when attachment is attached. attached_at = resource.Body("attach_time") #: The time when attachment is detached. 
detached_at = resource.Body("detach_time") #: The attach mode of attachment, read-only ('ro') or read-and-write # ('rw'), default is 'rw'. attach_mode = resource.Body("mode") #: The connection info used for server to connect the volume. connection_info = resource.Body("connection_info") #: The connector object. connector = resource.Body("connector") def create( self, session, prepend_key=True, base_path=None, *, resource_request_key=None, resource_response_key=None, microversion=None, **params, ): if utils.supports_microversion(session, '3.54'): if not self.attach_mode: self._body.clean(only={'mode'}) return super().create( session, prepend_key=prepend_key, base_path=base_path, resource_request_key=resource_request_key, resource_response_key=resource_response_key, microversion=microversion, **params, ) def complete(self, session, *, microversion=None): """Mark the attachment as completed.""" body = {'os-complete': self.id} if not microversion: microversion = self._get_microversion(session, action='commit') url = os.path.join(Attachment.base_path, self.id, 'action') response = session.post(url, json=body, microversion=microversion) exceptions.raise_from_response(response) def _prepare_request_body( self, patch, prepend_key, *, resource_request_key=None, ): body = self._body.dirty if body.get('volume_id'): body['volume_uuid'] = body.pop('volume_id') if body.get('instance'): body['instance_uuid'] = body.pop('instance') if prepend_key and self.resource_key is not None: body = {self.resource_key: body} return body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/availability_zone.py0000664000175000017500000000172200000000000025445 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class AvailabilityZone(resource.Resource): resource_key = "" resources_key = "availabilityZoneInfo" base_path = "/os-availability-zone" # capabilities allow_list = True #: Properties #: Name of availability zone name = resource.Body("zoneName", type=str) #: State of availability zone, "available" is usual key state = resource.Body("zoneState", type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/backup.py0000664000175000017500000002055400000000000023211 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import typing as ty from openstack import exceptions from openstack import resource from openstack import utils class Backup(resource.Resource): """Volume Backup""" resource_key = "backup" resources_key = "backups" base_path = "/backups" # TODO(gtema): Starting from ~3.31(3.45) Cinder seems to support also fuzzy # search (name~, status~, volume_id~). 
But this is not documented # officially and seem to require microversion be set _query_mapping = resource.QueryParameters( "limit", "marker", "offset", "project_id", "name", "status", "volume_id", "sort_key", "sort_dir", "sort", all_projects="all_tenants", ) # capabilities allow_fetch = True allow_create = True allow_delete = True allow_list = True allow_get = True #: Properties #: backup availability zone availability_zone = resource.Body("availability_zone") #: The container backup in container = resource.Body("container") #: The date and time when the resource was created. created_at = resource.Body("created_at") #: data timestamp #: The time when the data on the volume was first saved. #: If it is a backup from volume, it will be the same as created_at #: for a backup. If it is a backup from a snapshot, #: it will be the same as created_at for the snapshot. data_timestamp = resource.Body('data_timestamp') #: backup description description = resource.Body("description") #: The UUID of the encryption key. Only included for encrypted volumes. encryption_key_id = resource.Body("encryption_key_id") #: Backup fail reason fail_reason = resource.Body("fail_reason") #: Force backup force = resource.Body("force", type=bool) #: has_dependent_backups #: If this value is true, there are other backups depending on this backup. has_dependent_backups = resource.Body('has_dependent_backups', type=bool) #: Indicates whether the backup mode is incremental. #: If this value is true, the backup mode is incremental. #: If this value is false, the backup mode is full. is_incremental = resource.Body("is_incremental", type=bool) #: A list of links associated with this volume. *Type: list* links = resource.Body("links", type=list) #: The backup metadata. New in version 3.43 metadata = resource.Body('metadata', type=dict) #: backup name name = resource.Body("name") #: backup object count object_count = resource.Body("object_count", type=int) #: The UUID of the owning project. 
#: New in version 3.18 project_id = resource.Body('os-backup-project-attr:project_id') #: The size of the volume, in gibibytes (GiB). size = resource.Body("size", type=int) #: The UUID of the source volume snapshot. snapshot_id = resource.Body("snapshot_id") #: backup status #: values: creating, available, deleting, error, restoring, error_restoring status = resource.Body("status") #: The date and time when the resource was updated. updated_at = resource.Body("updated_at") #: The UUID of the project owner. New in 3.56 user_id = resource.Body('user_id') #: The UUID of the volume. volume_id = resource.Body("volume_id") _max_microversion = "3.64" def create(self, session, prepend_key=True, base_path=None, **params): """Create a remote resource based on this instance. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param prepend_key: A boolean indicating whether the resource_key should be prepended in a resource creation request. Default to True. :param str base_path: Base part of the URI for creating resources, if different from :data:`~openstack.resource.Resource.base_path`. :param dict params: Additional params to pass. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_create` is not set to ``True``. 
""" if not self.allow_create: raise exceptions.MethodNotSupported(self, "create") session = self._get_session(session) microversion = self._get_microversion(session, action='create') requires_id = ( self.create_requires_id if self.create_requires_id is not None else self.create_method == 'PUT' ) if self.create_exclude_id_from_body: self._body._dirty.discard("id") if self.create_method == 'POST': request = self._prepare_request( requires_id=requires_id, prepend_key=prepend_key, base_path=base_path, ) # NOTE(gtema) this is a funny example of when attribute # is called "incremental" on create, "is_incremental" on get # and use of "alias" or "aka" is not working for such conflict, # since our preferred attr name is exactly "is_incremental" body = request.body if 'is_incremental' in body['backup']: body['backup']['incremental'] = body['backup'].pop( 'is_incremental' ) response = session.post( request.url, json=request.body, headers=request.headers, microversion=microversion, params=params, ) else: # Just for safety of the implementation (since PUT removed) raise exceptions.ResourceFailure( "Invalid create method: %s" % self.create_method ) has_body = ( self.has_body if self.create_returns_body is None else self.create_returns_body ) self.microversion = microversion self._translate_response(response, has_body=has_body) # direct comparision to False since we need to rule out None if self.has_body and self.create_returns_body is False: # fetch the body if it's required but not returned by create return self.fetch(session) return self def _action(self, session, body, microversion=None): """Preform backup actions given the message body.""" url = utils.urljoin(self.base_path, self.id, 'action') resp = session.post( url, json=body, microversion=self._max_microversion ) exceptions.raise_from_response(resp) return resp def restore(self, session, volume_id=None, name=None): """Restore current backup to volume :param session: openstack session :param volume_id: The ID of the 
volume to restore the backup to. :param name: The name for new volume creation to restore. :return: Updated backup instance """ url = utils.urljoin(self.base_path, self.id, "restore") body: ty.Dict[str, ty.Dict] = {'restore': {}} if volume_id: body['restore']['volume_id'] = volume_id if name: body['restore']['name'] = name if not (volume_id or name): raise exceptions.SDKException( 'Either of `name` or `volume_id` must be specified.' ) response = session.post(url, json=body) self._translate_response(response, has_body=False) return self def force_delete(self, session): """Force backup deletion""" body = {'os-force_delete': None} self._action(session, body) def reset(self, session, status): """Reset the status of the backup""" body = {'os-reset_status': {'status': status}} self._action(session, body) BackupDetail = Backup ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/block_storage_summary.py0000664000175000017500000000174400000000000026337 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class BlockStorageSummary(resource.Resource): base_path = "/volumes/summary" # capabilities allow_fetch = True # Properties #: Total size of all the volumes total_size = resource.Body("total_size") #: Total count of all the volumes total_count = resource.Body("total_count") #: Metadata of all the volumes metadata = resource.Body("metadata") _max_microversion = "3.36" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/capabilities.py0000664000175000017500000000351000000000000024366 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Capabilities(resource.Resource): base_path = "/capabilities" # Capabilities allow_fetch = True #: Properties #: The capabilities description description = resource.Body("description") #: The name of volume backend capabilities. display_name = resource.Body("display_name") #: The driver version. driver_version = resource.Body("driver_version") #: The storage namespace, such as OS::Storage::Capabilities::foo. namespace = resource.Body("namespace") #: The name of the storage pool. pool_name = resource.Body("pool_name") #: The backend volume capabilites list, which consists of cinder #: standard capabilities and vendor unique properties. properties = resource.Body("properties", type=dict) #: A list of volume backends used to replicate volumes on this backend. 
replication_targets = resource.Body("replication_targets", type=list) #: The storage backend for the backend volume. storage_protocol = resource.Body("storage_protocol") #: The name of the vendor. vendor_name = resource.Body("vendor_name") #: The volume type access. visibility = resource.Body("visibility") #: The name of the back-end volume. volume_backend_name = resource.Body("volume_backend_name") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/extension.py0000664000175000017500000000231300000000000023751 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Extension(resource.Resource): resources_key = "extensions" base_path = "/extensions" # Capabilities allow_list = True #: Properties #: The alias for the extension. alias = resource.Body('alias', type=str) #: The extension description. description = resource.Body('description', type=str) #: Links pertaining to this extension. links = resource.Body('links', type=list) #: The name of this extension. name = resource.Body('name') #: The date and time when the resource was updated. #: The date and time stamp format is ISO 8601. 
updated_at = resource.Body('updated', type=str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/group.py0000664000175000017500000000642000000000000023074 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class Group(resource.Resource): resource_key = "group" resources_key = "groups" base_path = "/groups" # capabilities allow_fetch = True allow_create = True allow_delete = True allow_commit = True allow_list = True _query_mapping = resource.QueryParameters( "limit", "marker", "offset", "sort_dir", "sort_key", "sort", all_projects="all_tenants", ) availability_zone = resource.Body("availability_zone") created_at = resource.Body("created_at") description = resource.Body("description") group_snapshot_id = resource.Body("group_snapshot_id") group_type = resource.Body("group_type") project_id = resource.Body("project_id") replication_status = resource.Body("replication_status") source_group_id = resource.Body("source_group_id") status = resource.Body("status") volumes = resource.Body("volumes", type=list) volume_types = resource.Body("volume_types", type=list) _max_microversion = "3.38" def _action(self, session, body): """Preform group actions given the message body.""" session = self._get_session(session) microversion = self._get_microversion(session, 
action='create') url = utils.urljoin(self.base_path, self.id, 'action') response = session.post(url, json=body, microversion=microversion) exceptions.raise_from_response(response) return response def delete(self, session, *, delete_volumes=False): """Delete a group.""" body = {'delete': {'delete-volumes': delete_volumes}} self._action(session, body) def reset(self, session, status): """Resets the status for a group.""" body = {'reset_status': {'status': status}} self._action(session, body) @classmethod def create_from_source( cls, session, group_snapshot_id, source_group_id, name=None, description=None, ): """Creates a new group from source.""" session = cls._get_session(session) microversion = cls._get_microversion(session, action='create') url = utils.urljoin(cls.base_path, 'action') body = { 'create-from-src': { 'name': name, 'description': description, 'group_snapshot_id': group_snapshot_id, 'source_group_id': source_group_id, } } response = session.post(url, json=body, microversion=microversion) exceptions.raise_from_response(response) group = Group() group._translate_response(response=response) return group ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/group_snapshot.py0000664000175000017500000000555000000000000025016 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import resource from openstack import utils class GroupSnapshot(resource.Resource): resource_key = "group_snapshot" resources_key = "group_snapshots" base_path = "/group_snapshots" # capabilities allow_fetch = True allow_create = True allow_delete = True allow_commit = False allow_list = True _query_mapping = resource.QueryParameters( "limit", "marker", "offset", "sort_dir", "sort_key", "sort", all_projects="all_tenants", ) #: Properties #: The date and time when the resource was created. created_at = resource.Body("created_at") #: The group snapshot description. description = resource.Body("description") #: The UUID of the source group. group_id = resource.Body("group_id") #: The group type ID. group_type_id = resource.Body("group_type_id") #: The ID of the group snapshot. id = resource.Body("id") #: The group snapshot name. name = resource.Body("name") #: The UUID of the volume group snapshot project. project_id = resource.Body("project_id") #: The status of the generic group snapshot. 
status = resource.Body("status") # Pagination support was added in microversion 3.29 _max_microversion = '3.29' def _action(self, session, body, microversion=None): """Preform aggregate actions given the message body.""" url = utils.urljoin(self.base_path, self.id, 'action') headers = {'Accept': ''} # TODO(stephenfin): This logic belongs in openstack.resource I suspect if microversion is None: if session.default_microversion: microversion = session.default_microversion else: microversion = utils.maximum_supported_microversion( session, self._max_microversion, ) response = session.post( url, json=body, headers=headers, microversion=microversion, ) exceptions.raise_from_response(response) return response def reset_state(self, session, state): """Resets the status for a group snapshot.""" body = {'reset_status': {'status': state}} return self._action(session, body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/group_type.py0000664000175000017500000001176500000000000024145 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import resource from openstack import utils class GroupType(resource.Resource): resource_key = "group_type" resources_key = "group_types" base_path = "/group_types" # capabilities allow_fetch = True allow_create = True allow_delete = True allow_commit = True allow_list = True _max_microversion = "3.11" #: Properties #: The group type description. description = resource.Body("description") #: Contains the specifications for a group type. group_specs = resource.Body("group_specs", type=dict, default={}) #: Whether the group type is publicly visible. is_public = resource.Body("is_public", type=bool) def fetch_group_specs(self, session): """Fetch group_specs of the group type. These are returned by default if the user has suitable permissions (i.e. you're an admin) but by default you also need the same permissions to access this API. That means this function is kind of useless. However, that is how the API was designed and it is theoretically possible that people will have modified their policy to allow this but not the other so we provide this anyway. :param session: The session to use for making this request. :returns: An updated version of this object. """ url = utils.urljoin(GroupType.base_path, self.id, 'group_specs') microversion = self._get_microversion(session, action='fetch') response = session.get(url, microversion=microversion) exceptions.raise_from_response(response) specs = response.json().get('group_specs', {}) self._update(group_specs=specs) return self def create_group_specs(self, session, specs): """Creates group specs for the group type. This will override whatever specs are already present on the group type. :param session: The session to use for making this request. :param specs: A dict of group specs to set on the group type. :returns: An updated version of this object. 
""" url = utils.urljoin(GroupType.base_path, self.id, 'group_specs') microversion = self._get_microversion(session, action='create') response = session.post( url, json={'group_specs': specs}, microversion=microversion, ) exceptions.raise_from_response(response) specs = response.json().get('group_specs', {}) self._update(group_specs=specs) return self def get_group_specs_property(self, session, prop): """Retrieve a group spec property of the group type. :param session: The session to use for making this request. :param prop: The name of the group spec property to update. :returns: The value of the group spec property. """ url = utils.urljoin(GroupType.base_path, self.id, 'group_specs', prop) microversion = self._get_microversion(session, action='fetch') response = session.get(url, microversion=microversion) exceptions.raise_from_response(response) val = response.json().get(prop) return val def update_group_specs_property(self, session, prop, val): """Update a group spec property of the group type. :param session: The session to use for making this request. :param prop: The name of the group spec property to update. :param val: The value to set for the group spec property. :returns: The updated value of the group spec property. """ url = utils.urljoin(GroupType.base_path, self.id, 'group_specs', prop) microversion = self._get_microversion(session, action='commit') response = session.put( url, json={prop: val}, microversion=microversion ) exceptions.raise_from_response(response) val = response.json().get(prop) return val def delete_group_specs_property(self, session, prop): """Delete a group spec property from the group type. :param session: The session to use for making this request. :param prop: The name of the group spec property to delete. 
:returns: None """ url = utils.urljoin(GroupType.base_path, self.id, 'group_specs', prop) microversion = self._get_microversion(session, action='delete') response = session.delete(url, microversion=microversion) exceptions.raise_from_response(response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/limits.py0000664000175000017500000000643600000000000023250 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class AbsoluteLimit(resource.Resource): # Properties #: The maximum total amount of backups, in gibibytes (GiB). max_total_backup_gigabytes = resource.Body( "maxTotalBackupGigabytes", type=int ) #: The maximum number of backups. max_total_backups = resource.Body("maxTotalBackups", type=int) #: The maximum number of snapshots. max_total_snapshots = resource.Body("maxTotalSnapshots", type=int) #: The maximum total amount of volumes, in gibibytes (GiB). max_total_volume_gigabytes = resource.Body( "maxTotalVolumeGigabytes", type=int ) #: The maximum number of volumes. max_total_volumes = resource.Body("maxTotalVolumes", type=int) #: The total number of backups gibibytes (GiB) used. total_backup_gigabytes_used = resource.Body( "totalBackupGigabytesUsed", type=int ) #: The total number of backups used. total_backups_used = resource.Body("totalBackupsUsed", type=int) #: The total number of gibibytes (GiB) used. 
total_gigabytes_used = resource.Body("totalGigabytesUsed", type=int) #: The total number of snapshots used. total_snapshots_used = resource.Body("totalSnapshotsUsed", type=int) #: The total number of volumes used. total_volumes_used = resource.Body("totalVolumesUsed", type=int) class RateLimit(resource.Resource): # Properties #: Rate limits next availabe time. next_available = resource.Body("next-available") #: Integer for rate limits remaining. remaining = resource.Body("remaining", type=int) #: Unit of measurement for the value parameter. unit = resource.Body("unit") #: Integer number of requests which can be made. value = resource.Body("value", type=int) #: An HTTP verb (POST, PUT, etc.). verb = resource.Body("verb") class RateLimits(resource.Resource): # Properties #: A list of the specific limits that apply to the ``regex`` and ``uri``. limits = resource.Body("limit", type=list, list_type=RateLimit) #: A regex representing which routes this rate limit applies to. regex = resource.Body("regex") #: A URI representing which routes this rate limit applies to. uri = resource.Body("uri") class Limits(resource.Resource): resource_key = "limits" base_path = "/limits" _max_microversion = "3.39" _query_mapping = resource.QueryParameters( "project_id", ) # capabilities allow_fetch = True # Properties #: An absolute limits object. absolute = resource.Body("absolute", type=AbsoluteLimit) #: Rate-limit volume copy bandwidth, used to mitigate #: slow down of data access from the instances. rate = resource.Body("rate", type=list, list_type=RateLimits) # Legacy alias Limit = Limits ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/quota_class_set.py0000664000175000017500000000317600000000000025136 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class QuotaClassSet(resource.Resource): resource_key = 'quota_class_set' base_path = '/os-quota-class-sets' # Capabilities allow_fetch = True allow_commit = True # Properties #: The size (GB) of backups that are allowed for each project. backup_gigabytes = resource.Body('backup_gigabytes', type=int) #: The number of backups that are allowed for each project. backups = resource.Body('backups', type=int) #: The size (GB) of volumes and snapshots that are allowed for each #: project. gigabytes = resource.Body('gigabytes', type=int) #: The number of groups that are allowed for each project. groups = resource.Body('groups', type=int) #: The size (GB) of volumes in request that are allowed for each volume. per_volume_gigabytes = resource.Body('per_volume_gigabytes', type=int) #: The number of snapshots that are allowed for each project. snapshots = resource.Body('snapshots', type=int) #: The number of volumes that are allowed for each project. volumes = resource.Body('volumes', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/quota_set.py0000664000175000017500000000302100000000000023736 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import quota_set from openstack import resource class QuotaSet(quota_set.QuotaSet): #: Properties #: The size (GB) of backups that are allowed for each project. backup_gigabytes = resource.Body('backup_gigabytes', type=int) #: The number of backups that are allowed for each project. backups = resource.Body('backups', type=int) #: The size (GB) of volumes and snapshots that are allowed for each #: project. gigabytes = resource.Body('gigabytes', type=int) #: The number of groups that are allowed for each project. groups = resource.Body('groups', type=int) #: The size (GB) of volumes in request that are allowed for each volume. per_volume_gigabytes = resource.Body('per_volume_gigabytes', type=int) #: The number of snapshots that are allowed for each project. snapshots = resource.Body('snapshots', type=int) #: The number of volumes that are allowed for each project. volumes = resource.Body('volumes', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/resource_filter.py0000664000175000017500000000226600000000000025140 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ResourceFilter(resource.Resource): """Resource Filter""" resources_key = "resource_filters" base_path = "/resource_filters" _query_mapping = resource.QueryParameters( 'resource', include_pagination_defaults=False, ) # Capabilities allow_list = True # resource_filters introduced in 3.33 _max_microversion = '3.33' #: Properties #: The list of filters that are applicable to the specified resource. filters = resource.Body('filters', type=list) #: The resource that the filters will be applied to. resource = resource.Body('resource', type=str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/service.py0000664000175000017500000001237300000000000023404 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import resource from openstack import utils class Service(resource.Resource): resources_key = 'services' base_path = '/os-services' # capabilities allow_list = True _query_mapping = resource.QueryParameters( 'binary', 'host', ) # Properties #: The ID of active storage backend (cinder-volume services only) active_backend_id = resource.Body('active_backend_id') #: The availability zone of service availability_zone = resource.Body('zone') #: The state of storage backend (cinder-volume services only) backend_state = resource.Body('backend_state') #: Binary name of service binary = resource.Body('binary') #: The cluster name (since 3.7) cluster = resource.Body('cluster') #: Disabled reason of service disabled_reason = resource.Body('disabled_reason') #: The name of the host where service runs host = resource.Body('host') # Whether the host is frozen or not (cinder-volume services only) is_frozen = resource.Body('frozen') #: Service name name = resource.Body('name', alias='binary') #: The volume service replication status (cinder-volume services only) replication_status = resource.Body('replication_status') #: State of service state = resource.Body('state') #: Status of service status = resource.Body('status') #: The date and time when the resource was updated updated_at = resource.Body('updated_at') # 3.7 introduced the 'cluster' field _max_microversion = '3.7' @classmethod def find(cls, session, name_or_id, ignore_missing=True, **params): # No direct request possible, thus go directly to list data = cls.list(session, **params) result = None for maybe_result in data: # Since ID might be both int and str force cast id_value = str(cls._get_id(maybe_result)) name_value = maybe_result.name if str(name_or_id) in (id_value, name_value): if 'host' in params and maybe_result['host'] != params['host']: continue # Only allow one resource to be found. If we already # found a match, raise an exception to show it. 
if result is None: result = maybe_result else: msg = "More than one %s exists with the name '%s'." msg = msg % (cls.__name__, name_or_id) raise exceptions.DuplicateResource(msg) if result is not None: return result if ignore_missing: return None raise exceptions.NotFoundException( f"No {cls.__name__} found for {name_or_id}" ) def commit(self, session, prepend_key=False, **kwargs): # we need to set prepend_key to false return super().commit( session, prepend_key=prepend_key, **kwargs, ) def _action(self, session, action, body, microversion=None): if not microversion: microversion = session.default_microversion url = utils.urljoin(Service.base_path, action) response = session.put(url, json=body, microversion=microversion) self._translate_response(response) return self # TODO(stephenfin): Add support for log levels once we have the resource # modelled (it can be done on a deployment wide basis) def enable(self, session): """Enable service.""" body = {'binary': self.binary, 'host': self.host} return self._action(session, 'enable', body) def disable(self, session, *, reason=None): """Disable service.""" body = {'binary': self.binary, 'host': self.host} if not reason: action = 'disable' else: action = 'disable-log-reason' body['disabled_reason'] = reason return self._action(session, action, body) def thaw(self, session): body = {'host': self.host} return self._action(session, 'thaw', body) def freeze(self, session): body = {'host': self.host} return self._action(session, 'freeze', body) def failover( self, session, *, cluster=None, backend_id=None, ): """Failover a service Only applies to replicating cinder-volume services. 
""" body = {'host': self.host} if cluster: body['cluster'] = cluster if backend_id: body['backend_id'] = backend_id action = 'failover_host' if utils.supports_microversion(self, '3.26'): action = 'failover' return self._action(session, action, body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/snapshot.py0000664000175000017500000001161600000000000023602 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import metadata from openstack import exceptions from openstack import format from openstack import resource from openstack import utils class Snapshot(resource.Resource, metadata.MetadataMixin): resource_key = "snapshot" resources_key = "snapshots" base_path = "/snapshots" _query_mapping = resource.QueryParameters( "name", "status", "volume_id", "project_id", "limit", "marker", "offset", "sort_dir", "sort_key", "sort", all_projects="all_tenants", ) # capabilities allow_fetch = True allow_create = True allow_delete = True allow_commit = True allow_list = True # Properties #: Whether this resource consumes quota or not. Resources that not #: counted for quota usage are usually temporary internal resources #: created to perform an operation. #: This is included from microversion 3.65 consumes_quota = resource.Body("consumes_quota") #: The timestamp of this snapshot creation. 
created_at = resource.Body("created_at") #: Description of snapshot. Default is None. description = resource.Body("description") #: The ID of the group snapshot. #: This is included from microversion 3.14 group_snapshot_id = resource.Body("group_snapshot_id") #: Indicate whether to create snapshot, even if the volume is attached. #: Default is ``False``. *Type: bool* is_forced = resource.Body("force", type=format.BoolStr) #: The percentage of completeness the snapshot is currently at. progress = resource.Body("os-extended-snapshot-attributes:progress") #: The project ID this snapshot is associated with. project_id = resource.Body("os-extended-snapshot-attributes:project_id") #: The size of the volume, in GBs. size = resource.Body("size", type=int) #: The current status of this snapshot. Potential values are creating, #: available, deleting, error, and error_deleting. status = resource.Body("status") #: The date and time when the resource was updated. updated_at = resource.Body("updated_at") #: The UUID of the user. #: This is included from microversion 3.41 user_id = resource.Body("user_id") #: The ID of the volume this snapshot was taken of. 
volume_id = resource.Body("volume_id") _max_microversion = '3.65' def _action(self, session, body, microversion=None): """Preform backup actions given the message body.""" url = utils.urljoin(self.base_path, self.id, 'action') resp = session.post( url, json=body, microversion=self._max_microversion ) exceptions.raise_from_response(resp) return resp def force_delete(self, session): """Force snapshot deletion.""" body = {'os-force_delete': None} self._action(session, body) def reset(self, session, status): """Reset the status of the snapshot.""" body = {'os-reset_status': {'status': status}} self._action(session, body) def set_status(self, session, status, progress=None): """Update fields related to the status of a snapshot.""" body = {'os-update_snapshot_status': {'status': status}} if progress is not None: body['os-update_snapshot_status']['progress'] = progress self._action(session, body) @classmethod def manage( cls, session, volume_id, ref, name=None, description=None, metadata=None, ): """Manage a snapshot under block storage provisioning.""" url = '/manageable_snapshots' if not utils.supports_microversion(session, '3.8'): url = '/os-snapshot-manage' body = { 'snapshot': { 'volume_id': volume_id, 'ref': ref, 'name': name, 'description': description, 'metadata': metadata, } } resp = session.post(url, json=body, microversion=cls._max_microversion) exceptions.raise_from_response(resp) snapshot = Snapshot() snapshot._translate_response(resp) return snapshot def unmanage(self, session): """Unmanage a snapshot from block storage provisioning.""" body = {'os-unmanage': None} self._action(session, body) SnapshotDetail = Snapshot ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/stats.py0000664000175000017500000000201300000000000023070 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Pools(resource.Resource): resource_key = "" resources_key = "pools" base_path = "/scheduler-stats/get_pools?detail=True" # capabilities allow_fetch = False allow_create = False allow_delete = False allow_list = True # Properties #: The Cinder name for the pool name = resource.Body("name") #: returns a dict with information about the pool capabilities = resource.Body("capabilities", type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/transfer.py0000664000175000017500000001676300000000000023577 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import resource from openstack import utils class Transfer(resource.Resource): resource_key = "transfer" resources_key = "transfers" base_path = "/volume-transfers" # capabilities allow_create = True allow_delete = True allow_fetch = True allow_list = True allow_get = True # Properties #: UUID of the transfer. id = resource.Body("id") #: The date and time when the resource was created. created_at = resource.Body("created_at") #: Name of the volume to transfer. name = resource.Body("name") #: ID of the volume to transfer. volume_id = resource.Body("volume_id") #: Auth key for the transfer. auth_key = resource.Body("auth_key") #: A list of links associated with this volume. *Type: list* links = resource.Body("links") #: Whether to transfer snapshots or not no_snapshots = resource.Body("no_snapshots") _max_microversion = "3.55" def create( self, session, prepend_key=True, base_path=None, *, resource_request_key=None, resource_response_key=None, microversion=None, **params, ): """Create a volume transfer. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param prepend_key: A boolean indicating whether the resource_key should be prepended in a resource creation request. Default to True. :param str base_path: Base part of the URI for creating resources, if different from :data:`~openstack.resource.Resource.base_path`. :param str resource_request_key: Overrides the usage of self.resource_key when prepending a key to the request body. Ignored if `prepend_key` is false. :param str resource_response_key: Overrides the usage of self.resource_key when processing response bodies. Ignored if `prepend_key` is false. :param str microversion: API version to override the negotiated one. :param dict params: Additional params to pass. :return: This :class:`Resource` instance. 
:raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_create` is not set to ``True``. """ # With MV 3.55 we introduced new API for volume transfer # (/volume-transfers). Prior to that (MV < 3.55), we use # the old API (/os-volume-transfer) if not utils.supports_microversion(session, '3.55'): base_path = '/os-volume-transfer' # With MV 3.55, we also introduce the ability to transfer # snapshot along with the volume. If MV < 3.55, we should # not send 'no_snapshots' parameter in the request. if 'no_snapshots' in params: params.pop('no_snapshots') return super().create( session, prepend_key=prepend_key, base_path=base_path, resource_request_key=resource_request_key, resource_response_key=resource_response_key, microversion=microversion, **params, ) def fetch( self, session, requires_id=True, base_path=None, error_message=None, skip_cache=False, *, resource_response_key=None, microversion=None, **params, ): """Get volume transfer. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param boolean requires_id: A boolean indicating whether resource ID should be part of the requested URI. :param str base_path: Base part of the URI for fetching resources, if different from :data:`~openstack.resource.Resource.base_path`. :param str error_message: An Error message to be returned if requested object does not exist. :param bool skip_cache: A boolean indicating whether optional API cache should be skipped for this invocation. :param str resource_response_key: Overrides the usage of self.resource_key when processing the response body. :param str microversion: API version to override the negotiated one. :param dict params: Additional parameters that can be consumed. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_fetch` is not set to ``True``. 
:raises: :exc:`~openstack.exceptions.NotFoundException` if the resource was not found. """ if not utils.supports_microversion(session, '3.55'): base_path = '/os-volume-transfer' return super().fetch( session, requires_id=requires_id, base_path=base_path, error_message=error_message, skip_cache=skip_cache, resource_response_key=resource_response_key, microversion=microversion, **params, ) def delete( self, session, error_message=None, *, microversion=None, **kwargs ): """Delete a volume transfer. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param str microversion: API version to override the negotiated one. :param dict kwargs: Parameters that will be passed to _prepare_request() :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_commit` is not set to ``True``. :raises: :exc:`~openstack.exceptions.NotFoundException` if the resource was not found. """ if not utils.supports_microversion(session, '3.55'): kwargs['base_path'] = '/os-volume-transfer' return super().delete( session, error_message=error_message, microversion=microversion, **kwargs, ) def accept(self, session, *, auth_key=None): """Accept a volume transfer. :param session: The session to use for making this request. :param auth_key: The authentication key for the volume transfer. :return: This :class:`Transfer` instance. 
""" body = {'accept': {'auth_key': auth_key}} path = self.base_path if not utils.supports_microversion(session, '3.55'): path = '/os-volume-transfer' url = utils.urljoin(path, self.id, 'accept') microversion = self._get_microversion(session, action='commit') resp = session.post( url, json=body, microversion=microversion, ) exceptions.raise_from_response(resp) transfer = Transfer() transfer._translate_response(response=resp) return transfer ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/type.py0000664000175000017500000001365400000000000022730 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class Type(resource.Resource): resource_key = "volume_type" resources_key = "volume_types" base_path = "/types" # capabilities allow_fetch = True allow_create = True allow_delete = True allow_list = True allow_commit = True _query_mapping = resource.QueryParameters( "is_public", "limit", "marker", "offset", "sort_dir", "sort_key", "sort", all_projects="all_tenants", ) # Properties #: Description of the type. description = resource.Body("description") #: A dict of extra specifications. "capabilities" is a usual key. extra_specs = resource.Body("extra_specs", type=dict) #: a private volume-type. 
*Type: bool* is_public = resource.Body('os-volume-type-access:is_public', type=bool) def _extra_specs(self, method, key=None, delete=False, extra_specs=None): extra_specs = extra_specs or {} for k, v in extra_specs.items(): if not isinstance(v, str): raise ValueError( f"The value for {k} ({v}) must be a text string" ) if key is not None: url = utils.urljoin(self.base_path, self.id, "extra_specs", key) else: url = utils.urljoin(self.base_path, self.id, "extra_specs") kwargs = {} if extra_specs: kwargs["json"] = {"extra_specs": extra_specs} response = method(url, headers={}, **kwargs) # ensure Cinder API has not returned us an error exceptions.raise_from_response(response) # DELETE doesn't return a JSON body while everything else does. return response.json() if not delete else None def set_extra_specs(self, session, **extra_specs): """Update extra specs. This call will replace only the extra_specs with the same keys given here. Other keys will not be modified. :param session: The session to use for making this request. :param kwargs extra_specs: Key/value extra_specs pairs to be update on this volume type. All keys and values. :returns: The updated extra specs. """ if not extra_specs: return dict() result = self._extra_specs(session.post, extra_specs=extra_specs) return result["extra_specs"] def delete_extra_specs(self, session, keys): """Delete extra specs. .. note:: This method will do a HTTP DELETE request for every key in keys. :param session: The session to use for this request. :param list keys: The keys to delete. :returns: ``None`` """ for key in keys: self._extra_specs(session.delete, key=key, delete=True) def get_private_access(self, session): """List projects with private access to the volume type. :param session: The session to use for making this request. :returns: The volume type access response. 
""" url = utils.urljoin(self.base_path, self.id, "os-volume-type-access") resp = session.get(url) exceptions.raise_from_response(resp) return resp.json().get("volume_type_access", []) def add_private_access(self, session, project_id): """Add project access from the volume type. :param session: The session to use for making this request. :param project_id: The project to add access for. """ url = utils.urljoin(self.base_path, self.id, "action") body = {"addProjectAccess": {"project": project_id}} resp = session.post(url, json=body) exceptions.raise_from_response(resp) def remove_private_access(self, session, project_id): """Remove project access from the volume type. :param session: The session to use for making this request. :param project_id: The project to remove access for. """ url = utils.urljoin(self.base_path, self.id, "action") body = {"removeProjectAccess": {"project": project_id}} resp = session.post(url, json=body) exceptions.raise_from_response(resp) class TypeEncryption(resource.Resource): resource_key = "encryption" resources_key = "encryption" base_path = "/types/%(volume_type_id)s/encryption" # capabilities allow_fetch = True allow_create = True allow_delete = True allow_list = False allow_commit = True # Properties #: The encryption algorithm or mode. cipher = resource.Body("cipher") #: Notional service where encryption is performed. control_location = resource.Body("control_location") #: The date and time when the resource was created. created_at = resource.Body("created_at") #: The resource is deleted or not. deleted = resource.Body("deleted") #: The date and time when the resource was deleted. deleted_at = resource.Body("deleted_at") #: A ID representing this type. encryption_id = resource.Body("encryption_id", alternate_id=True) #: The Size of encryption key. key_size = resource.Body("key_size") #: The class that provides encryption support. provider = resource.Body("provider") #: The date and time when the resource was updated. 
updated_at = resource.Body("updated_at") #: The ID of the Volume Type. volume_type_id = resource.URI("volume_type_id") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/block_storage/v3/volume.py0000664000175000017500000003252000000000000023247 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import typing as ty from openstack.common import metadata from openstack import exceptions from openstack import format from openstack import resource from openstack import utils class Volume(resource.Resource, metadata.MetadataMixin): resource_key = "volume" resources_key = "volumes" base_path = "/volumes" _query_mapping = resource.QueryParameters( 'name', 'status', 'project_id', 'created_at', 'updated_at', all_projects='all_tenants', ) # capabilities allow_fetch = True allow_create = True allow_delete = True allow_commit = True allow_list = True # Properties #: TODO(briancurtin): This is currently undocumented in the API. attachments = resource.Body("attachments") #: The availability zone. availability_zone = resource.Body("availability_zone") #: ID of the consistency group. consistency_group_id = resource.Body("consistencygroup_id") #: The timestamp of this volume creation. created_at = resource.Body("created_at") #: The date and time when the resource was updated. updated_at = resource.Body("updated_at") #: The volume description. 
description = resource.Body("description") #: Extended replication status on this volume. extended_replication_status = resource.Body( "os-volume-replication:extended_status" ) #: The ID of the group that the volume belongs to. group_id = resource.Body("group_id") #: The volume's current back-end. host = resource.Body("os-vol-host-attr:host") #: The ID of the image from which you want to create the volume. #: Required to create a bootable volume. image_id = resource.Body("imageRef") #: Enables or disables the bootable attribute. You can boot an #: instance from a bootable volume. *Type: bool* is_bootable = resource.Body("bootable", type=format.BoolStr) #: ``True`` if this volume is encrypted, ``False`` if not. #: *Type: bool* is_encrypted = resource.Body("encrypted", type=format.BoolStr) #: Whether volume will be sharable or not. is_multiattach = resource.Body("multiattach", type=bool) #: The volume ID that this volume's name on the back-end is based on. migration_id = resource.Body("os-vol-mig-status-attr:name_id") #: The status of this volume's migration (None means that a migration #: is not currently in progress). migration_status = resource.Body("os-vol-mig-status-attr:migstat") #: The project ID associated with current back-end. project_id = resource.Body("os-vol-tenant-attr:tenant_id") #: Data set by the replication driver replication_driver_data = resource.Body( "os-volume-replication:driver_data" ) #: The provider ID for the volume. provider_id = resource.Body("provider_id") #: Status of replication on this volume. replication_status = resource.Body("replication_status") #: Scheduler hints for the volume scheduler_hints = resource.Body('OS-SCH-HNT:scheduler_hints', type=dict) #: The size of the volume, in GBs. *Type: int* size = resource.Body("size", type=int) #: To create a volume from an existing snapshot, specify the ID of #: the existing volume snapshot. 
If specified, the volume is created #: in same availability zone and with same size of the snapshot. snapshot_id = resource.Body("snapshot_id") #: To create a volume from an existing volume, specify the ID of #: the existing volume. If specified, the volume is created with #: same size of the source volume. source_volume_id = resource.Body("source_volid") #: One of the following values: creating, available, attaching, in-use #: deleting, error, error_deleting, backing-up, restoring-backup, #: error_restoring. For details on these statuses, see the #: Block Storage API documentation. status = resource.Body("status") #: The user ID associated with the volume user_id = resource.Body("user_id") #: One or more metadata key and value pairs about image volume_image_metadata = resource.Body("volume_image_metadata") #: The name of the associated volume type. volume_type = resource.Body("volume_type") _max_microversion = "3.60" def _action(self, session, body, microversion=None): """Preform volume actions given the message body.""" # NOTE: This is using Volume.base_path instead of self.base_path # as both Volume and VolumeDetail instances can be acted on, but # the URL used is sans any additional /detail/ part. 
url = utils.urljoin(Volume.base_path, self.id, 'action') resp = session.post( url, json=body, microversion=self._max_microversion ) exceptions.raise_from_response(resp) return resp def extend(self, session, size): """Extend a volume size.""" body = {'os-extend': {'new_size': size}} self._action(session, body) def set_bootable_status(self, session, bootable=True): """Set volume bootable status flag""" body = {'os-set_bootable': {'bootable': bootable}} self._action(session, body) def set_readonly(self, session, readonly): """Set volume readonly flag""" body = {'os-update_readonly_flag': {'readonly': readonly}} self._action(session, body) def set_image_metadata(self, session, metadata): """Sets image metadata key-value pairs on the volume""" body = {'os-set_image_metadata': metadata} self._action(session, body) def delete_image_metadata(self, session): """Remove all image metadata from the volume""" for key in self.metadata: body = {'os-unset_image_metadata': key} self._action(session, body) def delete_image_metadata_item(self, session, key): """Remove a single image metadata from the volume""" body = {'os-unset_image_metadata': key} self._action(session, body) def reset_status( self, session, status=None, attach_status=None, migration_status=None ): """Reset volume statuses (admin operation)""" body: ty.Dict[str, ty.Dict[str, str]] = {'os-reset_status': {}} if status: body['os-reset_status']['status'] = status if attach_status: body['os-reset_status']['attach_status'] = attach_status if migration_status: body['os-reset_status']['migration_status'] = migration_status self._action(session, body) def revert_to_snapshot(self, session, snapshot_id): """Revert volume to its snapshot""" utils.require_microversion(session, "3.40") body = {'revert': {'snapshot_id': snapshot_id}} self._action(session, body) def attach(self, session, mountpoint, instance=None, host_name=None): """Attach volume to server""" body = {'os-attach': {'mountpoint': mountpoint}} if instance is not 
None: body['os-attach']['instance_uuid'] = instance elif host_name is not None: body['os-attach']['host_name'] = host_name else: raise ValueError( 'Either instance_uuid or host_name must be specified' ) self._action(session, body) def detach(self, session, attachment, force=False, connector=None): """Detach volume from server""" if not force: body = {'os-detach': {'attachment_id': attachment}} if force: body = {'os-force_detach': {'attachment_id': attachment}} if connector: body['os-force_detach']['connector'] = connector self._action(session, body) @classmethod def manage( cls, session, host, ref, name=None, description=None, volume_type=None, availability_zone=None, metadata=None, bootable=False, cluster=None, ): """Manage an existing volume.""" url = '/manageable_volumes' if not utils.supports_microversion(session, '3.8'): url = '/os-volume-manage' body = { 'volume': { 'host': host, 'ref': ref, 'name': name, 'description': description, 'volume_type': volume_type, 'availability_zone': availability_zone, 'metadata': metadata, 'bootable': bootable, } } if cluster is not None: body['volume']['cluster'] = cluster resp = session.post(url, json=body, microversion=cls._max_microversion) exceptions.raise_from_response(resp) volume = Volume() volume._translate_response(resp) return volume def unmanage(self, session): """Unmanage volume""" body = {'os-unmanage': None} self._action(session, body) def retype(self, session, new_type, migration_policy=None): """Change volume type""" body = {'os-retype': {'new_type': new_type}} if migration_policy: body['os-retype']['migration_policy'] = migration_policy self._action(session, body) def migrate( self, session, host=None, force_host_copy=False, lock_volume=False, cluster=None, ): """Migrate volume""" req = dict() if host is not None: req['host'] = host if force_host_copy: req['force_host_copy'] = force_host_copy if lock_volume: req['lock_volume'] = lock_volume if cluster is not None: req['cluster'] = cluster 
utils.require_microversion(session, "3.16") body = {'os-migrate_volume': req} self._action(session, body) def complete_migration(self, session, new_volume_id, error=False): """Complete volume migration""" body = { 'os-migrate_volume_completion': { 'new_volume': new_volume_id, 'error': error, } } self._action(session, body) def force_delete(self, session): """Force volume deletion""" body = {'os-force_delete': None} self._action(session, body) def upload_to_image( self, session, image_name, force=False, disk_format=None, container_format=None, visibility=None, protected=None, ): """Upload the volume to image service""" req = dict(image_name=image_name, force=force) if disk_format is not None: req['disk_format'] = disk_format if container_format is not None: req['container_format'] = container_format if visibility is not None: req['visibility'] = visibility if protected is not None: req['protected'] = protected if visibility is not None or protected is not None: utils.require_microversion(session, "3.1") body = {'os-volume_upload_image': req} resp = self._action(session, body).json() return resp['os-volume_upload_image'] def reserve(self, session): """Reserve volume""" body = {'os-reserve': None} self._action(session, body) def unreserve(self, session): """Unreserve volume""" body = {'os-unreserve': None} self._action(session, body) def begin_detaching(self, session): """Update volume status to 'detaching'""" body = {'os-begin_detaching': None} self._action(session, body) def abort_detaching(self, session): """Roll back volume status to 'in-use'""" body = {'os-roll_detaching': None} self._action(session, body) def init_attachment(self, session, connector): """Initialize volume attachment""" body = {'os-initialize_connection': {'connector': connector}} resp = self._action(session, body).json() return resp['connection_info'] def terminate_attachment(self, session, connector): """Terminate volume attachment""" body = {'os-terminate_connection': {'connector': connector}} 
self._action(session, body) def _prepare_request_body(self, patch, prepend_key): body = self._body.dirty # Scheduler hints is external to the standard volume request # so pass it separately and not under the volume JSON object. scheduler_hints = None if 'OS-SCH-HNT:scheduler_hints' in body.keys(): scheduler_hints = body.pop('OS-SCH-HNT:scheduler_hints') if prepend_key and self.resource_key is not None: body = {self.resource_key: body} # If scheduler hints was passed in the request but the value is # None, it doesn't make a difference to include it. if scheduler_hints: body['OS-SCH-HNT:scheduler_hints'] = scheduler_hints return body VolumeDetail = Volume ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2333076 openstacksdk-4.0.0/openstack/cloud/0000775000175000017500000000000000000000000017324 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/__init__.py0000664000175000017500000000121400000000000021433 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from openstack.cloud.exc import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_accelerator.py0000664000175000017500000001324500000000000022326 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack.cloud import openstackcloud class AcceleratorCloudMixin(openstackcloud._OpenStackCloudMixin): def list_deployables(self, filters=None): """List all available deployables. :param filters: (optional) dict of filter conditions to push down :returns: A list of accelerator ``Deployable`` objects. """ # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.accelerator.deployables(**filters)) def list_devices(self, filters=None): """List all devices. :param filters: (optional) dict of filter conditions to push down :returns: A list of accelerator ``Device`` objects. """ # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.accelerator.devices(**filters)) def list_device_profiles(self, filters=None): """List all device_profiles. :param filters: (optional) dict of filter conditions to push down :returns: A list of accelerator ``DeviceProfile`` objects. 
""" # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.accelerator.device_profiles(**filters)) def create_device_profile(self, attrs): """Create a device_profile. :param attrs: The info of device_profile to be created. :returns: An accelerator ``DeviceProfile`` objects. """ return self.accelerator.create_device_profile(**attrs) def delete_device_profile(self, name_or_id, filters): """Delete a device_profile. :param name_or_id: The name or uuid of the device profile to be deleted. :param filters: dict of filter conditions to push down :returns: True if delete succeeded, False otherwise. """ device_profile = self.accelerator.get_device_profile( name_or_id, filters, ) if device_profile is None: self.log.debug( "device_profile %s not found for deleting", name_or_id, ) return False self.accelerator.delete_device_profile(device_profile=device_profile) return True def list_accelerator_requests(self, filters=None): """List all accelerator_requests. :param filters: (optional) dict of filter conditions to push down :returns: A list of accelerator ``AcceleratorRequest`` objects. """ # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.accelerator.accelerator_requests(**filters)) def delete_accelerator_request(self, name_or_id, filters): """Delete a accelerator_request. :param name_or_id: The name or UUID of the accelerator request to be deleted. :param filters: dict of filter conditions to push down :returns: True if delete succeeded, False otherwise. """ accelerator_request = self.accelerator.get_accelerator_request( name_or_id, filters, ) if accelerator_request is None: self.log.debug( "accelerator_request %s not found for deleting", name_or_id, ) return False self.accelerator.delete_accelerator_request( accelerator_request=accelerator_request, ) return True def create_accelerator_request(self, attrs): """Create an accelerator_request. 
:param attrs: The info of accelerator_request to be created. :returns: An accelerator ``AcceleratorRequest`` object. """ return self.accelerator.create_accelerator_request(**attrs) def bind_accelerator_request(self, uuid, properties): """Bind an accelerator to VM. :param uuid: The uuid of the accelerator_request to be binded. :param properties: The info of VM that will bind the accelerator. :returns: True if bind succeeded, False otherwise. """ accelerator_request = self.accelerator.get_accelerator_request(uuid) if accelerator_request is None: self.log.debug( "accelerator_request %s not found for unbinding", uuid ) return False return self.accelerator.update_accelerator_request(uuid, properties) def unbind_accelerator_request(self, uuid, properties): """Unbind an accelerator from VM. :param uuid: The uuid of the accelerator_request to be unbinded. :param properties: The info of VM that will unbind the accelerator. :returns: True if unbind succeeded, False otherwise. """ accelerator_request = self.accelerator.get_accelerator_request(uuid) if accelerator_request is None: self.log.debug( "accelerator_request %s not found for unbinding", uuid ) return False return self.accelerator.update_accelerator_request(uuid, properties) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_baremetal.py0000664000175000017500000005766600000000000022015 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import contextlib import sys import warnings import jsonpatch from openstack.cloud import openstackcloud from openstack import exceptions from openstack import warnings as os_warnings def _normalize_port_list(nics): ports = [] for row in nics: if isinstance(row, str): address = row row = {} elif 'mac' in row: address = row.pop('mac') else: try: address = row.pop('address') except KeyError: raise TypeError( "Either 'address' or 'mac' must be provided " "for port %s" % row ) ports.append(dict(row, address=address)) return ports class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin): def list_nics(self): """Return a list of all bare metal ports.""" return list(self.baremetal.ports(details=True)) def list_nics_for_machine(self, uuid): """Returns a list of ports present on the machine node. :param uuid: String representing machine UUID value in order to identify the machine. :returns: A list of ports. """ # TODO(dtantsur): support node names here. return list(self.baremetal.ports(details=True, node_id=uuid)) def get_nic_by_mac(self, mac): """Get bare metal NIC by its hardware address (usually MAC).""" results = list(self.baremetal.ports(address=mac, details=True)) try: return results[0] except IndexError: return None def list_machines(self): """List Machines. :returns: list of :class:`~openstack.baremetal.v1.node.Node`. """ return list(self.baremetal.nodes()) def get_machine(self, name_or_id): """Get Machine by name or uuid Search the baremetal host out by utilizing the supplied id value which can consist of a name or UUID. :param name_or_id: A node name or UUID that will be looked up. :rtype: :class:`~openstack.baremetal.v1.node.Node`. :returns: The node found or None if no nodes are found. 
""" try: return self.baremetal.find_node(name_or_id, ignore_missing=False) except exceptions.NotFoundException: return None def get_machine_by_mac(self, mac): """Get machine by port MAC address :param mac: Port MAC address to query in order to return a node. :rtype: :class:`~openstack.baremetal.v1.node.Node`. :returns: The node found or None if no nodes are found. """ nic = self.get_nic_by_mac(mac) if nic is None: return None else: return self.get_machine(nic['node_uuid']) def inspect_machine(self, name_or_id, wait=False, timeout=3600): """Inspect a Barmetal machine Engages the Ironic node inspection behavior in order to collect metadata about the baremetal machine. :param name_or_id: String representing machine name or UUID value in order to identify the machine. :param wait: Boolean value controlling if the method is to wait for the desired state to be reached or a failure to occur. :param timeout: Integer value, defautling to 3600 seconds, for the wait state to reach completion. :rtype: :class:`~openstack.baremetal.v1.node.Node`. :returns: Current state of the node. """ return_to_available = False node = self.baremetal.get_node(name_or_id) # NOTE(TheJulia): If in available state, we can do this. However, # we need to move the machine back to manageable first. if node.provision_state == 'available': if node.instance_id: raise exceptions.SDKException( "Refusing to inspect available machine %(node)s " "which is associated with an instance " "(instance_uuid %(inst)s)" % {'node': node.id, 'inst': node.instance_id} ) return_to_available = True # NOTE(TheJulia): Changing available machine to managedable state # and due to state transitions we need to until that transition has # completed. 
node = self.baremetal.set_node_provision_state( node, 'manage', wait=True, timeout=timeout ) if node.provision_state not in ('manageable', 'inspect failed'): raise exceptions.SDKException( "Machine %(node)s must be in 'manageable', 'inspect failed' " "or 'available' provision state to start inspection, the " "current state is %(state)s" % {'node': node.id, 'state': node.provision_state} ) node = self.baremetal.set_node_provision_state( node, 'inspect', wait=True, timeout=timeout ) if return_to_available: node = self.baremetal.set_node_provision_state( node, 'provide', wait=True, timeout=timeout ) return node @contextlib.contextmanager def _delete_node_on_error(self, node): try: yield except Exception as exc: self.log.debug( "cleaning up node %s because of an error: %s", node.id, exc ) tb = sys.exc_info()[2] try: self.baremetal.delete_node(node) except Exception: self.log.debug( "could not remove node %s", node.id, exc_info=True ) raise exc.with_traceback(tb) def register_machine( self, nics, wait=False, timeout=3600, lock_timeout=600, provision_state='available', **kwargs ): """Register Baremetal with Ironic Allows for the registration of Baremetal nodes with Ironic and population of pertinant node information or configuration to be passed to the Ironic API for the node. This method also creates ports for a list of MAC addresses passed in to be utilized for boot and potentially network configuration. If a failure is detected creating the network ports, any ports created are deleted, and the node is removed from Ironic. :param nics: An array of ports that represent the network interfaces for the node to be created. The ports are created after the node is enrolled but before it goes through cleaning. Example:: [ {'address': 'aa:bb:cc:dd:ee:01'}, {'address': 'aa:bb:cc:dd:ee:02'} ] Alternatively, you can provide an array of MAC addresses. 
:param wait: Boolean value, defaulting to false, to wait for the node to reach the available state where the node can be provisioned. It must be noted, when set to false, the method will still wait for locks to clear before sending the next required command. :param timeout: Integer value, defautling to 3600 seconds, for the wait state to reach completion. :param lock_timeout: Integer value, defaulting to 600 seconds, for locks to clear. :param provision_state: The expected provision state, one of "enroll" "manageable" or "available". Using "available" results in automated cleaning. :param kwargs: Key value pairs to be passed to the Ironic API, including uuid, name, chassis_uuid, driver_info, properties. :returns: Current state of the node. :rtype: :class:`~openstack.baremetal.v1.node.Node`. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if provision_state not in ('enroll', 'manageable', 'available'): raise ValueError( 'Initial provision state must be enroll, ' 'manageable or available, got %s' % provision_state ) # Available is tricky: it cannot be directly requested on newer API # versions, we need to go through cleaning. But we cannot go through # cleaning until we create ports. 
if provision_state != 'available': kwargs['provision_state'] = 'enroll' machine = self.baremetal.create_node(**kwargs) with self._delete_node_on_error(machine): # Making a node at least manageable if ( machine.provision_state == 'enroll' and provision_state != 'enroll' ): machine = self.baremetal.set_node_provision_state( machine, 'manage', wait=True, timeout=timeout ) machine = self.baremetal.wait_for_node_reservation( machine, timeout=lock_timeout ) # Create NICs before trying to run cleaning created_nics = [] try: for port in _normalize_port_list(nics): nic = self.baremetal.create_port( node_id=machine.id, **port ) created_nics.append(nic.id) except Exception: for uuid in created_nics: try: self.baremetal.delete_port(uuid) except Exception: pass raise if ( machine.provision_state != 'available' and provision_state == 'available' ): machine = self.baremetal.set_node_provision_state( machine, 'provide', wait=wait, timeout=timeout ) return machine def unregister_machine(self, nics, uuid, wait=None, timeout=600): """Unregister Baremetal from Ironic Removes entries for Network Interfaces and baremetal nodes from an Ironic API :param nics: An array of strings that consist of MAC addresses to be removed. :param string uuid: The UUID of the node to be deleted. :param wait: DEPRECATED, do not use. :param timeout: Integer value, representing seconds with a default value of 600, which controls the maximum amount of time to block until a lock is released on machine. :raises: :class:`~openstack.exceptions.SDKException` on operation failure. 
""" if wait is not None: warnings.warn( "wait argument is deprecated and has no effect", os_warnings.OpenStackDeprecationWarning, ) machine = self.get_machine(uuid) invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed'] if machine['provision_state'] in invalid_states: raise exceptions.SDKException( "Error unregistering node '%s' due to current provision " "state '%s'" % (uuid, machine['provision_state']) ) # NOTE(TheJulia) There is a high possibility of a lock being present # if the machine was just moved through the state machine. This was # previously concealed by exception retry logic that detected the # failure, and resubitted the request in python-ironicclient. try: self.baremetal.wait_for_node_reservation(machine, timeout) except exceptions.SDKException as e: raise exceptions.SDKException( "Error unregistering node '%s': Exception occured while" " waiting to be able to proceed: %s" % (machine['uuid'], e) ) for nic in _normalize_port_list(nics): try: port = next(self.baremetal.ports(address=nic['address'])) except StopIteration: continue self.baremetal.delete_port(port.id) self.baremetal.delete_node(uuid) def patch_machine(self, name_or_id, patch): """Patch Machine Information This method allows for an interface to manipulate node entries within Ironic. :param string name_or_id: A machine name or UUID to be updated. :param patch: The JSON Patch document is a list of dictonary objects that comply with RFC 6902 which can be found at https://tools.ietf.org/html/rfc6902. Example patch construction:: patch=[] patch.append({ 'op': 'remove', 'path': '/instance_info' }) patch.append({ 'op': 'replace', 'path': '/name', 'value': 'newname' }) patch.append({ 'op': 'add', 'path': '/driver_info/username', 'value': 'administrator' }) :returns: Current state of the node. :rtype: :class:`~openstack.baremetal.v1.node.Node`. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" return self.baremetal.patch_node(name_or_id, patch) def update_machine(self, name_or_id, **attrs): """Update a machine with new configuration information A user-friendly method to perform updates of a machine, in whole or part. :param string name_or_id: A machine name or UUID to be updated. :param attrs: Attributes to updated on the machine. :returns: Dictionary containing a machine sub-dictonary consisting of the updated data returned from the API update operation, and a list named changes which contains all of the API paths that received updates. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ machine = self.get_machine(name_or_id) if not machine: raise exceptions.SDKException( "Machine update failed to find Machine: %s. " % name_or_id ) new_config = dict(machine._to_munch(), **attrs) try: patch = jsonpatch.JsonPatch.from_diff( machine._to_munch(), new_config ) except Exception as e: raise exceptions.SDKException( "Machine update failed - Error generating JSON patch object " "for submission to the API. Machine: %s Error: %s" % (name_or_id, e) ) if not patch: return dict(node=machine, changes=None) change_list = [change['path'] for change in patch] node = self.baremetal.update_node(machine, **attrs) return dict(node=node, changes=change_list) def attach_port_to_machine(self, name_or_id, port_name_or_id): """Attach a virtual port to the bare metal machine. :param string name_or_id: A machine name or UUID. :param string port_name_or_id: A port name or UUID. Note that this is a Network service port, not a bare metal NIC. :return: Nothing. """ machine = self.get_machine(name_or_id) port = self.network.find_port(port_name_or_id) self.baremetal.attach_vif_to_node(machine, port['id']) def detach_port_from_machine(self, name_or_id, port_name_or_id): """Detach a virtual port from the bare metal machine. :param string name_or_id: A machine name or UUID. :param string port_name_or_id: A port name or UUID. 
Note that this is a Network service port, not a bare metal NIC. :return: Nothing. """ machine = self.get_machine(name_or_id) port = self.network.find_port(port_name_or_id) self.baremetal.detach_vif_from_node(machine, port['id']) def list_ports_attached_to_machine(self, name_or_id): """List virtual ports attached to the bare metal machine. :param string name_or_id: A machine name or UUID. :returns: List of ``openstack.Resource`` objects representing the ports. """ machine = self.get_machine(name_or_id) vif_ids = self.baremetal.list_node_vifs(machine) return [self.network.find_port(vif) for vif in vif_ids] def validate_machine(self, name_or_id, for_deploy=True): """Validate parameters of the machine. :param string name_or_id: The Name or UUID value representing the baremetal node. :param bool for_deploy: If ``True``, validate readiness for deployment, otherwise validate only the power management properties. :raises: :exc:`~openstack.exceptions.ValidationException` """ if for_deploy: ifaces = ['boot', 'deploy', 'management', 'power'] else: ifaces = ['power'] self.baremetal.validate_node(name_or_id, required=ifaces) def validate_node(self, uuid): warnings.warn( 'validate_node is deprecated, please use validate_machine instead', os_warnings.OpenStackDeprecationWarning, ) self.baremetal.validate_node(uuid) def node_set_provision_state( self, name_or_id, state, configdrive=None, wait=False, timeout=3600 ): """Set Node Provision State Enables a user to provision a Machine and optionally define a config drive to be utilized. :param string name_or_id: The Name or UUID value representing the baremetal node. :param string state: The desired provision state for the baremetal node. :param string configdrive: An optional URL or file or path representing the configdrive. In the case of a directory, the client API will create a properly formatted configuration drive file and post the file contents to the API for deployment. 
:param boolean wait: A boolean value, defaulted to false, to control if the method will wait for the desire end state to be reached before returning. :param integer timeout: Integer value, defaulting to 3600 seconds, representing the amount of time to wait for the desire end state to be reached. :returns: Current state of the machine upon exit of the method. :rtype: :class:`~openstack.baremetal.v1.node.Node`. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ node = self.baremetal.set_node_provision_state( name_or_id, target=state, config_drive=configdrive, wait=wait, timeout=timeout, ) return node def set_machine_maintenance_state( self, name_or_id, state=True, reason=None ): """Set Baremetal Machine Maintenance State Sets Baremetal maintenance state and maintenance reason. :param string name_or_id: The Name or UUID value representing the baremetal node. :param boolean state: The desired state of the node. True being in maintenance where as False means the machine is not in maintenance mode. This value defaults to True if not explicitly set. :param string reason: An optional freeform string that is supplied to the baremetal API to allow for notation as to why the node is in maintenance state. :returns: None :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if state: self.baremetal.set_node_maintenance(name_or_id, reason) else: self.baremetal.unset_node_maintenance(name_or_id) def remove_machine_from_maintenance(self, name_or_id): """Remove Baremetal Machine from Maintenance State Similarly to set_machine_maintenance_state, this method removes a machine from maintenance state. It must be noted that this method simpily calls set_machine_maintenace_state for the name_or_id requested and sets the state to False. :param string name_or_id: The Name or UUID value representing the baremetal node. :returns: None :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" self.baremetal.unset_node_maintenance(name_or_id) def set_machine_power_on(self, name_or_id): """Activate baremetal machine power This is a method that sets the node power state to "on". :params string name_or_id: A string representing the baremetal node to have power turned to an "on" state. :returns: None :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ self.baremetal.set_node_power_state(name_or_id, 'power on') def set_machine_power_off(self, name_or_id): """De-activate baremetal machine power This is a method that sets the node power state to "off". :params string name_or_id: A string representing the baremetal node to have power turned to an "off" state. :returns: None :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ self.baremetal.set_node_power_state(name_or_id, 'power off') def set_machine_power_reboot(self, name_or_id): """De-activate baremetal machine power This is a method that sets the node power state to "reboot", which in essence changes the machine power state to "off", and that back to "on". :params string name_or_id: A string representing the baremetal node to have power turned to an "off" state. :returns: None :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" self.baremetal.set_node_power_state(name_or_id, 'rebooting') def activate_node(self, uuid, configdrive=None, wait=False, timeout=1200): self.node_set_provision_state( uuid, 'active', configdrive, wait=wait, timeout=timeout ) def deactivate_node(self, uuid, wait=False, timeout=1200): self.node_set_provision_state( uuid, 'deleted', wait=wait, timeout=timeout ) def set_node_instance_info(self, uuid, patch): warnings.warn( "The set_node_instance_info call is deprecated, " "use patch_machine or update_machine instead", os_warnings.OpenStackDeprecationWarning, ) return self.patch_machine(uuid, patch) def purge_node_instance_info(self, uuid): warnings.warn( "The purge_node_instance_info call is deprecated, " "use patch_machine or update_machine instead", os_warnings.OpenStackDeprecationWarning, ) return self.patch_machine( uuid, dict(path='/instance_info', op='remove') ) def wait_for_baremetal_node_lock(self, node, timeout=30): """Wait for a baremetal node to have no lock. DEPRECATED, use ``wait_for_node_reservation`` on the `baremetal` proxy. :raises: :class:`~openstack.exceptions.SDKException` upon client failure. :returns: None """ warnings.warn( "The wait_for_baremetal_node_lock call is deprecated " "in favor of wait_for_node_reservation on the baremetal " "proxy", os_warnings.OpenStackDeprecationWarning, ) self.baremetal.wait_for_node_reservation(node, timeout) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_block_storage.py0000664000175000017500000007353700000000000022672 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from openstack.cloud import _utils from openstack.cloud import openstackcloud from openstack import exceptions from openstack import warnings as os_warnings class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin): # TODO(stephenfin): Remove 'cache' in a future major version def list_volumes(self, cache=True): """List all available volumes. :param cache: **DEPRECATED** This parameter no longer does anything. :returns: A list of volume ``Volume`` objects. """ warnings.warn( "the 'cache' argument is deprecated and no longer does anything; " "consider removing it from calls", os_warnings.OpenStackDeprecationWarning, ) return list(self.block_storage.volumes()) # TODO(stephenfin): Remove 'get_extra' in a future major version def list_volume_types(self, get_extra=None): """List all available volume types. :param get_extra: **DEPRECATED** This parameter no longer does anything. :returns: A list of volume ``Type`` objects. """ if get_extra is not None: warnings.warn( "the 'get_extra' argument is deprecated and no longer does " "anything; consider removing it from calls", os_warnings.OpenStackDeprecationWarning, ) return list(self.block_storage.types()) # TODO(stephenfin): Remove 'filters' in a future major version def get_volume(self, name_or_id, filters=None): """Get a volume by name or ID. :param name_or_id: Name or unique ID of the volume. :param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. 
Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A volume ``Volume`` object if found, else None. """ return _utils._get_entity(self, 'volume', name_or_id, filters) def get_volume_by_id(self, id): """Get a volume by ID :param id: ID of the volume. :returns: A volume ``Volume`` object if found, else None. """ return self.block_storage.get_volume(id) # TODO(stephenfin): Remove 'filters' in a future major version def get_volume_type(self, name_or_id, filters=None): """Get a volume type by name or ID. :param name_or_id: Name or unique ID of the volume type. :param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A volume ``Type`` object if found, else None. """ return _utils._get_entity(self, 'volume_type', name_or_id, filters) def create_volume( self, size, wait=True, timeout=None, image=None, bootable=None, **kwargs, ): """Create a volume. :param size: Size, in GB of the volume to create. :param wait: If true, waits for volume to be created. :param timeout: Seconds to wait for volume creation. None is forever. :param image: (optional) Image name, ID or object from which to create the volume :param bootable: (optional) Make this volume bootable. If set, wait will also be set to true. :param kwargs: Keyword arguments as expected for cinder client. :returns: The created volume ``Volume`` object. :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time exceeded. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" if bootable is not None: wait = True if image: image_obj = self.image.find_image(image) if not image_obj: raise exceptions.SDKException( f"Image {image} was requested as the basis for a new " f"volume but was not found on the cloud" ) kwargs['imageRef'] = image_obj['id'] kwargs = self._get_volume_kwargs(kwargs) kwargs['size'] = size volume = self.block_storage.create_volume(**kwargs) if volume['status'] == 'error': raise exceptions.SDKException("Error in creating volume") if wait: self.block_storage.wait_for_status(volume, wait=timeout) if bootable: self.block_storage.set_volume_bootable_status(volume, True) return volume def update_volume(self, name_or_id, **kwargs): """Update a volume. :param name_or_id: Name or unique ID of the volume. :param kwargs: Volume attributes to be updated. :returns: The updated volume ``Volume`` object. """ kwargs = self._get_volume_kwargs(kwargs) volume = self.get_volume(name_or_id) if not volume: raise exceptions.SDKException("Volume %s not found." % name_or_id) volume = self.block_storage.update_volume(volume, **kwargs) return volume def set_volume_bootable(self, name_or_id, bootable=True): """Set a volume's bootable flag. :param name_or_id: Name or unique ID of the volume. :param bool bootable: Whether the volume should be bootable. (Defaults to True) :returns: None :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time exceeded. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ volume = self.get_volume(name_or_id) if not volume: raise exceptions.SDKException( "Volume {name_or_id} does not exist".format( name_or_id=name_or_id ) ) self.block_storage.set_volume_bootable_status(volume, bootable) def delete_volume( self, name_or_id=None, wait=True, timeout=None, force=False, ): """Delete a volume. :param name_or_id: Name or unique ID of the volume. :param wait: If true, waits for volume to be deleted. :param timeout: Seconds to wait for volume deletion. None is forever. 
:param force: Force delete volume even if the volume is in deleting or error_deleting state. :returns: True if deletion was successful, else False. :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time exceeded. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ volume = self.block_storage.find_volume(name_or_id) if not volume: self.log.debug( "Volume %(name_or_id)s does not exist", {'name_or_id': name_or_id}, exc_info=True, ) return False try: self.block_storage.delete_volume(volume, force=force) except exceptions.SDKException: self.log.exception("error in deleting volume") raise if wait: self.block_storage.wait_for_delete(volume, wait=timeout) return True # TODO(stephenfin): Remove 'cache' in a future major version def get_volumes(self, server, cache=True): """Get volumes for a server. :param server: The server to fetch volumes for. :param cache: **DEPRECATED** This parameter no longer does anything. :returns: A list of volume ``Volume`` objects. """ volumes = [] for volume in self.list_volumes(cache=cache): for attach in volume['attachments']: if attach['server_id'] == server['id']: volumes.append(volume) return volumes def get_volume_limits(self, name_or_id=None): """Get volume limits for the current project :param name_or_id: (optional) Project name or ID to get limits for if different from the current project :returns: The volume ``Limits`` object if found, else None. """ params = {} if name_or_id: project = self.identity.find_project(name_or_id) if not project: raise exceptions.SDKException("project does not exist") params['project'] = project return self.block_storage.get_limits(**params) def get_volume_id(self, name_or_id): """Get ID of a volume. :param name_or_id: Name or unique ID of the volume. :returns: The ID of the volume if found, else None. """ volume = self.get_volume(name_or_id) if volume: return volume['id'] return None def volume_exists(self, name_or_id): """Check if a volume exists. 
:param name_or_id: Name or unique ID of the volume. :returns: True if the volume exists, else False. """ return self.get_volume(name_or_id) is not None def get_volume_attach_device(self, volume, server_id): """Return the device name a volume is attached to for a server. This can also be used to verify if a volume is attached to a particular server. :param volume: The volume to fetch the device name from. :param server_id: ID of server to check. :returns: Device name if attached, None if volume is not attached. """ for attach in volume['attachments']: if server_id == attach['server_id']: return attach['device'] return None def detach_volume(self, server, volume, wait=True, timeout=None): """Detach a volume from a server. :param server: The server dict to detach from. :param volume: The volume dict to detach. :param wait: If true, waits for volume to be detached. :param timeout: Seconds to wait for volume detachment. None is forever. :returns: None :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time exceeded. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ self.compute.delete_volume_attachment( server=server['id'], volume=volume['id'], ignore_missing=False, ) if wait: vol = self.get_volume(volume['id']) self.block_storage.wait_for_status(vol) def attach_volume( self, server, volume, device=None, wait=True, timeout=None, ): """Attach a volume to a server. This will attach a volume, described by the passed in volume dict (as returned by get_volume()), to the server described by the passed in server dict (as returned by get_server()) on the named device on the server. If the volume is already attached to the server, or generally not available, then an exception is raised. To re-attach to a server, but under a different device, the user must detach it first. :param server: The server dict to attach to. :param volume: The volume dict to attach. :param device: The device name where the volume will attach. 
:param wait: If true, waits for volume to be attached. :param timeout: Seconds to wait for volume attachment. None is forever. :returns: a volume attachment object. :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time exceeded. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ dev = self.get_volume_attach_device(volume, server['id']) if dev: raise exceptions.SDKException( "Volume %s already attached to server %s on device %s" % (volume['id'], server['id'], dev) ) if volume['status'] != 'available': raise exceptions.SDKException( "Volume %s is not available. Status is '%s'" % (volume['id'], volume['status']) ) payload = {} if device: payload['device'] = device attachment = self.compute.create_volume_attachment( server=server['id'], volume=volume['id'], **payload, ) if wait: if not hasattr(volume, 'fetch'): # If we got volume as dict we need to re-fetch it to be able to # use wait_for_status. volume = self.block_storage.get_volume(volume['id']) self.block_storage.wait_for_status(volume, 'in-use', wait=timeout) return attachment def _get_volume_kwargs(self, kwargs): name = kwargs.pop('name', kwargs.pop('display_name', None)) description = kwargs.pop( 'description', kwargs.pop('display_description', None) ) if name: kwargs['name'] = name if description: kwargs['description'] = description return kwargs @_utils.valid_kwargs( 'name', 'display_name', 'description', 'display_description' ) def create_volume_snapshot( self, volume_id, force=False, wait=True, timeout=None, **kwargs, ): """Create a volume. :param volume_id: the ID of the volume to snapshot. :param force: If set to True the snapshot will be created even if the volume is attached to an instance, if False it will not :param name: name of the snapshot, one will be generated if one is not provided :param description: description of the snapshot, one will be generated if one is not provided :param wait: If true, waits for volume snapshot to be created. 
:param timeout: Seconds to wait for volume snapshot creation. None is forever. :returns: The created volume ``Snapshot`` object. :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time exceeded. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ kwargs = self._get_volume_kwargs(kwargs) payload = {'volume_id': volume_id, 'force': force} payload.update(kwargs) snapshot = self.block_storage.create_snapshot(**payload) if wait: snapshot = self.block_storage.wait_for_status( snapshot, wait=timeout ) return snapshot def get_volume_snapshot_by_id(self, snapshot_id): """Takes a snapshot_id and gets a dict of the snapshot that maches that ID. Note: This is more efficient than get_volume_snapshot. param: snapshot_id: ID of the volume snapshot. :returns: A volume ``Snapshot`` object if found, else None. """ return self.block_storage.get_snapshot(snapshot_id) # TODO(stephenfin): Remove 'filters' in a future major version def get_volume_snapshot(self, name_or_id, filters=None): """Get a volume by name or ID. :param name_or_id: Name or unique ID of the volume snapshot. :param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A volume ``Snapshot`` object if found, else None. """ return _utils._get_entity(self, 'volume_snapshot', name_or_id, filters) def create_volume_backup( self, volume_id, name=None, description=None, force=False, wait=True, timeout=None, incremental=False, snapshot_id=None, ): """Create a volume backup. :param volume_id: the ID of the volume to backup. 
:param name: name of the backup, one will be generated if one is not provided :param description: description of the backup, one will be generated if one is not provided :param force: If set to True the backup will be created even if the volume is attached to an instance, if False it will not :param wait: If true, waits for volume backup to be created. :param timeout: Seconds to wait for volume backup creation. None is forever. :param incremental: If set to true, the backup will be incremental. :param snapshot_id: The UUID of the source snapshot to back up. :returns: The created volume ``Backup`` object. :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time exceeded. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ payload = { 'name': name, 'volume_id': volume_id, 'description': description, 'force': force, 'is_incremental': incremental, 'snapshot_id': snapshot_id, } backup = self.block_storage.create_backup(**payload) if wait: backup = self.block_storage.wait_for_status(backup, wait=timeout) return backup # TODO(stephenfin): Remove 'filters' in a future major version def get_volume_backup(self, name_or_id, filters=None): """Get a volume backup by name or ID. :param name_or_id: Name or unique ID of the volume backup. :param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A volume ``Backup`` object if found, else None. """ return _utils._get_entity(self, 'volume_backup', name_or_id, filters) def list_volume_snapshots(self, detailed=True, filters=None): """List all volume snapshots. :param detailed: Whether or not to add detailed additional information. :param filters: A dictionary of meta data to use for further filtering. 
Example:: { 'name': 'my-volume-snapshot', 'volume_id': 'e126044c-7b4c-43be-a32a-c9cbbc9ddb56', 'all_tenants': 1 } :returns: A list of volume ``Snapshot`` objects. """ if not filters: filters = {} return list(self.block_storage.snapshots(details=detailed, **filters)) def list_volume_backups(self, detailed=True, filters=None): """List all volume backups. :param detailed: Whether or not to add detailed additional information. :param filters: A dictionary of meta data to use for further filtering. Example:: { 'name': 'my-volume-backup', 'status': 'available', 'volume_id': 'e126044c-7b4c-43be-a32a-c9cbbc9ddb56', 'all_tenants': 1 } :returns: A list of volume ``Backup`` objects. """ if not filters: filters = {} return list(self.block_storage.backups(details=detailed, **filters)) def delete_volume_backup( self, name_or_id=None, force=False, wait=False, timeout=None ): """Delete a volume backup. :param name_or_id: Name or unique ID of the volume backup. :param force: Allow delete in state other than error or available. :param wait: If true, waits for volume backup to be deleted. :param timeout: Seconds to wait for volume backup deletion. None is forever. :returns: True if deletion was successful, else False. :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time exceeded. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ volume_backup = self.get_volume_backup(name_or_id) if not volume_backup: return False self.block_storage.delete_backup( volume_backup, ignore_missing=False, force=force ) if wait: self.block_storage.wait_for_delete(volume_backup, wait=timeout) return True def delete_volume_snapshot( self, name_or_id=None, wait=False, timeout=None, ): """Delete a volume snapshot. :param name_or_id: Name or unique ID of the volume snapshot. :param wait: If true, waits for volume snapshot to be deleted. :param timeout: Seconds to wait for volume snapshot deletion. None is forever. :returns: True if deletion was successful, else False. 
:raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time exceeded. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ volumesnapshot = self.get_volume_snapshot(name_or_id) if not volumesnapshot: return False self.block_storage.delete_snapshot( volumesnapshot, ignore_missing=False ) if wait: self.block_storage.wait_for_delete(volumesnapshot, wait=timeout) return True def search_volumes(self, name_or_id=None, filters=None): """Search for one or more volumes. :param name_or_id: Name or unique ID of volume(s). :param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of volume ``Volume`` objects, if any are found. """ volumes = self.list_volumes() return _utils._filter_list(volumes, name_or_id, filters) def search_volume_snapshots(self, name_or_id=None, filters=None): """Search for one or more volume snapshots. :param name_or_id: Name or unique ID of volume snapshot(s). :param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of volume ``Snapshot`` objects, if any are found. """ volumesnapshots = self.list_volume_snapshots() return _utils._filter_list(volumesnapshots, name_or_id, filters) def search_volume_backups(self, name_or_id=None, filters=None): """Search for one or more volume backups. :param name_or_id: Name or unique ID of volume backup(s). 
:param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of volume ``Backup`` objects, if any are found. """ volume_backups = self.list_volume_backups() return _utils._filter_list(volume_backups, name_or_id, filters) # TODO(stephenfin): Remove 'get_extra' in a future major version def search_volume_types( self, name_or_id=None, filters=None, get_extra=None, ): """Search for one or more volume types. :param name_or_id: Name or unique ID of volume type(s). :param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of volume ``Type`` objects, if any are found. """ volume_types = self.list_volume_types(get_extra=get_extra) return _utils._filter_list(volume_types, name_or_id, filters) def get_volume_type_access(self, name_or_id): """Return a list of volume_type_access. :param name_or_id: Name or unique ID of the volume type. :returns: A volume ``Type`` object if found, else None. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ volume_type = self.get_volume_type(name_or_id) if not volume_type: raise exceptions.SDKException( "VolumeType not found: %s" % name_or_id ) return self.block_storage.get_type_access(volume_type) def add_volume_type_access(self, name_or_id, project_id): """Grant access on a volume_type to a project. NOTE: the call works even if the project does not exist. 
:param name_or_id: ID or name of a volume_type :param project_id: A project id :returns: None :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ volume_type = self.get_volume_type(name_or_id) if not volume_type: raise exceptions.SDKException( "VolumeType not found: %s" % name_or_id ) self.block_storage.add_type_access(volume_type, project_id) def remove_volume_type_access(self, name_or_id, project_id): """Revoke access on a volume_type to a project. :param name_or_id: ID or name of a volume_type :param project_id: A project id :returns: None :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ volume_type = self.get_volume_type(name_or_id) if not volume_type: raise exceptions.SDKException( "VolumeType not found: %s" % name_or_id ) self.block_storage.remove_type_access(volume_type, project_id) def set_volume_quotas(self, name_or_id, **kwargs): """Set a volume quota in a project :param name_or_id: project name or id :param kwargs: key/value pairs of quota name and quota value :returns: None :raises: :class:`~openstack.exceptions.SDKException` if the resource to set the quota does not exist. """ project = self.identity.find_project(name_or_id, ignore_missing=False) self.block_storage.update_quota_set(project=project, **kwargs) def get_volume_quotas(self, name_or_id): """Get volume quotas for a project :param name_or_id: project name or id :returns: A volume ``QuotaSet`` object with the quotas :raises: :class:`~openstack.exceptions.SDKException` if it's not a valid project """ proj = self.identity.find_project(name_or_id, ignore_missing=False) return self.block_storage.get_quota_set(proj) def delete_volume_quotas(self, name_or_id): """Delete volume quotas for a project :param name_or_id: project name or id :returns: The deleted volume ``QuotaSet`` object. 
:raises: :class:`~openstack.exceptions.SDKException` if it's not a valid project or the call failed """ proj = self.identity.find_project(name_or_id, ignore_missing=False) return self.block_storage.revert_quota_set(proj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_coe.py0000664000175000017500000002565500000000000020620 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack.cloud import _utils from openstack.cloud import openstackcloud from openstack import exceptions class CoeCloudMixin(openstackcloud._OpenStackCloudMixin): def list_coe_clusters(self): """List COE (Container Orchestration Engine) cluster. :returns: A list of container infrastructure management ``Cluster`` objects. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return list(self.container_infrastructure_management.clusters()) def search_coe_clusters(self, name_or_id=None, filters=None): """Search COE cluster. :param name_or_id: cluster name or ID. :param filters: a dict containing additional filters to use. :param detail: a boolean to control if we need summarized or detailed output. :returns: A list of container infrastructure management ``Cluster`` objects. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. 
""" coe_clusters = self.list_coe_clusters() return _utils._filter_list(coe_clusters, name_or_id, filters) def get_coe_cluster(self, name_or_id, filters=None): """Get a COE cluster by name or ID. :param name_or_id: Name or ID of the cluster. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A container infrastructure management ``Cluster`` object if found, else None. """ return _utils._get_entity(self, 'coe_cluster', name_or_id, filters) def create_coe_cluster( self, name, cluster_template_id, **kwargs, ): """Create a COE cluster based on given cluster template. :param string name: Name of the cluster. :param string cluster_template_id: ID of the cluster template to use. :param dict kwargs: Any other arguments to pass in. :returns: The created container infrastructure management ``Cluster`` object. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call """ cluster = self.container_infrastructure_management.create_cluster( name=name, cluster_template_id=cluster_template_id, **kwargs, ) return cluster def delete_coe_cluster(self, name_or_id): """Delete a COE cluster. :param name_or_id: Name or unique ID of the cluster. :returns: True if the delete succeeded, False if the cluster was not found. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ cluster = self.get_coe_cluster(name_or_id) if not cluster: self.log.debug( "COE Cluster %(name_or_id)s does not exist", {'name_or_id': name_or_id}, exc_info=True, ) return False self.container_infrastructure_management.delete_cluster(cluster) return True def update_coe_cluster(self, name_or_id, **kwargs): """Update a COE cluster. 
:param name_or_id: Name or ID of the COE cluster being updated. :param kwargs: Cluster attributes to be updated. :returns: The updated cluster ``Cluster`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ cluster = self.get_coe_cluster(name_or_id) if not cluster: raise exceptions.SDKException( "COE cluster %s not found." % name_or_id ) cluster = self.container_infrastructure_management.update_cluster( cluster, **kwargs ) return cluster def get_coe_cluster_certificate(self, cluster_id): """Get details about the CA certificate for a cluster by name or ID. :param cluster_id: ID of the cluster. :returns: Details about the CA certificate for the given cluster. """ return ( self.container_infrastructure_management.get_cluster_certificate( cluster_id ) ) def sign_coe_cluster_certificate(self, cluster_id, csr): """Sign client key and generate the CA certificate for a cluster :param cluster_id: UUID of the cluster. :param csr: Certificate Signing Request (CSR) for authenticating client key.The CSR will be used by Magnum to generate a signed certificate that client will use to communicate with the cluster. :returns: a dict representing the signed certs. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ return self.container_infrastructure_management.create_cluster_certificate( # noqa: E501 cluster_uuid=cluster_id, csr=csr ) def list_cluster_templates(self, detail=False): """List cluster templates. :param bool detail. Ignored. Included for backwards compat. ClusterTemplates are always returned with full details. :returns: a list of dicts containing the cluster template details. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return list( self.container_infrastructure_management.cluster_templates() ) def search_cluster_templates( self, name_or_id=None, filters=None, detail=False ): """Search cluster templates. 
:param name_or_id: cluster template name or ID. :param filters: a dict containing additional filters to use. :param detail: a boolean to control if we need summarized or detailed output. :returns: a list of dict containing the cluster templates :raises: :class:`~openstack.exceptions.SDKException`: if something goes wrong during the OpenStack API call. """ cluster_templates = self.list_cluster_templates(detail=detail) return _utils._filter_list(cluster_templates, name_or_id, filters) def get_cluster_template(self, name_or_id, filters=None, detail=False): """Get a cluster template by name or ID. :param name_or_id: Name or ID of the cluster template. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A cluster template dict or None if no matching cluster template is found. """ return _utils._get_entity( self, 'cluster_template', name_or_id, filters=filters, detail=detail, ) def create_cluster_template( self, name, image_id=None, keypair_id=None, coe=None, **kwargs ): """Create a cluster template. :param string name: Name of the cluster template. :param string image_id: Name or ID of the image to use. :param string keypair_id: Name or ID of the keypair to use. :param string coe: Name of the coe for the cluster template. Other arguments will be passed in kwargs. 
:returns: a dict containing the cluster template description :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call """ cluster_template = ( self.container_infrastructure_management.create_cluster_template( name=name, image_id=image_id, keypair_id=keypair_id, coe=coe, **kwargs, ) ) return cluster_template def delete_cluster_template(self, name_or_id): """Delete a cluster template. :param name_or_id: Name or unique ID of the cluster template. :returns: True if the delete succeeded, False if the cluster template was not found. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ cluster_template = self.get_cluster_template(name_or_id) if not cluster_template: self.log.debug( "Cluster template %(name_or_id)s does not exist", {'name_or_id': name_or_id}, exc_info=True, ) return False self.container_infrastructure_management.delete_cluster_template( cluster_template ) return True def update_cluster_template(self, name_or_id, **kwargs): """Update a cluster template. :param name_or_id: Name or ID of the cluster template being updated. :returns: an update cluster template. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ cluster_template = self.get_cluster_template(name_or_id) if not cluster_template: raise exceptions.SDKException( "Cluster template %s not found." % name_or_id ) cluster_template = ( self.container_infrastructure_management.update_cluster_template( cluster_template, **kwargs ) ) return cluster_template def list_magnum_services(self): """List all Magnum services. :returns: a list of dicts containing the service details. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" return list(self.container_infrastructure_management.services()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_compute.py0000664000175000017500000021526000000000000021517 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import functools import operator import time import iso8601 from openstack.cloud import _network_common from openstack.cloud import _utils from openstack.cloud import exc from openstack.cloud import meta from openstack.compute.v2 import server as _server from openstack import exceptions from openstack import utils _SERVER_FIELDS = ( 'accessIPv4', 'accessIPv6', 'addresses', 'adminPass', 'created', 'description', 'key_name', 'metadata', 'networks', 'personality', 'private_v4', 'public_v4', 'public_v6', 'server_groups', 'status', 'updated', 'user_id', 'tags', ) def _to_bool(value): if isinstance(value, str): if not value: return False prospective = value.lower().capitalize() return prospective == 'True' return bool(value) def _pop_int(resource, key): return int(resource.pop(key, 0) or 0) def _pop_or_get(resource, key, default, strict): if strict: return resource.pop(key, default) else: return resource.get(key, default) class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): @property def _compute_region(self): # This is only used in exception messages. Can we get rid of it? 
return self.config.get_region_name('compute') def get_flavor_name(self, flavor_id): """Get the name of a flavor. :param flavor_id: ID of the flavor. :returns: The name of the flavor if a match if found, else None. """ flavor = self.get_flavor(flavor_id, get_extra=False) if flavor: return flavor['name'] return None def get_flavor_by_ram(self, ram, include=None, get_extra=True): """Get a flavor based on amount of RAM available. Finds the flavor with the least amount of RAM that is at least as much as the specified amount. If `include` is given, further filter based on matching flavor name. :param int ram: Minimum amount of RAM. :param string include: If given, will return a flavor whose name contains this string as a substring. :param get_extra: :returns: A compute ``Flavor`` object. :raises: :class:`~openstack.exceptions.SDKException` if no matching flavour could be found. """ flavors = self.list_flavors(get_extra=get_extra) for flavor in sorted(flavors, key=operator.itemgetter('ram')): if flavor['ram'] >= ram and ( not include or include in flavor['name'] ): return flavor raise exceptions.SDKException( "Could not find a flavor with {ram} and '{include}'".format( ram=ram, include=include ) ) def search_keypairs(self, name_or_id=None, filters=None): """Search keypairs. :param name_or_id: :param filters: :returns: A list of compute ``Keypair`` objects matching the search criteria. """ keypairs = self.list_keypairs( filters=filters if isinstance(filters, dict) else None ) return _utils._filter_list(keypairs, name_or_id, filters) def search_flavors(self, name_or_id=None, filters=None, get_extra=True): """Search flavors. :param name_or_id: :param flavors: :param get_extra: :returns: A list of compute ``Flavor`` objects matching the search criteria. 
""" flavors = self.list_flavors(get_extra=get_extra) return _utils._filter_list(flavors, name_or_id, filters) def search_servers( self, name_or_id=None, filters=None, detailed=False, all_projects=False, bare=False, ): """Search servers. :param name_or_id: :param filters: :param detailed: :param all_projects: :param bare: :returns: A list of compute ``Server`` objects matching the search criteria. """ servers = self.list_servers( detailed=detailed, all_projects=all_projects, bare=bare ) return _utils._filter_list(servers, name_or_id, filters) def search_server_groups(self, name_or_id=None, filters=None): """Search server groups. :param name_or_id: Name or unique ID of the server group(s). :param filters: A dict containing additional filters to use. :returns: A list of compute ``ServerGroup`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ server_groups = self.list_server_groups() return _utils._filter_list(server_groups, name_or_id, filters) def list_keypairs(self, filters=None): """List all available keypairs. :param filters: :returns: A list of compute ``Keypair`` objects. """ if not filters: filters = {} return list(self.compute.keypairs(**filters)) def list_availability_zone_names(self, unavailable=False): """List names of availability zones. :param bool unavailable: Whether or not to include unavailable zones in the output. Defaults to False. :returns: A list of availability zone names, or an empty list if the list could not be fetched. """ try: zones = self.compute.availability_zones() ret = [] for zone in zones: if zone.state['available'] or unavailable: ret.append(zone.name) return ret except exceptions.SDKException: self.log.debug( "Availability zone list could not be fetched", exc_info=True ) return [] def list_flavors(self, get_extra=False): """List all available flavors. :param get_extra: Whether or not to fetch extra specs for each flavor. 
Defaults to True. Default behavior value can be overridden in clouds.yaml by setting openstack.cloud.get_extra_specs to False. :returns: A list of compute ``Flavor`` objects. """ return list( self.compute.flavors(details=True, get_extra_specs=get_extra) ) def list_server_security_groups(self, server): """List all security groups associated with the given server. :returns: A list of security group dictionary objects. """ # Don't even try if we're a cloud that doesn't have them if not self._has_secgroups(): return [] server = self.compute.get_server(server) server.fetch_security_groups(self.compute) return server.security_groups def _get_server_security_groups(self, server, security_groups): if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) if not isinstance(server, dict): server = self.get_server(server, bare=True) if server is None: self.log.debug('Server %s not found', server) return None, None if not isinstance(security_groups, (list, tuple)): security_groups = [security_groups] sec_group_objs = [] for sg in security_groups: if not isinstance(sg, dict): sg = self.get_security_group(sg) if sg is None: self.log.debug( 'Security group %s not found for adding', sg ) return None, None sec_group_objs.append(sg) return server, sec_group_objs def add_server_security_groups(self, server, security_groups): """Add security groups to a server. Add existing security groups to an existing server. If the security groups are already present on the server this will continue unaffected. :returns: False if server or security groups are undefined, True otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" server, security_groups = self._get_server_security_groups( server, security_groups ) if not (server and security_groups): return False for sg in security_groups: self.compute.add_security_group_to_server(server, sg) return True def remove_server_security_groups(self, server, security_groups): """Remove security groups from a server Remove existing security groups from an existing server. If the security groups are not present on the server this will continue unaffected. :returns: False if server or security groups are undefined, True otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ server, security_groups = self._get_server_security_groups( server, security_groups ) if not (server and security_groups): return False ret = True for sg in security_groups: try: self.compute.remove_security_group_from_server(server, sg) except exceptions.NotFoundException: # NOTE(jamielennox): Is this ok? If we remove something that # isn't present should we just conclude job done or is that an # error? Nova returns ok if you try to add a group twice. self.log.debug( "The security group %s was not present on server %s so " "no action was performed", sg.name, server.name, ) ret = False return ret def list_servers( self, detailed=False, all_projects=False, bare=False, filters=None, ): """List all available servers. :param detailed: Whether or not to add detailed additional information. Defaults to False. :param all_projects: Whether to list servers from all projects or just the current auth scoped project. :param bare: Whether to skip adding any additional information to the server record. Defaults to False, meaning the addresses dict will be populated as needed from neutron. Setting to True implies detailed = False. :param filters: Additional query parameters passed to the API server. :returns: A list of compute ``Server`` objects. 
""" if not filters: filters = {} return [ self._expand_server(server, detailed, bare) for server in self.compute.servers( all_projects=all_projects, **filters, ) ] def list_server_groups(self): """List all available server groups. :returns: A list of compute ``ServerGroup`` objects. """ return list(self.compute.server_groups()) def get_compute_limits(self, name_or_id=None): """Get absolute compute limits for a project :param name_or_id: (optional) project name or ID to get limits for if different from the current project :returns: A compute :class:`~openstack.compute.v2.limits.Limits.AbsoluteLimits` object. :raises: :class:`~openstack.exceptions.SDKException` if it's not a valid project """ params = {} if name_or_id: project = self.identity.find_project(name_or_id) if not project: raise exceptions.SDKException( f"Project {name_or_id} was requested but was not found " f"on the cloud" ) params['tenant_id'] = project.id return self.compute.get_limits(**params).absolute def get_keypair(self, name_or_id, filters=None): """Get a keypair by name or ID. :param name_or_id: Name or ID of the keypair. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A compute ``Keypair`` object if found, else None. """ return _utils._get_entity(self, 'keypair', name_or_id, filters) def get_flavor(self, name_or_id, filters=None, get_extra=True): """Get a flavor by name or ID. :param name_or_id: Name or ID of the flavor. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. 
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :param get_extra: Whether or not the list_flavors call should get the extra flavor specs. :returns: A compute ``Flavor`` object if found, else None. """ if not filters: filters = {} flavor = self.compute.find_flavor( name_or_id, get_extra_specs=get_extra, ignore_missing=True, **filters, ) return flavor def get_flavor_by_id(self, id, get_extra=False): """Get a flavor by ID :param id: ID of the flavor. :param get_extra: Whether or not the list_flavors call should get the extra flavor specs. :returns: A compute ``Flavor`` object if found, else None. """ return self.compute.get_flavor(id, get_extra_specs=get_extra) def get_server_console(self, server, length=None): """Get the console log for a server. :param server: The server to fetch the console log for. Can be either a server dict or the Name or ID of the server. :param int length: The number of lines you would like to retrieve from the end of the log. (optional, defaults to all) :returns: A string containing the text of the console log or an empty string if the cloud does not support console logs. :raises: :class:`~openstack.exceptions.SDKException` if an invalid server argument is given or if something else unforseen happens """ if not isinstance(server, dict): server = self.get_server(server, bare=True) if not server: raise exceptions.SDKException( "Console log requested for invalid server" ) try: return self._get_server_console_output(server['id'], length) except exceptions.BadRequestException: return "" def _get_server_console_output(self, server_id, length=None): output = self.compute.get_server_console_output( server=server_id, length=length ) if 'output' in output: return output['output'] def get_server( self, name_or_id=None, filters=None, detailed=False, bare=False, all_projects=False, ): """Get a server by name or ID. :param name_or_id: Name or ID of the server. :param filters: A dictionary of meta data to use for further filtering. 
Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :param detailed: Whether or not to add detailed additional information. Defaults to False. :param bare: Whether to skip adding any additional information to the server record. Defaults to False, meaning the addresses dict will be populated as needed from neutron. Setting to True implies detailed = False. :param all_projects: Whether to get server from all projects or just the current auth scoped project. :returns: A compute ``Server`` object if found, else None. """ searchfunc = functools.partial( self.search_servers, detailed=detailed, bare=True, all_projects=all_projects, ) server = _utils._get_entity(self, searchfunc, name_or_id, filters) return self._expand_server(server, detailed, bare) def _expand_server(self, server, detailed, bare): if bare or not server: return server elif detailed: return meta.get_hostvars_from_server(self, server) else: return meta.add_server_interfaces(self, server) def get_server_by_id(self, id): """Get a server by ID. :param id: ID of the server. :returns: A compute ``Server`` object if found, else None. """ try: server = self.compute.get_server(id) return meta.add_server_interfaces(self, server) except exceptions.NotFoundException: return None def get_server_group(self, name_or_id=None, filters=None): """Get a server group by name or ID. :param name_or_id: Name or ID of the server group. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'policy': 'affinity', } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A compute ``ServerGroup`` object if found, else None. 
""" return _utils._get_entity(self, 'server_group', name_or_id, filters) def create_keypair(self, name, public_key=None): """Create a new keypair. :param name: Name of the keypair being created. :param public_key: Public key for the new keypair. :returns: The created compute ``Keypair`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ keypair = { 'name': name, } if public_key: keypair['public_key'] = public_key return self.compute.create_keypair(**keypair) def delete_keypair(self, name): """Delete a keypair. :param name: Name of the keypair to delete. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ try: self.compute.delete_keypair(name, ignore_missing=False) except exceptions.NotFoundException: self.log.debug("Keypair %s not found for deleting", name) return False return True def create_image_snapshot( self, name, server, wait=False, timeout=3600, **metadata, ): """Create an image by snapshotting an existing server. ..note:: On most clouds this is a cold snapshot - meaning that the server in question will be shutdown before taking the snapshot. It is possible that it's a live snapshot - but there is no way to know as a user, so caveat emptor. :param name: Name of the image to be created :param server: Server name or ID or dict representing the server to be snapshotted :param wait: If true, waits for image to be created. :param timeout: Seconds to wait for image creation. None is forever. :param metadata: Metadata to give newly-created image entity :returns: The created image ``Image`` object. 
:raises: :class:`~openstack.exceptions.SDKException` if there are problems uploading """ if not isinstance(server, dict): server_obj = self.get_server(server, bare=True) if not server_obj: raise exceptions.SDKException( "Server {server} could not be found and therefore" " could not be snapshotted.".format(server=server) ) server = server_obj image = self.compute.create_server_image( server, name=name, metadata=metadata, wait=wait, timeout=timeout ) return image def get_server_id(self, name_or_id): """Get the ID of a server. :param name_or_id: :returns: The name of the server if found, else None. """ server = self.get_server(name_or_id, bare=True) if server: return server['id'] return None def get_server_private_ip(self, server): """Get the private IP of a server. :param server: :returns: The private IP of the server if set, else None. """ return meta.get_server_private_ip(server, self) def get_server_public_ip(self, server): """Get the public IP of a server. :param server: :returns: The public IP of the server if set, else None. """ return meta.get_server_external_ipv4(self, server) def get_server_meta(self, server): """Get the metadata for a server. :param server: :returns: The metadata for the server if found, else None. 
""" # TODO(mordred) remove once ansible has moved to Inventory interface server_vars = meta.get_hostvars_from_server(self, server) groups = meta.get_groups_from_server(self, server, server_vars) return dict(server_vars=server_vars, groups=groups) @_utils.valid_kwargs( 'meta', 'files', 'userdata', 'description', 'reservation_id', 'return_raw', 'min_count', 'max_count', 'security_groups', 'key_name', 'availability_zone', 'block_device_mapping', 'block_device_mapping_v2', 'nics', 'scheduler_hints', 'config_drive', 'admin_pass', 'disk_config', 'tags', ) def create_server( self, name, image=None, flavor=None, auto_ip=True, ips=None, ip_pool=None, root_volume=None, terminate_volume=False, wait=False, timeout=180, reuse_ips=True, network=None, boot_from_volume=False, volume_size='50', boot_volume=None, volumes=None, nat_destination=None, group=None, **kwargs, ): """Create a virtual server instance. :param name: Something to name the server. :param image: Image dict, name or ID to boot with. image is required unless boot_volume is given. :param flavor: Flavor dict, name or ID to boot onto. :param auto_ip: Whether to take actions to find a routable IP for the server. (defaults to True) :param ips: List of IPs to attach to the server (defaults to None) :param ip_pool: Name of the network or floating IP pool to get an address from. (defaults to None) :param root_volume: Name or ID of a volume to boot from (defaults to None - deprecated, use boot_volume) :param boot_volume: Name or ID of a volume to boot from (defaults to None) :param terminate_volume: If booting from a volume, whether it should be deleted when the server is destroyed. (defaults to False) :param volumes: (optional) A list of volumes to attach to the server :param meta: (optional) A dict of arbitrary key/value metadata to store for this server. Both keys and values must be <=255 characters. :param files: (optional, deprecated) A dict of files to overwrite on the server upon boot. Keys are file names (i.e. 
``/etc/passwd``) and values are the file contents (either as a string or as a file-like object). A maximum of five entries is allowed, and each file must be 10k or less. :param reservation_id: a UUID for the set of servers being requested. :param min_count: (optional extension) The minimum number of servers to launch. :param max_count: (optional extension) The maximum number of servers to launch. :param security_groups: A list of security group names :param userdata: user data to pass to be exposed by the metadata server this can be a file type object as well or a string. :param key_name: (optional extension) name of previously created keypair to inject into the instance. :param availability_zone: Name of the availability zone for instance placement. :param block_device_mapping: (optional) A dict of block device mappings for this server. :param block_device_mapping_v2: (optional) A dict of block device mappings for this server. :param nics: (optional extension) an ordered list of nics to be added to this server, with information about connected networks, fixed IPs, port etc. :param scheduler_hints: (optional extension) arbitrary key-value pairs specified by the client to help boot an instance :param config_drive: (optional extension) value for config drive either boolean, or volume-id :param disk_config: (optional extension) control how the disk is partitioned when the server is created. possible values are 'AUTO' or 'MANUAL'. :param admin_pass: (optional extension) add a user supplied admin password. :param wait: (optional) Wait for the address to appear as assigned to the server. Defaults to False. :param timeout: (optional) Seconds to wait, defaults to 60. See the ``wait`` parameter. :param reuse_ips: (optional) Whether to attempt to reuse pre-existing floating ips should a floating IP be needed (defaults to True) :param network: (optional) Network dict or name or ID to attach the server to. Mutually exclusive with the nics parameter. 
Can also be a list of network names or IDs or network dicts. :param boot_from_volume: Whether to boot from volume. 'boot_volume' implies True, but boot_from_volume=True with no boot_volume is valid and will create a volume from the image and use that. :param volume_size: When booting an image from volume, how big should the created volume be? Defaults to 50. :param nat_destination: Which network should a created floating IP be attached to, if it's not possible to infer from the cloud's configuration. (Optional, defaults to None) :param group: ServerGroup dict, name or id to boot the server in. If a group is provided in both scheduler_hints and in the group param, the group param will win. (Optional, defaults to None) :returns: The created compute ``Server`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ # TODO(shade) Image is optional but flavor is not - yet flavor comes # after image in the argument list. Doh. if not flavor: raise TypeError( "create_server() missing 1 required argument: 'flavor'" ) if not image and not boot_volume: raise TypeError( "create_server() requires either 'image' or 'boot_volume'" ) # TODO(mordred) Add support for description starting in 2.19 security_groups = kwargs.get('security_groups', []) if security_groups and not isinstance(kwargs['security_groups'], list): security_groups = [security_groups] if security_groups: kwargs['security_groups'] = [] for sec_group in security_groups: kwargs['security_groups'].append(dict(name=sec_group)) if 'userdata' in kwargs: user_data = kwargs.pop('userdata') if user_data: kwargs['user_data'] = self._encode_server_userdata(user_data) for desired, given in ( ('OS-DCF:diskConfig', 'disk_config'), ('config_drive', 'config_drive'), ('key_name', 'key_name'), ('metadata', 'meta'), ('adminPass', 'admin_pass'), ): value = kwargs.pop(given, None) if value: kwargs[desired] = value if group: group_obj = self.compute.find_server_group(group) if not group_obj: raise 
exceptions.SDKException( "Server Group {group} was requested but was not found" " on the cloud".format(group=group) ) if 'scheduler_hints' not in kwargs: kwargs['scheduler_hints'] = {} kwargs['scheduler_hints']['group'] = group_obj['id'] kwargs.setdefault('max_count', kwargs.get('max_count', 1)) kwargs.setdefault('min_count', kwargs.get('min_count', 1)) if 'nics' in kwargs and not isinstance(kwargs['nics'], list): if isinstance(kwargs['nics'], dict): # Be nice and help the user out kwargs['nics'] = [kwargs['nics']] else: raise exceptions.SDKException( 'nics parameter to create_server takes a list of dicts.' ' Got: {nics}'.format(nics=kwargs['nics']) ) if network and ('nics' not in kwargs or not kwargs['nics']): nics = [] if not isinstance(network, list): network = [network] for net_name in network: if isinstance(net_name, dict) and 'id' in net_name: network_obj = net_name else: network_obj = self.network.find_network(net_name) if not network_obj: raise exceptions.SDKException( 'Network {network} is not a valid network in' ' {cloud}:{region}'.format( network=network, cloud=self.name, region=self._compute_region, ) ) nics.append({'net-id': network_obj['id']}) kwargs['nics'] = nics if not network and ('nics' not in kwargs or not kwargs['nics']): default_network = self.get_default_network() if default_network: kwargs['nics'] = [{'net-id': default_network['id']}] networks = [] for nic in kwargs.pop('nics', []): net = {} if 'net-id' in nic: # TODO(mordred) Make sure this is in uuid format net['uuid'] = nic.pop('net-id') # If there's a net-id, ignore net-name nic.pop('net-name', None) elif 'net-name' in nic: net_name = nic.pop('net-name') nic_net = self.network.find_network(net_name) if not nic_net: raise exceptions.SDKException( "Requested network {net} could not be found.".format( net=net_name ) ) net['uuid'] = nic_net['id'] for ip_key in ('v4-fixed-ip', 'v6-fixed-ip', 'fixed_ip'): fixed_ip = nic.pop(ip_key, None) if fixed_ip and net.get('fixed_ip'): raise 
exceptions.SDKException( "Only one of v4-fixed-ip, v6-fixed-ip or fixed_ip" " may be given" ) if fixed_ip: net['fixed_ip'] = fixed_ip for key in ('port', 'port-id'): if key in nic: net['port'] = nic.pop(key) # A tag supported only in server microversion 2.32-2.36 or >= 2.42 # Bumping the version to 2.42 to support the 'tag' implementation if 'tag' in nic: utils.require_microversion(self.compute, '2.42') net['tag'] = nic.pop('tag') if nic: raise exceptions.SDKException( "Additional unsupported keys given for server network" " creation: {keys}".format(keys=nic.keys()) ) networks.append(net) if networks: kwargs['networks'] = networks else: # If user has not passed networks - let Nova try the best; # note earlier microversions expect this to be blank. if utils.supports_microversion(self.compute, '2.37'): kwargs['networks'] = 'auto' if image: # TODO(stephenfin): Drop support for dicts: we should only accept # strings or Image objects if isinstance(image, dict): kwargs['imageRef'] = image['id'] else: image_obj = self.image.find_image(image) if not image_obj: raise exc.OpenStackCloudException( f"Image {image} was requested but was not found " f"on the cloud" ) kwargs['imageRef'] = image_obj.id # TODO(stephenfin): Drop support for dicts: we should only accept # strings or Image objects if isinstance(flavor, dict): kwargs['flavorRef'] = flavor['id'] else: kwargs['flavorRef'] = self.get_flavor(flavor, get_extra=False).id if volumes is None: volumes = [] # nova cli calls this boot_volume. Let's be the same if root_volume and not boot_volume: boot_volume = root_volume kwargs = self._get_boot_from_volume_kwargs( image=image, boot_from_volume=boot_from_volume, boot_volume=boot_volume, volume_size=str(volume_size), terminate_volume=terminate_volume, volumes=volumes, kwargs=kwargs, ) kwargs['name'] = name server = self.compute.create_server(**kwargs) # TODO(mordred) We're only testing this in functional tests. We need # to add unit tests for this too. 
admin_pass = server.admin_password or kwargs.get('admin_pass') if not wait: server = self.compute.get_server(server.id) if server['status'] == 'ERROR': if ( 'fault' in server and server['fault'] is not None and 'message' in server['fault'] ): raise exceptions.SDKException( "Error in creating the server. " "Compute service reports fault: {reason}".format( reason=server['fault']['message'] ), extra_data=dict(server=server), ) raise exceptions.SDKException( "Error in creating the server " "(no further information available)", extra_data=dict(server=server), ) server = meta.add_server_interfaces(self, server) else: server = self.wait_for_server( server, auto_ip=auto_ip, ips=ips, ip_pool=ip_pool, reuse=reuse_ips, timeout=timeout, nat_destination=nat_destination, ) server.admin_password = admin_pass return server def _get_boot_from_volume_kwargs( self, image, boot_from_volume, boot_volume, volume_size, terminate_volume, volumes, kwargs, ): """Return block device mappings :param image: Image dict, name or id to boot with. """ # TODO(mordred) We're only testing this in functional tests. We need # to add unit tests for this too. 
if boot_volume or boot_from_volume or volumes: kwargs.setdefault('block_device_mapping_v2', []) else: return kwargs # If we have boot_from_volume but no root volume, then we're # booting an image from volume if boot_volume: volume = self.block_storage.find_volume(boot_volume) if not volume: raise exceptions.SDKException( f"Volume {volume} was requested but was not found " f"on the cloud" ) block_mapping = { 'boot_index': '0', 'delete_on_termination': terminate_volume, 'destination_type': 'volume', 'uuid': volume['id'], 'source_type': 'volume', } kwargs['block_device_mapping_v2'].append(block_mapping) kwargs['imageRef'] = '' elif boot_from_volume: # TODO(stephenfin): Drop support for dicts: we should only accept # strings or Image objects if isinstance(image, dict): image_obj = image else: image_obj = self.image.find_image(image) if not image_obj: raise exceptions.SDKException( f"Image {image} was requested but was not found " f"on the cloud" ) block_mapping = { 'boot_index': '0', 'delete_on_termination': terminate_volume, 'destination_type': 'volume', 'uuid': image_obj['id'], 'source_type': 'image', 'volume_size': volume_size, } kwargs['imageRef'] = '' kwargs['block_device_mapping_v2'].append(block_mapping) if volumes and kwargs['imageRef']: # If we're attaching volumes on boot but booting from an image, # we need to specify that in the BDM. 
block_mapping = { 'boot_index': 0, 'delete_on_termination': True, 'destination_type': 'local', 'source_type': 'image', 'uuid': kwargs['imageRef'], } kwargs['block_device_mapping_v2'].append(block_mapping) for volume in volumes: volume_obj = self.block_storage.find_volume(volume) if not volume_obj: raise exceptions.SDKException( f"Volume {volume} was requested but was not found " f"on the cloud" ) block_mapping = { 'boot_index': '-1', 'delete_on_termination': False, 'destination_type': 'volume', 'uuid': volume_obj['id'], 'source_type': 'volume', } kwargs['block_device_mapping_v2'].append(block_mapping) return kwargs def wait_for_server( self, server, auto_ip=True, ips=None, ip_pool=None, reuse=True, timeout=180, nat_destination=None, ): """ Wait for a server to reach ACTIVE status. """ server_id = server['id'] timeout_message = "Timeout waiting for the server to come up." start_time = time.time() for count in utils.iterate_timeout( timeout, timeout_message, wait=min(5, timeout), ): try: server = self.get_server(server_id) except Exception: continue if not server: continue # We have more work to do, but the details of that are # hidden from the user. So, calculate remaining timeout # and pass it down into the IP stack. remaining_timeout = timeout - int(time.time() - start_time) if remaining_timeout <= 0: raise exceptions.ResourceTimeout(timeout_message) server = self.get_active_server( server=server, reuse=reuse, auto_ip=auto_ip, ips=ips, ip_pool=ip_pool, wait=True, timeout=remaining_timeout, nat_destination=nat_destination, ) if server is not None and server['status'] == 'ACTIVE': return server def get_active_server( self, server, auto_ip=True, ips=None, ip_pool=None, reuse=True, wait=False, timeout=180, nat_destination=None, ): if server['status'] == 'ERROR': if ( 'fault' in server and server['fault'] is not None and 'message' in server['fault'] ): raise exceptions.SDKException( "Error in creating the server. 
" "Compute service reports fault: {reason}".format( reason=server['fault']['message'] ), extra_data=dict(server=server), ) raise exceptions.SDKException( "Error in creating the server " "(no further information available)", extra_data=dict(server=server), ) if server['status'] == 'ACTIVE': if 'addresses' in server and server['addresses']: return self.add_ips_to_server( server, auto_ip, ips, ip_pool, reuse=reuse, nat_destination=nat_destination, wait=wait, timeout=timeout, ) self.log.debug( 'Server %(server)s reached ACTIVE state without' ' being allocated an IP address.' ' Deleting server.', {'server': server['id']}, ) try: self._delete_server(server=server, wait=wait, timeout=timeout) except Exception as e: raise exceptions.SDKException( 'Server reached ACTIVE state without being' ' allocated an IP address AND then could not' ' be deleted: {}'.format(e), extra_data=dict(server=server), ) raise exceptions.SDKException( 'Server reached ACTIVE state without being' ' allocated an IP address.', extra_data=dict(server=server), ) return None def rebuild_server( self, server_id, image_id, admin_pass=None, detailed=False, bare=False, wait=False, timeout=180, ): """Rebuild a server. :param server_id: :param image_id: :param admin_pass: :param detailed: :param bare: :param wait: :param timeout: :returns: A compute ``Server`` object. """ kwargs = {} if image_id: kwargs['image'] = image_id if admin_pass: kwargs['admin_password'] = admin_pass server = self.compute.rebuild_server(server_id, **kwargs) if not wait: return self._expand_server(server, bare=bare, detailed=detailed) admin_pass = server.get('adminPass') or admin_pass server = self.compute.wait_for_server(server, wait=timeout) if server['status'] == 'ACTIVE': server.adminPass = admin_pass return self._expand_server(server, detailed=detailed, bare=bare) def set_server_metadata(self, name_or_id, metadata): """Set metadata in a server instance. :param str name_or_id: The name or ID of the server instance to update. 
:param dict metadata: A dictionary with the key=value pairs to set in the server instance. It only updates the key=value pairs provided. Existing ones will remain untouched. :returns: None :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ server = self.get_server(name_or_id, bare=True) if not server: raise exceptions.SDKException(f'Invalid Server {name_or_id}') self.compute.set_server_metadata(server=server.id, **metadata) def delete_server_metadata(self, name_or_id, metadata_keys): """Delete metadata from a server instance. :param str name_or_id: The name or ID of the server instance to update. :param metadata_keys: A list with the keys to be deleted from the server instance. :returns: None :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ server = self.get_server(name_or_id, bare=True) if not server: raise exceptions.SDKException(f'Invalid Server {name_or_id}') self.compute.delete_server_metadata( server=server.id, keys=metadata_keys ) def delete_server( self, name_or_id, wait=False, timeout=180, delete_ips=False, delete_ip_retry=1, ): """Delete a server instance. :param name_or_id: name or ID of the server to delete :param bool wait: If true, waits for server to be deleted. :param int timeout: Seconds to wait for server deletion. :param bool delete_ips: If true, deletes any floating IPs associated with the instance. :param int delete_ip_retry: Number of times to retry deleting any floating ips, should the first try be unsuccessful. :returns: True if delete succeeded, False otherwise if the server does not exist. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ # If delete_ips is True, we need the server to not be bare. server = self.compute.find_server(name_or_id, ignore_missing=True) if not server: return False # This portion of the code is intentionally left as a separate # private method in order to avoid an unnecessary API call to get # a server we already have. 
return self._delete_server( server, wait=wait, timeout=timeout, delete_ips=delete_ips, delete_ip_retry=delete_ip_retry, ) def _delete_server_floating_ips(self, server, delete_ip_retry): # Does the server have floating ips in its # addresses dict? If not, skip this. server_floats = meta.find_nova_interfaces( server['addresses'], ext_tag='floating' ) for fip in server_floats: try: ip = self.get_floating_ip( id=None, filters={'floating_ip_address': fip['addr']} ) except exceptions.NotFoundException: # We're deleting. If it doesn't exist - awesome # NOTE(mordred) If the cloud is a nova FIP cloud but # floating_ip_source is set to neutron, this # can lead to a FIP leak. continue if not ip: continue deleted = self.delete_floating_ip(ip['id'], retry=delete_ip_retry) if not deleted: raise exceptions.SDKException( "Tried to delete floating ip {floating_ip}" " associated with server {id} but there was" " an error deleting it. Not deleting server.".format( floating_ip=ip['floating_ip_address'], id=server['id'] ) ) def _delete_server( self, server, wait=False, timeout=180, delete_ips=False, delete_ip_retry=1, ): if not server: return False if delete_ips and self._has_floating_ips() and server['addresses']: self._delete_server_floating_ips(server, delete_ip_retry) try: self.compute.delete_server(server) except exceptions.NotFoundException: return False except Exception: raise if not wait: return True if not isinstance(server, _server.Server): # We might come here with Munch object (at the moment). # If this is the case - convert it into real server to be able to # use wait_for_delete server = _server.Server(id=server['id']) self.compute.wait_for_delete(server, wait=timeout) return True @_utils.valid_kwargs('name', 'description') def update_server(self, name_or_id, detailed=False, bare=False, **kwargs): """Update a server. :param name_or_id: Name of the server to be updated. :param detailed: Whether or not to add detailed additional information. Defaults to False. 
:param bare: Whether to skip adding any additional information to the server record. Defaults to False, meaning the addresses dict will be populated as needed from neutron. Setting to True implies detailed = False. :param name: New name for the server :param description: New description for the server :returns: The updated compute ``Server`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ server = self.compute.find_server(name_or_id, ignore_missing=False) server = self.compute.update_server(server, **kwargs) return self._expand_server(server, bare=bare, detailed=detailed) def create_server_group(self, name, policies=None, policy=None): """Create a new server group. :param name: Name of the server group being created :param policies: List of policies for the server group. :returns: The created compute ``ServerGroup`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ sg_attrs = {'name': name} if policies: sg_attrs['policies'] = policies if policy: sg_attrs['policy'] = policy return self.compute.create_server_group(**sg_attrs) def delete_server_group(self, name_or_id): """Delete a server group. :param name_or_id: Name or ID of the server group to delete :returns: True if delete succeeded, False otherwise :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ server_group = self.get_server_group(name_or_id) if not server_group: self.log.debug( "Server group %s not found for deleting", name_or_id ) return False self.compute.delete_server_group(server_group, ignore_missing=False) return True def create_flavor( self, name, ram, vcpus, disk, description=None, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True, ): """Create a new flavor. 
:param name: Descriptive name of the flavor :param ram: Memory in MB for the flavor :param vcpus: Number of VCPUs for the flavor :param disk: Size of local disk in GB :param description: Description of the flavor :param flavorid: ID for the flavor (optional) :param ephemeral: Ephemeral space size in GB :param swap: Swap space in MB :param rxtx_factor: RX/TX factor :param is_public: Make flavor accessible to the public :returns: The created compute ``Flavor`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ attrs = { 'disk': disk, 'ephemeral': ephemeral, 'id': flavorid, 'is_public': is_public, 'name': name, 'ram': ram, 'rxtx_factor': rxtx_factor, 'swap': swap, 'vcpus': vcpus, 'description': description, } if flavorid == 'auto': attrs['id'] = None return self.compute.create_flavor(**attrs) def delete_flavor(self, name_or_id): """Delete a flavor :param name_or_id: ID or name of the flavor to delete. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ try: flavor = self.compute.find_flavor(name_or_id) if not flavor: self.log.debug("Flavor %s not found for deleting", name_or_id) return False self.compute.delete_flavor(flavor) return True except exceptions.SDKException: raise exceptions.SDKException( f"Unable to delete flavor {name_or_id}" ) def set_flavor_specs(self, flavor_id, extra_specs): """Add extra specs to a flavor :param string flavor_id: ID of the flavor to update. :param dict extra_specs: Dictionary of key-value pairs. :raises: :class:`~openstack.exceptions.SDKException` on operation error. :raises: :class:`~openstack.exceptions.BadRequestException` if flavor ID is not found. """ self.compute.create_flavor_extra_specs(flavor_id, extra_specs) def unset_flavor_specs(self, flavor_id, keys): """Delete extra specs from a flavor :param string flavor_id: ID of the flavor to update. :param keys: List of spec keys to delete. 
:raises: :class:`~openstack.exceptions.SDKException` on operation error. :raises: :class:`~openstack.exceptions.BadRequestException` if flavor ID is not found. """ for key in keys: self.compute.delete_flavor_extra_specs_property(flavor_id, key) def add_flavor_access(self, flavor_id, project_id): """Grant access to a private flavor for a project/tenant. :param string flavor_id: ID of the private flavor. :param string project_id: ID of the project/tenant. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ self.compute.flavor_add_tenant_access(flavor_id, project_id) def remove_flavor_access(self, flavor_id, project_id): """Revoke access from a private flavor for a project/tenant. :param string flavor_id: ID of the private flavor. :param string project_id: ID of the project/tenant. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ self.compute.flavor_remove_tenant_access(flavor_id, project_id) def list_flavor_access(self, flavor_id): """List access from a private flavor for a project/tenant. :param string flavor_id: ID of the private flavor. :returns: List of dicts with flavor_id and tenant_id attributes. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ return self.compute.get_flavor_access(flavor_id) def list_hypervisors(self, filters=None): """List all hypervisors :param filters: :returns: A list of compute ``Hypervisor`` objects. """ if not filters: filters = {} return list(self.compute.hypervisors(details=True, **filters)) def search_aggregates(self, name_or_id=None, filters=None): """Seach host aggregates. :param name: aggregate name or id. :param filters: a dict containing additional filters to use. :returns: A list of compute ``Aggregate`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. 
""" aggregates = self.list_aggregates() return _utils._filter_list(aggregates, name_or_id, filters) def list_aggregates(self, filters=None): """List all available host aggregates. :returns: A list of compute ``Aggregate`` objects. """ if not filters: filters = {} return self.compute.aggregates(**filters) def get_aggregate(self, name_or_id, filters=None): """Get an aggregate by name or ID. :param name_or_id: Name or ID of the aggregate. :param dict filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'availability_zone': 'nova', 'metadata': { 'cpu_allocation_ratio': '1.0' } } :returns: An aggregate dict or None if no matching aggregate is found. """ return self.compute.find_aggregate(name_or_id, ignore_missing=True) def create_aggregate(self, name, availability_zone=None): """Create a new host aggregate. :param name: Name of the host aggregate being created :param availability_zone: Availability zone to assign hosts :returns: The created compute ``Aggregate`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ return self.compute.create_aggregate( name=name, availability_zone=availability_zone ) @_utils.valid_kwargs('name', 'availability_zone') def update_aggregate(self, name_or_id, **kwargs): """Update a host aggregate. :param name_or_id: Name or ID of the aggregate being updated. :param name: New aggregate name :param availability_zone: Availability zone to assign to hosts :returns: The updated compute ``Aggregate`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ aggregate = self.get_aggregate(name_or_id) return self.compute.update_aggregate(aggregate, **kwargs) def delete_aggregate(self, name_or_id): """Delete a host aggregate. :param name_or_id: Name or ID of the host aggregate to delete. :returns: True if delete succeeded, False otherwise. 
:raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if isinstance(name_or_id, (str, bytes)) and not name_or_id.isdigit(): aggregate = self.get_aggregate(name_or_id) if not aggregate: self.log.debug( "Aggregate %s not found for deleting", name_or_id ) return False name_or_id = aggregate.id try: self.compute.delete_aggregate(name_or_id, ignore_missing=False) return True except exceptions.NotFoundException: self.log.debug("Aggregate %s not found for deleting", name_or_id) return False def set_aggregate_metadata(self, name_or_id, metadata): """Set aggregate metadata, replacing the existing metadata. :param name_or_id: Name of the host aggregate to update :param metadata: Dict containing metadata to replace (Use {'key': None} to remove a key) :returns: a dict representing the new host aggregate. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ aggregate = self.get_aggregate(name_or_id) if not aggregate: raise exceptions.SDKException( "Host aggregate %s not found." % name_or_id ) return self.compute.set_aggregate_metadata(aggregate, metadata) def add_host_to_aggregate(self, name_or_id, host_name): """Add a host to an aggregate. :param name_or_id: Name or ID of the host aggregate. :param host_name: Host to add. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ aggregate = self.get_aggregate(name_or_id) if not aggregate: raise exceptions.SDKException( "Host aggregate %s not found." % name_or_id ) return self.compute.add_host_to_aggregate(aggregate, host_name) def remove_host_from_aggregate(self, name_or_id, host_name): """Remove a host from an aggregate. :param name_or_id: Name or ID of the host aggregate. :param host_name: Host to remove. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ aggregate = self.get_aggregate(name_or_id) if not aggregate: raise exceptions.SDKException( "Host aggregate %s not found." 
% name_or_id ) return self.compute.remove_host_from_aggregate(aggregate, host_name) def set_compute_quotas(self, name_or_id, **kwargs): """Set a quota in a project :param name_or_id: project name or id :param kwargs: key/value pairs of quota name and quota value :raises: :class:`~openstack.exceptions.SDKException` if the resource to set the quota does not exist. """ project = self.identity.find_project(name_or_id, ignore_missing=False) kwargs['force'] = True self.compute.update_quota_set(project=project, **kwargs) def get_compute_quotas(self, name_or_id): """Get quota for a project :param name_or_id: project name or id :returns: A compute ``QuotaSet`` object if found, else None. :raises: :class:`~openstack.exceptions.SDKException` if it's not a valid project """ proj = self.identity.find_project(name_or_id, ignore_missing=False) return self.compute.get_quota_set(proj) def delete_compute_quotas(self, name_or_id): """Delete quota for a project :param name_or_id: project name or id :raises: :class:`~openstack.exceptions.SDKException` if it's not a valid project or the nova client call failed """ proj = self.identity.find_project(name_or_id, ignore_missing=False) self.compute.revert_quota_set(proj) def get_compute_usage(self, name_or_id, start=None, end=None): """Get usage for a specific project :param name_or_id: project name or id :param start: :class:`datetime.datetime` or string. Start date in UTC Defaults to 2010-07-06T12:00:00Z (the date the OpenStack project was started) :param end: :class:`datetime.datetime` or string. End date in UTC. Defaults to now :returns: A :class:`~openstack.compute.v2.usage.Usage` object :raises: :class:`~openstack.exceptions.SDKException` if it's not a valid project """ def parse_date(date): try: return iso8601.parse_date(date) except iso8601.iso8601.ParseError: # Yes. This is an exception mask. However,iso8601 is an # implementation detail - and the error message is actually # less informative. 
raise exceptions.SDKException( "Date given, {date}, is invalid. Please pass in a date" " string in ISO 8601 format -" " YYYY-MM-DDTHH:MM:SS".format(date=date) ) if isinstance(start, str): start = parse_date(start) if isinstance(end, str): end = parse_date(end) proj = self.identity.find_project(name_or_id) if not proj: raise exceptions.SDKException( f"Project {name_or_id} was requested but was not found " f"on the cloud" ) return self.compute.get_usage(proj, start, end) def _encode_server_userdata(self, userdata): if hasattr(userdata, 'read'): userdata = userdata.read() if not isinstance(userdata, bytes): # If the userdata passed in is bytes, just send it unmodified if not isinstance(userdata, str): raise TypeError("%s can't be encoded" % type(userdata)) # If it's not bytes, make it bytes userdata = userdata.encode('utf-8', 'strict') # Once we have base64 bytes, make them into a utf-8 string for REST return base64.b64encode(userdata).decode('utf-8') def get_openstack_vars(self, server): return meta.get_hostvars_from_server(self, server) def _expand_server_vars(self, server): # Used by nodepool # TODO(mordred) remove after these make it into what we # actually want the API to be. 
return meta.expand_server_vars(self, server) def _remove_novaclient_artifacts(self, item): # Remove novaclient artifacts item.pop('links', None) item.pop('NAME_ATTR', None) item.pop('HUMAN_ID', None) item.pop('human_id', None) item.pop('request_ids', None) item.pop('x_openstack_request_ids', None) def _normalize_server(self, server): ret = utils.Munch() # Copy incoming server because of shared dicts in unittests # Wrap the copy in munch so that sub-dicts are properly munched server = utils.Munch(server) self._remove_novaclient_artifacts(server) ret['id'] = server.pop('id') ret['name'] = server.pop('name') server['flavor'].pop('links', None) ret['flavor'] = server.pop('flavor') # From original_names from sdk server.pop('flavorRef', None) # OpenStack can return image as a string when you've booted # from volume image = server.pop('image', None) if str(image) != image: image = utils.Munch(id=image['id']) ret['image'] = image # From original_names from sdk server.pop('imageRef', None) # From original_names from sdk ret['block_device_mapping'] = server.pop('block_device_mapping_v2', {}) project_id = server.pop('tenant_id', '') project_id = server.pop('project_id', project_id) az = _pop_or_get( server, 'OS-EXT-AZ:availability_zone', None, self.strict_mode ) # the server resource has this already, but it's missing az info # from the resource. # TODO(mordred) create_server is still normalizing servers that aren't # from the resource layer. 
ret['location'] = server.pop( 'location', self._get_current_location(project_id=project_id, zone=az), ) # Ensure volumes is always in the server dict, even if empty ret['volumes'] = _pop_or_get( server, 'os-extended-volumes:volumes_attached', [], self.strict_mode, ) config_drive = server.pop( 'has_config_drive', server.pop('config_drive', False) ) ret['has_config_drive'] = _to_bool(config_drive) host_id = server.pop('hostId', server.pop('host_id', None)) ret['host_id'] = host_id ret['progress'] = _pop_int(server, 'progress') # Leave these in so that the general properties handling works ret['disk_config'] = _pop_or_get( server, 'OS-DCF:diskConfig', None, self.strict_mode ) for key in ( 'OS-EXT-STS:power_state', 'OS-EXT-STS:task_state', 'OS-EXT-STS:vm_state', 'OS-SRV-USG:launched_at', 'OS-SRV-USG:terminated_at', 'OS-EXT-SRV-ATTR:hypervisor_hostname', 'OS-EXT-SRV-ATTR:instance_name', 'OS-EXT-SRV-ATTR:user_data', 'OS-EXT-SRV-ATTR:host', 'OS-EXT-SRV-ATTR:hostname', 'OS-EXT-SRV-ATTR:kernel_id', 'OS-EXT-SRV-ATTR:launch_index', 'OS-EXT-SRV-ATTR:ramdisk_id', 'OS-EXT-SRV-ATTR:reservation_id', 'OS-EXT-SRV-ATTR:root_device_name', 'OS-SCH-HNT:scheduler_hints', ): short_key = key.split(':')[1] ret[short_key] = _pop_or_get(server, key, None, self.strict_mode) # Protect against security_groups being None ret['security_groups'] = server.pop('security_groups', None) or [] # NOTE(mnaser): The Nova API returns the creation date in `created` # however the Shade contract returns `created_at` for # all resources. 
ret['created_at'] = server.get('created') for field in _SERVER_FIELDS: ret[field] = server.pop(field, None) if not ret['networks']: ret['networks'] = {} ret['interface_ip'] = '' ret['properties'] = server.copy() # Backwards compat if not self.strict_mode: ret['hostId'] = host_id ret['config_drive'] = config_drive ret['project_id'] = project_id ret['tenant_id'] = project_id # TODO(efried): This is hardcoded to 'compute' because this method # should only ever be used by the compute proxy. (That said, it # doesn't appear to be used at all, so can we get rid of it?) ret['region'] = self.config.get_region_name('compute') ret['cloud'] = self.config.name ret['az'] = az for key, val in ret['properties'].items(): ret.setdefault(key, val) return ret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_dns.py0000664000175000017500000002216400000000000020626 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack.cloud import _utils from openstack.cloud import openstackcloud from openstack import exceptions from openstack import resource class DnsCloudMixin(openstackcloud._OpenStackCloudMixin): def list_zones(self, filters=None): """List all available zones. :returns: A list of zones dicts. """ if not filters: filters = {} return list(self.dns.zones(allow_unknown_params=True, **filters)) def get_zone(self, name_or_id, filters=None): """Get a zone by name or ID. 
:param name_or_id: Name or ID of the zone :param filters: A dictionary of meta data to use for further filtering :returns: A zone dict or None if no matching zone is found. """ if not filters: filters = {} zone = self.dns.find_zone( name_or_id=name_or_id, ignore_missing=True, **filters ) if not zone: return None return zone def search_zones(self, name_or_id=None, filters=None): zones = self.list_zones(filters) return _utils._filter_list(zones, name_or_id, filters) def create_zone( self, name, zone_type=None, email=None, description=None, ttl=None, masters=None, ): """Create a new zone. :param name: Name of the zone being created. :param zone_type: Type of the zone (primary/secondary) :param email: Email of the zone owner (only applies if zone_type is primary) :param description: Description of the zone :param ttl: TTL (Time to live) value in seconds :param masters: Master nameservers (only applies if zone_type is secondary) :returns: a dict representing the created zone. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ # We capitalize in case the user passes time in lowercase, as # designate call expects PRIMARY/SECONDARY if zone_type is not None: zone_type = zone_type.upper() if zone_type not in ('PRIMARY', 'SECONDARY'): raise exceptions.SDKException( "Invalid type %s, valid choices are PRIMARY or SECONDARY" % zone_type ) zone = { "name": name, "email": email, "description": description, } if ttl is not None: zone["ttl"] = ttl if zone_type is not None: zone["type"] = zone_type if masters is not None: zone["masters"] = masters try: return self.dns.create_zone(**zone) except exceptions.SDKException: raise exceptions.SDKException(f"Unable to create zone {name}") @_utils.valid_kwargs('email', 'description', 'ttl', 'masters') def update_zone(self, name_or_id, **kwargs): """Update a zone. :param name_or_id: Name or ID of the zone being updated. 
:param email: Email of the zone owner (only applies if zone_type is primary) :param description: Description of the zone :param ttl: TTL (Time to live) value in seconds :param masters: Master nameservers (only applies if zone_type is secondary) :returns: a dict representing the updated zone. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ zone = self.get_zone(name_or_id) if not zone: raise exceptions.SDKException("Zone %s not found." % name_or_id) return self.dns.update_zone(zone['id'], **kwargs) def delete_zone(self, name_or_id): """Delete a zone. :param name_or_id: Name or ID of the zone being deleted. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ zone = self.dns.find_zone(name_or_id) if not zone: self.log.debug("Zone %s not found for deleting", name_or_id) return False self.dns.delete_zone(zone) return True def list_recordsets(self, zone): """List all available recordsets. :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance of the zone managing the recordset. :returns: A list of recordsets. """ if isinstance(zone, resource.Resource): zone_obj = zone else: zone_obj = self.get_zone(zone) if zone_obj is None: raise exceptions.SDKException("Zone %s not found." % zone) return list(self.dns.recordsets(zone_obj)) def get_recordset(self, zone, name_or_id): """Get a recordset by name or ID. :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance of the zone managing the recordset. :param name_or_id: Name or ID of the recordset :returns: A recordset dict or None if no matching recordset is found. """ if isinstance(zone, resource.Resource): zone_obj = zone else: zone_obj = self.get_zone(zone) if not zone_obj: raise exceptions.SDKException("Zone %s not found." 
% zone) try: return self.dns.find_recordset( zone=zone_obj, name_or_id=name_or_id, ignore_missing=False ) except Exception: return None def search_recordsets(self, zone, name_or_id=None, filters=None): recordsets = self.list_recordsets(zone=zone) return _utils._filter_list(recordsets, name_or_id, filters) def create_recordset( self, zone, name, recordset_type, records, description=None, ttl=None ): """Create a recordset. :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance of the zone managing the recordset. :param name: Name of the recordset :param recordset_type: Type of the recordset :param records: List of the recordset definitions :param description: Description of the recordset :param ttl: TTL value of the recordset :returns: a dict representing the created recordset. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if isinstance(zone, resource.Resource): zone_obj = zone else: zone_obj = self.get_zone(zone) if not zone_obj: raise exceptions.SDKException("Zone %s not found." % zone) # We capitalize the type in case the user sends in lowercase recordset_type = recordset_type.upper() body = {'name': name, 'type': recordset_type, 'records': records} if description: body['description'] = description if ttl: body['ttl'] = ttl return self.dns.create_recordset(zone=zone_obj, **body) @_utils.valid_kwargs('description', 'ttl', 'records') def update_recordset(self, zone, name_or_id, **kwargs): """Update a recordset. :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance of the zone managing the recordset. :param name_or_id: Name or ID of the recordset being updated. :param records: List of the recordset definitions :param description: Description of the recordset :param ttl: TTL (Time to live) value in seconds of the recordset :returns: a dict representing the updated recordset. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" rs = self.get_recordset(zone, name_or_id) if not rs: raise exceptions.SDKException( "Recordset %s not found." % name_or_id ) rs = self.dns.update_recordset(recordset=rs, **kwargs) return rs def delete_recordset(self, zone, name_or_id): """Delete a recordset. :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance of the zone managing the recordset. :param name_or_id: Name or ID of the recordset being deleted. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ recordset = self.get_recordset(zone, name_or_id) if not recordset: self.log.debug("Recordset %s not found for deleting", name_or_id) return False self.dns.delete_recordset(recordset, ignore_missing=False) return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_identity.py0000664000175000017500000015537600000000000021707 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import warnings from openstack.cloud import _utils from openstack.cloud import openstackcloud from openstack import exceptions from openstack import utils from openstack import warnings as os_warnings class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin): def _get_project_id_param_dict(self, name_or_id): if name_or_id: project = self.get_project(name_or_id) if not project: return {} if utils.supports_version(self.identity, '3'): return {'default_project_id': project['id']} else: return {'tenant_id': project['id']} else: return {} def _get_domain_id_param_dict(self, domain_id): """Get a useable domain.""" # Keystone v3 requires domains for user and project creation. v2 does # not. However, keystone v2 does not allow user creation by non-admin # users, so we can throw an error to the user that does not need to # mention api versions if utils.supports_version(self.identity, '3'): if not domain_id: raise exceptions.SDKException( "User or project creation requires an explicit domain_id " "argument." ) else: return {'domain_id': domain_id} else: return {} def _get_identity_params(self, domain_id=None, project=None): """Get the domain and project/tenant parameters if needed. keystone v2 and v3 are divergent enough that we need to pass or not pass project or tenant_id or domain or nothing in a sane manner. """ ret = {} ret.update(self._get_domain_id_param_dict(domain_id)) ret.update(self._get_project_id_param_dict(project)) return ret def list_projects(self, domain_id=None, name_or_id=None, filters=None): """List projects. With no parameters, returns a full listing of all visible projects. :param domain_id: Domain ID to scope the searched projects. :param name_or_id: Name or ID of the project(s). :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. 
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of identity ``Project`` objects. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ if not filters: filters = {} query = dict(**filters) if name_or_id: query['name'] = name_or_id if domain_id: query['domain_id'] = domain_id return list(self.identity.projects(**query)) def search_projects(self, name_or_id=None, filters=None, domain_id=None): """Backwards compatibility method for search_projects search_projects originally had a parameter list that was name_or_id, filters and list had domain_id first. This method exists in this form to allow code written with positional parameter to still work. But really, use keyword arguments. :param name_or_id: Name or ID of the project(s). :param filters: dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :param domain_id: Domain ID to scope the searched projects. :returns: A list of identity ``Project`` objects. """ projects = self.list_projects(domain_id=domain_id, filters=filters) return _utils._filter_list(projects, name_or_id, filters) def get_project(self, name_or_id, filters=None, domain_id=None): """Get exactly one project. :param name_or_id: Name or unique ID of the project. :param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :param domain_id: Domain ID to scope the retrieved project. :returns: An identity ``Project`` object. 
        :raises: :class:`~openstack.exceptions.SDKException` if something goes
            wrong during the OpenStack API call.
        """
        # Delegates to the generic entity lookup helper, which dispatches to
        # search_projects/get_project-style resolution by name or ID.
        return _utils._get_entity(
            self, 'project', name_or_id, filters, domain_id=domain_id
        )

    def update_project(
        self,
        name_or_id,
        enabled=None,
        domain_id=None,
        **kwargs,
    ):
        """Update a project

        :param name_or_id: Name or unique ID of the project.
        :param enabled: Whether the project is enabled or not.
        :param domain_id: Domain ID to scope the retrieved project.
        :returns: An identity ``Project`` object.
        :raises: :class:`~openstack.exceptions.SDKException` if the project
            cannot be found.
        """
        # find_project defaults to ignore_missing=True, so a missing project
        # is surfaced here as an explicit SDKException with the name included.
        project = self.identity.find_project(
            name_or_id=name_or_id,
            domain_id=domain_id,
        )
        if not project:
            raise exceptions.SDKException("Project %s not found." % name_or_id)
        # Only forward 'enabled' when the caller actually passed it, so an
        # omitted argument does not clobber the existing state.
        if enabled is not None:
            kwargs.update({'enabled': enabled})
        project = self.identity.update_project(project, **kwargs)
        return project

    def create_project(
        self,
        name,
        domain_id,
        description=None,
        enabled=True,
        **kwargs,
    ):
        """Create a project.

        :param name: Name of the project.
        :param domain_id: ID of the domain that will own the project.
        :param description: Description of the project.
        :param enabled: Whether the project is enabled (default True).
        :returns: An identity ``Project`` object.
        """
        # Note 'enabled' is mapped to the SDK attribute name 'is_enabled'.
        attrs = dict(
            name=name,
            description=description,
            domain_id=domain_id,
            is_enabled=enabled,
        )
        if kwargs:
            attrs.update(kwargs)
        return self.identity.create_project(**attrs)

    def delete_project(self, name_or_id, domain_id=None):
        """Delete a project.

        :param name_or_id: Name or unique ID of the project.
        :param domain_id: Domain ID to scope the retrieved project.
        :returns: True if delete succeeded, False if the project was not
            found.
:raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call """ try: project = self.identity.find_project( name_or_id=name_or_id, ignore_missing=True, domain_id=domain_id ) if not project: self.log.debug("Project %s not found for deleting", name_or_id) return False self.identity.delete_project(project) return True except exceptions.SDKException: self.log.exception( "Error in deleting project {project}".format( project=name_or_id ) ) return False @_utils.valid_kwargs('domain_id', 'name') def list_users(self, **kwargs): """List users. :param name: :param domain_id: Domain ID to scope the retrieved users. :returns: A list of identity ``User`` objects. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return list(self.identity.users(**kwargs)) def search_users(self, name_or_id=None, filters=None, domain_id=None): """Search users. :param name_or_id: Name or ID of the user(s). :param domain_id: Domain ID to scope the retrieved users. :param filters: dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of identity ``User`` objects :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. 
""" kwargs = {} # NOTE(jdwidari) if name_or_id isn't UUID like then make use of server- # side filter for user name https://bit.ly/2qh0Ijk # especially important when using LDAP and using page to limit results if name_or_id and not _utils._is_uuid_like(name_or_id): kwargs['name'] = name_or_id if domain_id: kwargs['domain_id'] = domain_id users = self.list_users(**kwargs) return _utils._filter_list(users, name_or_id, filters) # TODO(stephenfin): Remove 'filters' in a future major version # TODO(stephenfin): Remove 'kwargs' since it doesn't do anything @_utils.valid_kwargs('domain_id') def get_user(self, name_or_id, filters=None, **kwargs): """Get exactly one user. :param name_or_id: Name or unique ID of the user. :param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: an identity ``User`` object :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return _utils._get_entity(self, 'user', name_or_id, filters, **kwargs) # TODO(stephenfin): Remove normalize since it doesn't do anything def get_user_by_id(self, user_id, normalize=True): """Get a user by ID. 
:param string user_id: user ID :returns: an identity ``User`` object """ return self.identity.get_user(user_id) @_utils.valid_kwargs( 'name', 'email', 'enabled', 'domain_id', 'password', 'description', 'default_project', ) def update_user(self, name_or_id, **kwargs): user_kwargs = {} if 'domain_id' in kwargs and kwargs['domain_id']: user_kwargs['domain_id'] = kwargs['domain_id'] user = self.get_user(name_or_id, **user_kwargs) # TODO(mordred) When this changes to REST, force interface=admin # in the adapter call if it's an admin force call (and figure out how # to make that disctinction) # NOTE(samueldmq): now this is a REST call and domain_id is dropped # if None. keystoneclient drops keys with None values. if 'domain_id' in kwargs and kwargs['domain_id'] is None: del kwargs['domain_id'] user = self.identity.update_user(user, **kwargs) return user def create_user( self, name, password=None, email=None, default_project=None, enabled=True, domain_id=None, description=None, ): """Create a user.""" params = self._get_identity_params(domain_id, default_project) params.update({'name': name, 'email': email, 'enabled': enabled}) if password is not None: params['password'] = password if description is not None: params['description'] = description user = self.identity.create_user(**params) return user @_utils.valid_kwargs('domain_id') def delete_user(self, name_or_id, **kwargs): try: user = self.get_user(name_or_id, **kwargs) if not user: self.log.debug(f"User {name_or_id} not found for deleting") return False self.identity.delete_user(user) return True except exceptions.SDKException: self.log.exception(f"Error in deleting user {name_or_id}") return False def _get_user_and_group(self, user_name_or_id, group_name_or_id): user = self.get_user(user_name_or_id) if not user: raise exceptions.SDKException(f'User {user_name_or_id} not found') group = self.get_group(group_name_or_id) if not group: raise exceptions.SDKException( f'Group {group_name_or_id} not found' ) return (user, 
                group)

    def add_user_to_group(self, name_or_id, group_name_or_id):
        """Add a user to a group.

        :param name_or_id: Name or unique ID of the user.
        :param group_name_or_id: Group name or ID
        :raises: :class:`~openstack.exceptions.SDKException` if something
            goes wrong during the OpenStack API call
        """
        # _get_user_and_group raises SDKException if either side is missing,
        # so by this point both objects are known to exist.
        user, group = self._get_user_and_group(name_or_id, group_name_or_id)
        self.identity.add_user_to_group(user, group)

    def is_user_in_group(self, name_or_id, group_name_or_id):
        """Check to see if a user is in a group.

        :param name_or_id: Name or unique ID of the user.
        :param group_name_or_id: Group name or ID
        :returns: True if user is in the group, False otherwise
        :raises: :class:`~openstack.exceptions.SDKException` if something
            goes wrong during the OpenStack API call
        """
        user, group = self._get_user_and_group(name_or_id, group_name_or_id)
        return self.identity.check_user_in_group(user, group)

    def remove_user_from_group(self, name_or_id, group_name_or_id):
        """Remove a user from a group.

        :param name_or_id: Name or unique ID of the user.
        :param group_name_or_id: Group name or ID
        :raises: :class:`~openstack.exceptions.SDKException` if something
            goes wrong during the OpenStack API call
        """
        user, group = self._get_user_and_group(name_or_id, group_name_or_id)
        self.identity.remove_user_from_group(user, group)

    # 'type' and 'service_type' are accepted as synonyms; the body below
    # (continuing past this span) prefers 'type' when both are given.
    @_utils.valid_kwargs('type', 'service_type', 'description')
    def create_service(self, name, enabled=True, **kwargs):
        """Create a service.

        :param name: Service name.
        :param type: Service type. (type or service_type required.)
        :param service_type: Service type. (type or service_type required.)
        :param description: Service description (optional).
        :param enabled: Whether the service is enabled (v3 only)
        :returns: an identity ``Service`` object
        :raises: :class:`~openstack.exceptions.SDKException` if something goes
            wrong during the OpenStack API call.
""" type_ = kwargs.pop('type', None) service_type = kwargs.pop('service_type', None) # TODO(mordred) When this changes to REST, force interface=admin # in the adapter call kwargs['type'] = type_ or service_type kwargs['is_enabled'] = enabled kwargs['name'] = name return self.identity.create_service(**kwargs) @_utils.valid_kwargs( 'name', 'enabled', 'type', 'service_type', 'description' ) def update_service(self, name_or_id, **kwargs): # NOTE(SamYaple): Keystone v3 only accepts 'type' but shade accepts # both 'type' and 'service_type' with a preference # towards 'type' type_ = kwargs.pop('type', None) service_type = kwargs.pop('service_type', None) if type_ or service_type: kwargs['type'] = type_ or service_type service = self.get_service(name_or_id) return self.identity.update_service(service, **kwargs) def list_services(self): """List all Keystone services. :returns: A list of identity ``Service`` object :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return list(self.identity.services()) def search_services(self, name_or_id=None, filters=None): """Search Keystone services. :param name_or_id: Name or ID of the service(s). :param filters: dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: a list of identity ``Service`` objects :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ services = self.list_services() return _utils._filter_list(services, name_or_id, filters) # TODO(stephenfin): Remove 'filters' since it's a noop def get_service(self, name_or_id, filters=None): """Get exactly one Keystone service. :param name_or_id: Name or unique ID of the service. 
:returns: an identity ``Service`` object :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call or if multiple matches are found. """ return _utils._get_entity(self, 'service', name_or_id, filters) def delete_service(self, name_or_id): """Delete a Keystone service. :param name_or_id: Name or unique ID of the service. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call """ service = self.get_service(name_or_id=name_or_id) if service is None: self.log.debug("Service %s not found for deleting", name_or_id) return False try: self.identity.delete_service(service) return True except exceptions.SDKException: self.log.exception( 'Failed to delete service {id}'.format(id=service['id']) ) return False @_utils.valid_kwargs('public_url', 'internal_url', 'admin_url') def create_endpoint( self, service_name_or_id, url=None, interface=None, region=None, enabled=True, **kwargs, ): """Create a Keystone endpoint. :param service_name_or_id: Service name or id for this endpoint. :param url: URL of the endpoint :param interface: Interface type of the endpoint :param public_url: Endpoint public URL. :param internal_url: Endpoint internal URL. :param admin_url: Endpoint admin URL. :param region: Endpoint region. :param enabled: Whether the endpoint is enabled :returns: A list of identity ``Endpoint`` objects :raises: :class:`~openstack.exceptions.SDKException` if the service cannot be found or if something goes wrong during the OpenStack API call. 
""" public_url = kwargs.pop('public_url', None) internal_url = kwargs.pop('internal_url', None) admin_url = kwargs.pop('admin_url', None) if (url or interface) and (public_url or internal_url or admin_url): raise exceptions.SDKException( "create_endpoint takes either url and interface OR " "public_url, internal_url, admin_url" ) service = self.get_service(name_or_id=service_name_or_id) if service is None: raise exceptions.SDKException( "service {service} not found".format( service=service_name_or_id ) ) endpoints_args = [] if url: # v3 in use, v3-like arguments, one endpoint created endpoints_args.append( { 'url': url, 'interface': interface, 'service_id': service['id'], 'enabled': enabled, 'region_id': region, } ) else: # v3 in use, v2.0-like arguments, one endpoint created for each # interface url provided endpoint_args = { 'region_id': region, 'enabled': enabled, 'service_id': service['id'], } if public_url: endpoint_args.update( {'url': public_url, 'interface': 'public'} ) endpoints_args.append(endpoint_args.copy()) if internal_url: endpoint_args.update( {'url': internal_url, 'interface': 'internal'} ) endpoints_args.append(endpoint_args.copy()) if admin_url: endpoint_args.update({'url': admin_url, 'interface': 'admin'}) endpoints_args.append(endpoint_args.copy()) endpoints = [] for args in endpoints_args: endpoints.append(self.identity.create_endpoint(**args)) return endpoints @_utils.valid_kwargs( 'enabled', 'service_name_or_id', 'url', 'interface', 'region' ) def update_endpoint(self, endpoint_id, **kwargs): service_name_or_id = kwargs.pop('service_name_or_id', None) if service_name_or_id is not None: kwargs['service_id'] = service_name_or_id if 'region' in kwargs: kwargs['region_id'] = kwargs.pop('region') return self.identity.update_endpoint(endpoint_id, **kwargs) def list_endpoints(self): """List Keystone endpoints. 
:returns: A list of identity ``Endpoint`` objects :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return list(self.identity.endpoints()) def search_endpoints(self, id=None, filters=None): """List Keystone endpoints. :param id: ID of endpoint(s). :param filters: dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of identity ``Endpoint`` objects :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ # NOTE(SamYaple): With keystone v3 we can filter directly via the # the keystone api, but since the return of all the endpoints even in # large environments is small, we can continue to filter in shade just # like the v2 api. endpoints = self.list_endpoints() return _utils._filter_list(endpoints, id, filters) # TODO(stephenfin): Remove 'filters' since it's a noop def get_endpoint(self, id, filters=None): """Get exactly one Keystone endpoint. :param id: ID of endpoint. :returns: An identity ``Endpoint`` object """ return _utils._get_entity(self, 'endpoint', id, filters) def delete_endpoint(self, id): """Delete a Keystone endpoint. :param id: ID of the endpoint to delete. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. 
""" endpoint = self.get_endpoint(id=id) if endpoint is None: self.log.debug("Endpoint %s not found for deleting", id) return False try: self.identity.delete_endpoint(id) return True except exceptions.SDKException: self.log.exception(f"Failed to delete endpoint {id}") return False def create_domain(self, name, description=None, enabled=True): """Create a domain. :param name: The name of the domain. :param description: A description of the domain. :param enabled: Is the domain enabled or not (default True). :returns: The created identity ``Endpoint`` object. :raises: :class:`~openstack.exceptions.SDKException` if the domain cannot be created. """ domain_ref = {'name': name, 'enabled': enabled} if description is not None: domain_ref['description'] = description return self.identity.create_domain(**domain_ref) # TODO(stephenfin): domain_id and name_or_id are the same thing now; # deprecate one of them def update_domain( self, domain_id=None, name=None, description=None, enabled=None, name_or_id=None, ): """Update a Keystone domain :param domain_id: :param name: :param description: :param enabled: :param name_or_id: Name or unique ID of the domain. :returns: The updated identity ``Domain`` object. 
:raises: :class:`~openstack.exceptions.SDKException` if the domain cannot be updated """ if domain_id is None: if name_or_id is None: raise exceptions.SDKException( "You must pass either domain_id or name_or_id value" ) dom = self.get_domain(None, name_or_id) if dom is None: raise exceptions.SDKException( f"Domain {name_or_id} not found for updating" ) domain_id = dom['id'] domain_ref = {} domain_ref.update({'name': name} if name else {}) domain_ref.update({'description': description} if description else {}) domain_ref.update({'enabled': enabled} if enabled is not None else {}) return self.identity.update_domain(domain_id, **domain_ref) # TODO(stephenfin): domain_id and name_or_id are the same thing now; # deprecate one of them def delete_domain(self, domain_id=None, name_or_id=None): """Delete a Keystone domain. :param domain_id: ID of the domain to delete. :param name_or_id: Name or unique ID of the domain. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ try: if domain_id is None: if name_or_id is None: raise exceptions.SDKException( "You must pass either domain_id or name_or_id value" ) dom = self.get_domain(name_or_id=name_or_id) if dom is None: self.log.debug( "Domain %s not found for deleting", name_or_id ) return False domain_id = dom['id'] # A domain must be disabled before deleting self.identity.update_domain(domain_id, is_enabled=False) self.identity.delete_domain(domain_id, ignore_missing=False) return True except exceptions.SDKException: self.log.exception("Failed to delete domain %s" % domain_id) raise def list_domains(self, **filters): """List Keystone domains. :returns: A list of identity ``Domain`` objects. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return list(self.identity.domains(**filters)) # TODO(stephenfin): These arguments are backwards from everything else. 
def search_domains(self, filters=None, name_or_id=None): """Search Keystone domains. :param name_or_id: Name or ID of the domain(s). :param filters: dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: a list of identity ``Domain`` objects :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ if filters is None: filters = {} if name_or_id is not None: domains = self.list_domains() return _utils._filter_list(domains, name_or_id, filters) else: return self.list_domains(**filters) # TODO(stephenfin): domain_id and name_or_id are the same thing now; # deprecate one of them # TODO(stephenfin): Remove 'filters' in a future major version def get_domain(self, domain_id=None, name_or_id=None, filters=None): """Get exactly one Keystone domain. :param domain_id: ID of the domain. :param name_or_id: Name or unique ID of the domain. :param filters: **DEPRECATED** A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: an identity ``Domain`` object :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ if domain_id is None: return self.identity.find_domain(name_or_id) else: return self.identity.get_domain(domain_id) @_utils.valid_kwargs('domain_id') def list_groups(self, **kwargs): """List Keystone groups. :param domain_id: Domain ID. 
:returns: A list of identity ``Group`` objects :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return list(self.identity.groups(**kwargs)) @_utils.valid_kwargs('domain_id') def search_groups(self, name_or_id=None, filters=None, **kwargs): """Search Keystone groups. :param name_or_id: Name or ID of the group(s). :param filters: dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :param domain_id: domain id. :returns: A list of identity ``Group`` objects :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ groups = self.list_groups(**kwargs) return _utils._filter_list(groups, name_or_id, filters) # TODO(stephenfin): Remove filters since it's a noop # TODO(stephenfin): Remove kwargs since it's a noop @_utils.valid_kwargs('domain_id') def get_group(self, name_or_id, filters=None, **kwargs): """Get exactly one Keystone group. :param name_or_id: Name or unique ID of the group(s). :returns: An identity ``Group`` object :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return _utils._get_entity(self, 'group', name_or_id, filters, **kwargs) def create_group(self, name, description, domain=None): """Create a group. :param string name: Group name. :param string description: Group description. :param string domain: Domain name or ID for the group. :returns: An identity ``Group`` object :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. 
""" group_ref = {'name': name} if description: group_ref['description'] = description if domain: dom = self.get_domain(domain) if not dom: raise exceptions.SDKException( "Creating group {group} failed: Invalid domain " "{domain}".format(group=name, domain=domain) ) group_ref['domain_id'] = dom['id'] group = self.identity.create_group(**group_ref) return group def update_group( self, name_or_id, name=None, description=None, **kwargs, ): """Update an existing group :param name_or_id: Name or unique ID of the group. :param name: New group name. :param description: New group description. :returns: The updated identity ``Group`` object. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ group = self.identity.find_group(name_or_id, **kwargs) if group is None: raise exceptions.SDKException( f"Group {name_or_id} not found for updating" ) group_ref = {} if name: group_ref['name'] = name if description: group_ref['description'] = description group = self.identity.update_group(group, **group_ref) return group def delete_group(self, name_or_id): """Delete a group :param name_or_id: Name or unique ID of the group. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ try: group = self.identity.find_group(name_or_id) if group is None: self.log.debug("Group %s not found for deleting", name_or_id) return False self.identity.delete_group(group) return True except exceptions.SDKException: self.log.exception(f"Unable to delete group {name_or_id}") return False def list_roles(self, **kwargs): """List Keystone roles. :returns: A list of identity ``Role`` objects :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return list(self.identity.roles(**kwargs)) def search_roles(self, name_or_id=None, filters=None): """Seach Keystone roles. 
:param name: Name or ID of the role(s). :param filters: dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: a list of identity ``Role`` objects :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ roles = self.list_roles() return _utils._filter_list(roles, name_or_id, filters) # TODO(stephenfin): Remove filters since it's a noop # TODO(stephenfin): Remove kwargs since it's a noop @_utils.valid_kwargs('domain_id') def get_role(self, name_or_id, filters=None, **kwargs): """Get a Keystone role. :param name_or_id: Name or unique ID of the role. :returns: An identity ``Role`` object if found, else None. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return _utils._get_entity(self, 'role', name_or_id, filters, **kwargs) def _keystone_v3_role_assignments(self, **filters): # NOTE(samueldmq): different parameters have different representation # patterns as query parameters in the call to the list role assignments # API. The code below handles each set of patterns separately and # renames the parameters names accordingly, ignoring 'effective', # 'include_names' and 'include_subtree' whose do not need any renaming. for k in ('group', 'role', 'user'): if k in filters: try: filters[k + '.id'] = filters[k].id except AttributeError: # Also this goes away in next patches filters[k + '.id'] = filters[k] del filters[k] for k in ('project', 'domain'): if k in filters: try: filters['scope.' + k + '.id'] = filters[k].id except AttributeError: # NOTE(gtema): will be dropped once domains are switched to # proxy filters['scope.' 
+ k + '.id'] = filters[k] del filters[k] if 'inherited_to' in filters: filters['scope.OS-INHERIT:inherited_to'] = filters['inherited_to'] del filters['inherited_to'] elif 'os_inherit_extension_inherited_to' in filters: warnings.warn( "os_inherit_extension_inherited_to is deprecated. Use " "inherited_to instead.", os_warnings.OpenStackDeprecationWarning, ) filters['scope.OS-INHERIT:inherited_to'] = filters[ 'os_inherit_extension_inherited_to' ] del filters['os_inherit_extension_inherited_to'] return list(self.identity.role_assignments(**filters)) def list_role_assignments(self, filters=None): """List Keystone role assignments :param dict filters: Dict of filter conditions. Acceptable keys are: * 'user' (string) - User ID to be used as query filter. * 'group' (string) - Group ID to be used as query filter. * 'project' (string) - Project ID to be used as query filter. * 'domain' (string) - Domain ID to be used as query filter. * 'system' (string) - System name to be used as query filter. * 'role' (string) - Role ID to be used as query filter. * 'inherited_to' (string) - Return inherited role assignments for either 'projects' or 'domains'. * 'os_inherit_extension_inherited_to' (string) - Deprecated; use 'inherited_to' instead. * 'effective' (boolean) - Return effective role assignments. * 'include_subtree' (boolean) - Include subtree 'user' and 'group' are mutually exclusive, as are 'domain' and 'project'. :returns: A list of identity :class:`openstack.identity.v3.role_assignment.RoleAssignment` objects :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ # NOTE(samueldmq): although 'include_names' is a valid query parameter # in the keystone v3 list role assignments API, it would have NO effect # on shade due to normalization. It is not documented as an acceptable # filter in the docs above per design! 
if not filters: filters = {} # NOTE(samueldmq): the docs above say filters are *IDs*, though if # dict or Resource objects are passed, this still works for backwards # compatibility as keystoneclient allows either IDs or objects to be # passed in. # TODO(samueldmq): fix the docs above to advertise Resource objects # can be provided as parameters too for k, v in filters.items(): if isinstance(v, dict): filters[k] = v['id'] for k in ['role', 'group', 'user']: if k in filters: filters['%s_id' % k] = filters.pop(k) for k in ['domain', 'project']: if k in filters: filters['scope_%s_id' % k] = filters.pop(k) if 'system' in filters: system_scope = filters.pop('system') filters['scope.system'] = system_scope if 'os_inherit_extension_inherited_to' in filters: warnings.warn( "os_inherit_extension_inherited_to is deprecated. Use " "inherited_to instead.", os_warnings.OpenStackDeprecationWarning, ) filters['inherited_to'] = filters.pop( 'os_inherit_extension_inherited_to' ) return list(self.identity.role_assignments(**filters)) @_utils.valid_kwargs('domain_id') def create_role(self, name, **kwargs): """Create a Keystone role. :param string name: The name of the role. :param domain_id: domain id (v3) :returns: an identity ``Role`` object :raises: :class:`~openstack.exceptions.SDKException` if the role cannot be created """ kwargs['name'] = name return self.identity.create_role(**kwargs) @_utils.valid_kwargs('domain_id') def update_role(self, name_or_id, name, **kwargs): """Update a Keystone role. :param name_or_id: Name or unique ID of the role. 
:param string name: The new role name :param domain_id: domain id :returns: an identity ``Role`` object :raises: :class:`~openstack.exceptions.SDKException` if the role cannot be created """ role = self.get_role(name_or_id, **kwargs) if role is None: self.log.debug("Role %s not found for updating", name_or_id) return False return self.identity.update_role(role, name=name, **kwargs) @_utils.valid_kwargs('domain_id') def delete_role(self, name_or_id, **kwargs): """Delete a Keystone role. :param name_or_id: Name or unique ID of the role. :param domain_id: domain id (v3) :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ role = self.get_role(name_or_id, **kwargs) if role is None: self.log.debug("Role %s not found for deleting", name_or_id) return False try: self.identity.delete_role(role) return True except exceptions.SDKException: self.log.exception(f"Unable to delete role {name_or_id}") raise def _get_grant_revoke_params( self, role, user=None, group=None, project=None, domain=None, system=None, ): data = {} search_args = {} if domain: data['domain'] = self.identity.find_domain( domain, ignore_missing=False ) # We have domain. 
We should use it for further searching user, # group, role, project search_args['domain_id'] = data['domain'].id data['role'] = self.identity.find_role(name_or_id=role) if not data['role']: raise exceptions.SDKException(f'Role {role} not found.') if user: # use cloud.get_user to save us from bad searching by name data['user'] = self.get_user(user, filters=search_args) if group: data['group'] = self.identity.find_group( group, ignore_missing=False, **search_args ) if data.get('user') and data.get('group'): raise exceptions.SDKException( 'Specify either a group or a user, not both' ) if data.get('user') is None and data.get('group') is None: raise exceptions.SDKException( 'Must specify either a user or a group' ) if project is None and domain is None and system is None: raise exceptions.SDKException( 'Must specify either a domain, project or system' ) if project: data['project'] = self.identity.find_project( project, ignore_missing=False, **search_args ) return data def grant_role( self, name_or_id, user=None, group=None, project=None, domain=None, system=None, wait=False, timeout=60, ): """Grant a role to a user. :param string name_or_id: Name or unique ID of the role. :param string user: The name or id of the user. :param string group: The name or id of the group. (v3) :param string project: The name or id of the project. :param string domain: The id of the domain. (v3) :param bool system: The name of the system. (v3) :param bool wait: Wait for role to be granted :param int timeout: Timeout to wait for role to be granted NOTE: domain is a required argument when the grant is on a project, user or group specified by name. In that situation, they are all considered to be in that domain. If different domains are in use in the same role grant, it is required to specify those by ID. NOTE: for wait and timeout, sometimes granting roles is not instantaneous. 
NOTE: precedence is given first to project, then domain, then system :returns: True if the role is assigned, otherwise False :raises: :class:`~openstack.exceptions.SDKException` if the role cannot be granted """ data = self._get_grant_revoke_params( name_or_id, user=user, group=group, project=project, domain=domain, system=system, ) user = data.get('user') group = data.get('group') project = data.get('project') domain = data.get('domain') role = data.get('role') if project: # Proceed with project - precedence over domain and system if user: has_role = self.identity.validate_user_has_project_role( project, user, role ) if has_role: self.log.debug('Assignment already exists') return False self.identity.assign_project_role_to_user(project, user, role) else: has_role = self.identity.validate_group_has_project_role( project, group, role ) if has_role: self.log.debug('Assignment already exists') return False self.identity.assign_project_role_to_group( project, group, role ) elif domain: # Proceed with domain - precedence over system if user: has_role = self.identity.validate_user_has_domain_role( domain, user, role ) if has_role: self.log.debug('Assignment already exists') return False self.identity.assign_domain_role_to_user(domain, user, role) else: has_role = self.identity.validate_group_has_domain_role( domain, group, role ) if has_role: self.log.debug('Assignment already exists') return False self.identity.assign_domain_role_to_group(domain, group, role) else: # Proceed with system # System name must be 'all' due to checks performed in # _get_grant_revoke_params if user: has_role = self.identity.validate_user_has_system_role( user, role, system ) if has_role: self.log.debug('Assignment already exists') return False self.identity.assign_system_role_to_user(user, role, system) else: has_role = self.identity.validate_group_has_system_role( group, role, system ) if has_role: self.log.debug('Assignment already exists') return False 
self.identity.assign_system_role_to_group(group, role, system) return True def revoke_role( self, name_or_id, user=None, group=None, project=None, domain=None, system=None, wait=False, timeout=60, ): """Revoke a role from a user. :param string name_or_id: Name or unique ID of the role. :param string user: The name or id of the user. :param string group: The name or id of the group. (v3) :param string project: The name or id of the project. :param string domain: The id of the domain. (v3) :param bool system: The name of the system. (v3) :param bool wait: Wait for role to be revoked :param int timeout: Timeout to wait for role to be revoked NOTE: for wait and timeout, sometimes revoking roles is not instantaneous. NOTE: project is required for keystone v2 NOTE: precedence is given first to project, then domain, then system :returns: True if the role is revoke, otherwise False :raises: :class:`~openstack.exceptions.SDKException` if the role cannot be removed """ data = self._get_grant_revoke_params( name_or_id, user=user, group=group, project=project, domain=domain, system=system, ) user = data.get('user') group = data.get('group') project = data.get('project') domain = data.get('domain') role = data.get('role') if project: # Proceed with project - precedence over domain and system if user: has_role = self.identity.validate_user_has_project_role( project, user, role ) if not has_role: self.log.debug('Assignment does not exists') return False self.identity.unassign_project_role_from_user( project, user, role ) else: has_role = self.identity.validate_group_has_project_role( project, group, role ) if not has_role: self.log.debug('Assignment does not exists') return False self.identity.unassign_project_role_from_group( project, group, role ) elif domain: # Proceed with domain - precedence over system if user: has_role = self.identity.validate_user_has_domain_role( domain, user, role ) if not has_role: self.log.debug('Assignment does not exists') return False 
self.identity.unassign_domain_role_from_user( domain, user, role ) else: has_role = self.identity.validate_group_has_domain_role( domain, group, role ) if not has_role: self.log.debug('Assignment does not exists') return False self.identity.unassign_domain_role_from_group( domain, group, role ) else: # Proceed with system # System name must be 'all' due to checks performed in # _get_grant_revoke_params if user: has_role = self.identity.validate_user_has_system_role( user, role, system ) if not has_role: self.log.debug('Assignment does not exist') return False self.identity.unassign_system_role_from_user( user, role, system ) else: has_role = self.identity.validate_group_has_system_role( group, role, system ) if not has_role: self.log.debug('Assignment does not exist') return False self.identity.unassign_system_role_from_group( group, role, system ) return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_image.py0000664000175000017500000003171100000000000021122 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from openstack.cloud import _utils from openstack.cloud import openstackcloud from openstack import exceptions from openstack import utils class ImageCloudMixin(openstackcloud._OpenStackCloudMixin): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.image_api_use_tasks = self.config.config['image_api_use_tasks'] def search_images(self, name_or_id=None, filters=None): images = self.list_images() return _utils._filter_list(images, name_or_id, filters) def list_images(self, filter_deleted=True, show_all=False): """Get available images. :param filter_deleted: Control whether deleted images are returned. :param show_all: Show all images, including images that are shared but not accepted. (By default in glance v2 shared image that have not been accepted are not shown) show_all will override the value of filter_deleted to False. :returns: A list of glance images. """ if show_all: filter_deleted = False # First, try to actually get images from glance, it's more efficient images = [] params = {} image_list = [] if utils.supports_version(self.image, '2'): if show_all: params['member_status'] = 'all' image_list = list(self.image.images(**params)) for image in image_list: # The cloud might return DELETED for invalid images. # While that's cute and all, that's an implementation detail. if not filter_deleted: images.append(image) elif image.status.lower() != 'deleted': images.append(image) return images def get_image(self, name_or_id, filters=None): """Get an image by name or ID. :param name_or_id: Name or ID of the image. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: An image :class:`openstack.image.v2.image.Image` object. 
""" return _utils._get_entity(self, 'image', name_or_id, filters) def get_image_by_id(self, id): """Get a image by ID :param id: ID of the image. :returns: An image :class:`openstack.image.v2.image.Image` object. """ return self.image.get_image(id) def download_image( self, name_or_id, output_path=None, output_file=None, chunk_size=1024 * 1024, ): """Download an image by name or ID :param str name_or_id: Name or ID of the image. :param output_path: the output path to write the image to. Either this or output_file must be specified :param output_file: a file object (or file-like object) to write the image data to. Only write() will be called on this object. Either this or output_path must be specified :param int chunk_size: size in bytes to read from the wire and buffer at one time. Defaults to 1024 * 1024 = 1 MiB :returns: When output_path and output_file are not given - the bytes comprising the given Image when stream is False, otherwise a :class:`requests.Response` instance. When output_path or output_file are given - an image :class:`~openstack.image.v2.image.Image` instance. 
:raises: :class:`~openstack.exceptions.SDKException` in the event download_image is called without exactly one of either output_path or output_file :raises: :class:`~openstack.exceptions.BadRequestException` if no images are found matching the name or ID provided """ if output_path is None and output_file is None: raise exceptions.SDKException( 'No output specified, an output path or file object' ' is necessary to write the image data to' ) elif output_path is not None and output_file is not None: raise exceptions.SDKException( 'Both an output path and file object were provided,' ' however only one can be used at once' ) image = self.image.find_image(name_or_id) if not image: raise exceptions.NotFoundException( "No images with name or ID %s were found" % name_or_id, None ) return self.image.download_image( image, output=output_file or output_path, chunk_size=chunk_size ) def get_image_exclude(self, name_or_id, exclude): for image in self.search_images(name_or_id): if exclude: if exclude not in image.name: return image else: return image return None def get_image_name(self, image_id, exclude=None): image = self.get_image_exclude(image_id, exclude) if image: return image.name return None def get_image_id(self, image_name, exclude=None): image = self.get_image_exclude(image_name, exclude) if image: return image.id return None def wait_for_image(self, image, timeout=3600): image_id = image['id'] for count in utils.iterate_timeout( timeout, "Timeout waiting for image to snapshot" ): image = self.get_image(image_id) if not image: continue if image['status'] == 'active': return image elif image['status'] == 'error': raise exceptions.SDKException( f'Image {image_id} hit error state' ) def delete_image( self, name_or_id, wait=False, timeout=3600, delete_objects=True, ): """Delete an existing image. :param name_or_id: Name of the image to be deleted. :param wait: If True, waits for image to be deleted. :param timeout: Seconds to wait for image deletion. None is forever. 
:param delete_objects: If True, also deletes uploaded swift objects. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` if there are problems deleting. """ image = self.get_image(name_or_id) if not image: return False self.image.delete_image(image) # Task API means an image was uploaded to swift # TODO(gtema) does it make sense to move this into proxy? if self.image_api_use_tasks and ( self.image._IMAGE_OBJECT_KEY in image.properties or self.image._SHADE_IMAGE_OBJECT_KEY in image.properties ): container, objname = image.properties.get( self.image._IMAGE_OBJECT_KEY, image.properties.get(self.image._SHADE_IMAGE_OBJECT_KEY), ).split('/', 1) self.object_store.delete_object( objname, container=container, ) if wait: for count in utils.iterate_timeout( timeout, "Timeout waiting for the image to be deleted." ): if self.get_image(image.id) is None: break return True def create_image( self, name, filename=None, container=None, md5=None, sha256=None, disk_format=None, container_format=None, disable_vendor_agent=True, wait=False, timeout=3600, tags=None, allow_duplicates=False, meta=None, volume=None, **kwargs, ): """Upload an image. :param str name: Name of the image to create. If it is a pathname of an image, the name will be constructed from the extensionless basename of the path. :param str filename: The path to the file to upload, if needed. (optional, defaults to None) :param str container: Name of the container in swift where images should be uploaded for import if the cloud requires such a thing. (optiona, defaults to 'images') :param str md5: md5 sum of the image file. If not given, an md5 will be calculated. :param str sha256: sha256 sum of the image file. If not given, an md5 will be calculated. :param str disk_format: The disk format the image is in. (optional, defaults to the os-client-config config value for this cloud) :param str container_format: The container format the image is in. 
(optional, defaults to the os-client-config config value for this cloud) :param list tags: List of tags for this image. Each tag is a string of at most 255 chars. :param bool disable_vendor_agent: Whether or not to append metadata flags to the image to inform the cloud in question to not expect a vendor agent to be runing. (optional, defaults to True) :param bool wait: If true, waits for image to be created. Defaults to true - however, be aware that one of the upload methods is always synchronous. :param timeout: Seconds to wait for image creation. None is forever. :param allow_duplicates: If true, skips checks that enforce unique image name. (optional, defaults to False) :param meta: A dict of key/value pairs to use for metadata that bypasses automatic type conversion. :param volume: Name or ID or volume object of a volume to create an image from. Mutually exclusive with (optional, defaults to None) Additional kwargs will be passed to the image creation as additional metadata for the image and will have all values converted to string except for min_disk, min_ram, size and virtual_size which will be converted to int. If you are sure you have all of your data types correct or have an advanced need to be explicit, use meta. If you are just a normal consumer, using kwargs is likely the right choice. If a value is in meta and kwargs, meta wins. :returns: An image :class:`openstack.image.v2.image.Image` object. 
:raises: :class:`~openstack.exceptions.SDKException` if there are problems uploading """ if volume: image = self.block_storage.create_image( name=name, volume=volume, allow_duplicates=allow_duplicates, container_format=container_format, disk_format=disk_format, wait=wait, timeout=timeout, ) else: image = self.image.create_image( name, filename=filename, container=container, md5=md5, sha256=sha256, disk_format=disk_format, container_format=container_format, disable_vendor_agent=disable_vendor_agent, wait=wait, timeout=timeout, tags=tags, allow_duplicates=allow_duplicates, meta=meta, **kwargs, ) if not wait: return image try: for count in utils.iterate_timeout( timeout, "Timeout waiting for the image to finish." ): image_obj = self.get_image(image.id) if image_obj and image_obj.status not in ('queued', 'saving'): return image_obj except exceptions.ResourceTimeout: self.log.debug( "Timeout waiting for image to become ready. Deleting." ) self.delete_image(image.id, wait=True) raise def update_image_properties( self, image=None, name_or_id=None, meta=None, **properties ): image = image or name_or_id return self.image.update_image_properties( image=image, meta=meta, **properties ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_network.py0000664000175000017500000031435600000000000021542 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from openstack.cloud import _network_common from openstack.cloud import _utils from openstack.cloud import exc from openstack import exceptions class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): def _neutron_extensions(self): extensions = set() for extension in self.network.extensions(): extensions.add(extension['alias']) return extensions def _has_neutron_extension(self, extension_alias): return extension_alias in self._neutron_extensions() # TODO(stephenfin): Deprecate this in favour of the 'list' function def search_networks(self, name_or_id=None, filters=None): """Search networks :param name_or_id: Name or ID of the desired network. :param filters: A dict containing additional filters to use. e.g. {'router:external': True} :returns: A list of network ``Network`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ query = {} if name_or_id: query['name'] = name_or_id if filters: query.update(filters) return list(self.network.networks(**query)) # TODO(stephenfin): Deprecate this in favour of the 'list' function def search_routers(self, name_or_id=None, filters=None): """Search routers :param name_or_id: Name or ID of the desired router. :param filters: A dict containing additional filters to use. e.g. {'admin_state_up': True} :returns: A list of network ``Router`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ query = {} if name_or_id: query['name'] = name_or_id if filters: query.update(filters) return list(self.network.routers(**query)) # TODO(stephenfin): Deprecate this in favour of the 'list' function def search_subnets(self, name_or_id=None, filters=None): """Search subnets :param name_or_id: Name or ID of the desired subnet. :param filters: A dict containing additional filters to use. e.g. 
{'enable_dhcp': True} :returns: A list of network ``Subnet`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ query = {} if name_or_id: query['name'] = name_or_id if filters: query.update(filters) return list(self.network.subnets(**query)) # TODO(stephenfin): Deprecate this in favour of the 'list' function def search_ports(self, name_or_id=None, filters=None): """Search ports :param name_or_id: Name or ID of the desired port. :param filters: A dict containing additional filters to use. e.g. {'device_id': '2711c67a-b4a7-43dd-ace7-6187b791c3f0'} :returns: A list of network ``Port`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ # If the filter is a string, do not push the filter down to neutron; # get all the ports and filter locally. # TODO(stephenfin): '_filter_list' can handle a dict - pass it down if isinstance(filters, str): pushdown_filters = None else: pushdown_filters = filters ports = self.list_ports(pushdown_filters) return _utils._filter_list(ports, name_or_id, filters) def list_networks(self, filters=None): """List all available networks. :param filters: (optional) A dict of filter conditions to push down. :returns: A list of network ``Network`` objects. """ # If the cloud is running nova-network, just return an empty list. if not self.has_service('network'): return [] # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.network.networks(**filters)) def list_routers(self, filters=None): """List all available routers. :param filters: (optional) A dict of filter conditions to push down :returns: A list of network ``Router`` objects. """ # If the cloud is running nova-network, just return an empty list. 
if not self.has_service('network'): return [] # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.network.routers(**filters)) def list_subnets(self, filters=None): """List all available subnets. :param filters: (optional) A dict of filter conditions to push down :returns: A list of network ``Subnet`` objects. """ # If the cloud is running nova-network, just return an empty list. if not self.has_service('network'): return [] # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.network.subnets(**filters)) def list_ports(self, filters=None): """List all available ports. :param filters: (optional) A dict of filter conditions to push down :returns: A list of network ``Port`` objects. """ # If the cloud is running nova-network, just return an empty list. if not self.has_service('network'): return [] # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.network.ports(**filters)) # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this def get_qos_policy(self, name_or_id, filters=None): """Get a QoS policy by name or ID. :param name_or_id: Name or ID of the policy. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A network ``QoSPolicy`` object if found, else None. 
""" if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) if not filters: filters = {} return self.network.find_qos_policy( name_or_id=name_or_id, ignore_missing=True, **filters ) # TODO(stephenfin): Deprecate this in favour of the 'list' function def search_qos_policies(self, name_or_id=None, filters=None): """Search QoS policies :param name_or_id: Name or ID of the desired policy. :param filters: a dict containing additional filters to use. e.g. {'shared': True} :returns: A list of network ``QosPolicy`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) query = {} if name_or_id: query['name'] = name_or_id if filters: query.update(filters) return list(self.network.qos_policies(**query)) def list_qos_rule_types(self, filters=None): """List all available QoS rule types. :param filters: (optional) A dict of filter conditions to push down :returns: A list of network ``QosRuleType`` objects. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.network.qos_rule_types(**filters)) # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this def get_qos_rule_type_details(self, rule_type, filters=None): """Get a QoS rule type details by rule type name. :param rule_type: Name of the QoS rule type. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. 
Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A network ``QoSRuleType`` object if found, else None. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) if not self._has_neutron_extension('qos-rule-type-details'): raise exc.OpenStackCloudUnavailableExtension( 'qos-rule-type-details extension is not available ' 'on target cloud' ) return self.network.get_qos_rule_type(rule_type) def list_qos_policies(self, filters=None): """List all available QoS policies. :param filters: (optional) A dict of filter conditions to push down :returns: A list of network ``QosPolicy`` objects. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.network.qos_policies(**filters)) # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this def get_network(self, name_or_id, filters=None): """Get a network by name or ID. :param name_or_id: Name or ID of the network. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A network ``Network`` object if found, else None. """ if not filters: filters = {} return self.network.find_network( name_or_id=name_or_id, ignore_missing=True, **filters ) def get_network_by_id(self, id): """Get a network by ID :param id: ID of the network. :returns: A network ``Network`` object if found, else None. 
""" return self.network.get_network(id) # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this def get_router(self, name_or_id, filters=None): """Get a router by name or ID. :param name_or_id: Name or ID of the router. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A network ``Router`` object if found, else None. """ if not filters: filters = {} return self.network.find_router( name_or_id=name_or_id, ignore_missing=True, **filters ) # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this def get_subnet(self, name_or_id, filters=None): """Get a subnet by name or ID. :param name_or_id: Name or ID of the subnet. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } :returns: A network ``Subnet`` object if found, else None. """ if not filters: filters = {} return self.network.find_subnet( name_or_id=name_or_id, ignore_missing=True, **filters ) def get_subnet_by_id(self, id): """Get a subnet by ID :param id: ID of the subnet. :returns: A network ``Subnet`` object if found, else None. """ return self.network.get_subnet(id) # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this def get_port(self, name_or_id, filters=None): """Get a port by name or ID. :param name_or_id: Name or ID of the port. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. 
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A network ``Port`` object if found, else None. """ if not filters: filters = {} return self.network.find_port( name_or_id=name_or_id, ignore_missing=True, **filters ) def get_port_by_id(self, id): """Get a port by ID :param id: ID of the port. :returns: A network ``Port`` object if found, else None. """ return self.network.get_port(id) def get_subnetpool(self, name_or_id): """Get a subnetpool by name or ID. :param name_or_id: Name or ID of the subnetpool. :returns: A network ``Subnetpool`` object if found, else None. """ return self.network.find_subnet_pool( name_or_id=name_or_id, ignore_missing=True ) def create_network( self, name, shared=False, admin_state_up=True, external=False, provider=None, project_id=None, availability_zone_hints=None, port_security_enabled=None, mtu_size=None, dns_domain=None, ): """Create a network. :param string name: Name of the network being created. :param bool shared: Set the network as shared. :param bool admin_state_up: Set the network administrative state to up. :param bool external: Whether this network is externally accessible. :param dict provider: A dict of network provider options. Example:: { 'network_type': 'vlan', 'segmentation_id': 'vlan1' } :param string project_id: Specify the project ID this network will be created on (admin-only). :param types.ListType availability_zone_hints: A list of availability zone hints. :param bool port_security_enabled: Enable / Disable port security :param int mtu_size: maximum transmission unit value to address fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6. :param string dns_domain: Specify the DNS domain associated with this network. :returns: The created network ``Network`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" network = { 'name': name, 'admin_state_up': admin_state_up, } if shared: network['shared'] = shared if project_id is not None: network['project_id'] = project_id if availability_zone_hints is not None: if not isinstance(availability_zone_hints, list): raise exceptions.SDKException( "Parameter 'availability_zone_hints' must be a list" ) if not self._has_neutron_extension('network_availability_zone'): raise exc.OpenStackCloudUnavailableExtension( 'network_availability_zone extension is not available on ' 'target cloud' ) network['availability_zone_hints'] = availability_zone_hints if provider: if not isinstance(provider, dict): raise exceptions.SDKException( "Parameter 'provider' must be a dict" ) # Only pass what we know for attr in ( 'physical_network', 'network_type', 'segmentation_id', ): if attr in provider: arg = "provider:" + attr network[arg] = provider[attr] # Do not send 'router:external' unless it is explicitly # set since sending it *might* cause "Forbidden" errors in # some situations. It defaults to False in the client, anyway. if external: network['router:external'] = True if port_security_enabled is not None: if not isinstance(port_security_enabled, bool): raise exceptions.SDKException( "Parameter 'port_security_enabled' must be a bool" ) network['port_security_enabled'] = port_security_enabled if mtu_size: if not isinstance(mtu_size, int): raise exceptions.SDKException( "Parameter 'mtu_size' must be an integer." ) if not mtu_size >= 68: raise exceptions.SDKException( "Parameter 'mtu_size' must be greater than 67." ) network['mtu'] = mtu_size if dns_domain: network['dns_domain'] = dns_domain network = self.network.create_network(**network) # Reset cache so the new network is picked up self._reset_network_caches() return network @_utils.valid_kwargs( "name", "shared", "admin_state_up", "external", "provider", "mtu_size", "port_security_enabled", "dns_domain", ) def update_network(self, name_or_id, **kwargs): """Update a network. 
:param string name_or_id: Name or ID of the network being updated. :param string name: New name of the network. :param bool shared: Set the network as shared. :param bool admin_state_up: Set the network administrative state to up. :param bool external: Whether this network is externally accessible. :param dict provider: A dict of network provider options. Example:: { 'network_type': 'vlan', 'segmentation_id': 'vlan1' } :param int mtu_size: New maximum transmission unit value to address fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6. :param bool port_security_enabled: Enable or disable port security. :param string dns_domain: Specify the DNS domain associated with this network. :returns: The updated network ``Network`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ provider = kwargs.pop('provider', None) if provider: if not isinstance(provider, dict): raise exceptions.SDKException( "Parameter 'provider' must be a dict" ) for key in ('physical_network', 'network_type', 'segmentation_id'): if key in provider: kwargs['provider:' + key] = provider.pop(key) if 'external' in kwargs: kwargs['router:external'] = kwargs.pop('external') if 'port_security_enabled' in kwargs: if not isinstance(kwargs['port_security_enabled'], bool): raise exceptions.SDKException( "Parameter 'port_security_enabled' must be a bool" ) if 'mtu_size' in kwargs: if not isinstance(kwargs['mtu_size'], int): raise exceptions.SDKException( "Parameter 'mtu_size' must be an integer." ) if kwargs['mtu_size'] < 68: raise exceptions.SDKException( "Parameter 'mtu_size' must be greater than 67." ) kwargs['mtu'] = kwargs.pop('mtu_size') network = self.get_network(name_or_id) if not network: raise exceptions.SDKException("Network %s not found." % name_or_id) network = self.network.update_network(network, **kwargs) self._reset_network_caches() return network def delete_network(self, name_or_id): """Delete a network. 
:param name_or_id: Name or ID of the network being deleted. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ network = self.get_network(name_or_id) if not network: self.log.debug("Network %s not found for deleting", name_or_id) return False self.network.delete_network(network) # Reset cache so the deleted network is removed self._reset_network_caches() return True def set_network_quotas(self, name_or_id, **kwargs): """Set a network quota in a project :param name_or_id: project name or id :param kwargs: key/value pairs of quota name and quota value :raises: :class:`~openstack.exceptions.SDKException` if the resource to set the quota does not exist. """ proj = self.identity.find_project(name_or_id) if not proj: raise exceptions.SDKException( f"Project {name_or_id} was requested by was not found " f"on the cloud" ) self.network.update_quota(proj.id, **kwargs) def get_network_quotas(self, name_or_id, details=False): """Get network quotas for a project :param name_or_id: project name or id :param details: if set to True it will return details about usage of quotas by given project :returns: A network ``Quota`` object if found, else None. :raises: :class:`~openstack.exceptions.SDKException` if it's not a valid project """ proj = self.identity.find_project(name_or_id) if not proj: raise exc.OpenStackCloudException( f"Project {name_or_id} was requested by was not found " f"on the cloud" ) return self.network.get_quota(proj.id, details) def get_network_extensions(self): """Get Cloud provided network extensions :returns: A set of Neutron extension aliases. 
""" return self._neutron_extensions() def delete_network_quotas(self, name_or_id): """Delete network quotas for a project :param name_or_id: project name or id :returns: dict with the quotas :raises: :class:`~openstack.exceptions.SDKException` if it's not a valid project or the network client call failed """ proj = self.identity.find_project(name_or_id) if not proj: raise exceptions.SDKException( f"Project {name_or_id} was requested by was not found " f"on the cloud" ) self.network.delete_quota(proj.id) @_utils.valid_kwargs( 'action', 'description', 'destination_firewall_group_id', 'destination_ip_address', 'destination_port', 'enabled', 'ip_version', 'name', 'project_id', 'protocol', 'shared', 'source_firewall_group_id', 'source_ip_address', 'source_port', ) def create_firewall_rule(self, **kwargs): """ Creates firewall rule. :param action: Action performed on traffic. Valid values: allow, deny Defaults to deny. :param description: Human-readable description. :param destination_firewall_group_id: ID of destination firewall group. :param destination_ip_address: IPv4-, IPv6 address or CIDR. :param destination_port: Port or port range (e.g. 80:90) :param bool enabled: Status of firewall rule. You can disable rules without disassociating them from firewall policies. Defaults to True. :param int ip_version: IP Version. Valid values: 4, 6 Defaults to 4. :param name: Human-readable name. :param project_id: Project id. :param protocol: IP protocol. Valid values: icmp, tcp, udp, null :param bool shared: Visibility to other projects. Defaults to False. :param source_firewall_group_id: ID of source firewall group. :param source_ip_address: IPv4-, IPv6 address or CIDR. :param source_port: Port or port range (e.g. 80:90) :raises: BadRequestException if parameters are malformed :returns: The created network ``FirewallRule`` object. """ return self.network.create_firewall_rule(**kwargs) def delete_firewall_rule(self, name_or_id, filters=None): """ Deletes firewall rule. 
Prints debug message in case to-be-deleted resource was not found. :param name_or_id: firewall rule name or id :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :raises: DuplicateResource on multiple matches :returns: True if resource is successfully deleted, False otherwise. :rtype: bool """ if not filters: filters = {} try: firewall_rule = self.network.find_firewall_rule( name_or_id, ignore_missing=False, **filters ) self.network.delete_firewall_rule( firewall_rule, ignore_missing=False ) except exceptions.NotFoundException: self.log.debug( 'Firewall rule %s not found for deleting', name_or_id ) return False return True # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this def get_firewall_rule(self, name_or_id, filters=None): """ Retrieves a single firewall rule. :param name_or_id: firewall rule name or id :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :raises: DuplicateResource on multiple matches :returns: A network ``FirewallRule`` object if found, else None. """ if not filters: filters = {} return self.network.find_firewall_rule( name_or_id, ignore_missing=True, **filters ) def list_firewall_rules(self, filters=None): """ Lists firewall rules. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. 
Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of network ``FirewallRule`` objects. :rtype: list[FirewallRule] """ if not filters: filters = {} return list(self.network.firewall_rules(**filters)) @_utils.valid_kwargs( 'action', 'description', 'destination_firewall_group_id', 'destination_ip_address', 'destination_port', 'enabled', 'ip_version', 'name', 'project_id', 'protocol', 'shared', 'source_firewall_group_id', 'source_ip_address', 'source_port', ) def update_firewall_rule(self, name_or_id, filters=None, **kwargs): """ Updates firewall rule. :param name_or_id: firewall rule name or id :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :param kwargs: firewall rule update parameters. See create_firewall_rule docstring for valid parameters. :returns: The updated network ``FirewallRule`` object. :raises: BadRequestException if parameters are malformed :raises: NotFoundException if resource is not found """ if not filters: filters = {} firewall_rule = self.network.find_firewall_rule( name_or_id, ignore_missing=False, **filters ) return self.network.update_firewall_rule(firewall_rule, **kwargs) def _get_firewall_rule_ids(self, name_or_id_list, filters=None): """ Takes a list of firewall rule name or ids, looks them up and returns a list of firewall rule ids. Used by `create_firewall_policy` and `update_firewall_policy`. 
:param list[str] name_or_id_list: firewall rule name or id list :param dict filters: optional filters :raises: DuplicateResource on multiple matches :raises: NotFoundException if resource is not found :return: list of firewall rule ids :rtype: list[str] """ if not filters: filters = {} ids_list = [] for name_or_id in name_or_id_list: ids_list.append( self.network.find_firewall_rule( name_or_id, ignore_missing=False, **filters )['id'] ) return ids_list @_utils.valid_kwargs( 'audited', 'description', 'firewall_rules', 'name', 'project_id', 'shared', ) def create_firewall_policy(self, **kwargs): """ Create firewall policy. :param bool audited: Status of audition of firewall policy. Set to False each time the firewall policy or the associated firewall rules are changed. Has to be explicitly set to True. :param description: Human-readable description. :param list[str] firewall_rules: List of associated firewall rules. :param name: Human-readable name. :param project_id: Project id. :param bool shared: Visibility to other projects. Defaults to False. :raises: BadRequestException if parameters are malformed :raises: NotFoundException if a resource from firewall_list not found :returns: The created network ``FirewallPolicy`` object. """ if 'firewall_rules' in kwargs: kwargs['firewall_rules'] = self._get_firewall_rule_ids( kwargs['firewall_rules'] ) return self.network.create_firewall_policy(**kwargs) def delete_firewall_policy(self, name_or_id, filters=None): """ Deletes firewall policy. Prints debug message in case to-be-deleted resource was not found. :param name_or_id: firewall policy name or id :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. 
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :raises: DuplicateResource on multiple matches :returns: True if resource is successfully deleted, False otherwise. :rtype: bool """ if not filters: filters = {} try: firewall_policy = self.network.find_firewall_policy( name_or_id, ignore_missing=False, **filters ) self.network.delete_firewall_policy( firewall_policy, ignore_missing=False ) except exceptions.NotFoundException: self.log.debug( 'Firewall policy %s not found for deleting', name_or_id ) return False return True # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this def get_firewall_policy(self, name_or_id, filters=None): """ Retrieves a single firewall policy. :param name_or_id: firewall policy name or id :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :raises: DuplicateResource on multiple matches :returns: A network ``FirewallPolicy`` object if found, else None. """ if not filters: filters = {} return self.network.find_firewall_policy( name_or_id, ignore_missing=True, **filters ) def list_firewall_policies(self, filters=None): """ Lists firewall policies. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of network ``FirewallPolicy`` objects. 
:rtype: list[FirewallPolicy] """ if not filters: filters = {} return list(self.network.firewall_policies(**filters)) @_utils.valid_kwargs( 'audited', 'description', 'firewall_rules', 'name', 'project_id', 'shared', ) def update_firewall_policy(self, name_or_id, filters=None, **kwargs): """ Updates firewall policy. :param name_or_id: firewall policy name or id :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :param kwargs: firewall policy update parameters See create_firewall_policy docstring for valid parameters. :returns: The updated network ``FirewallPolicy`` object. :raises: BadRequestException if parameters are malformed :raises: DuplicateResource on multiple matches :raises: NotFoundException if resource is not found """ if not filters: filters = {} firewall_policy = self.network.find_firewall_policy( name_or_id, ignore_missing=False, **filters ) if 'firewall_rules' in kwargs: kwargs['firewall_rules'] = self._get_firewall_rule_ids( kwargs['firewall_rules'] ) return self.network.update_firewall_policy(firewall_policy, **kwargs) def insert_rule_into_policy( self, name_or_id, rule_name_or_id, insert_after=None, insert_before=None, filters=None, ): """Add firewall rule to a policy. Adds firewall rule to the firewall_rules list of a firewall policy. Short-circuits and returns the firewall policy early if the firewall rule id is already present in the firewall_rules list. This method doesn't do re-ordering. If you want to move a firewall rule or down the list, you have to remove and re-add it. 
:param name_or_id: firewall policy name or id :param rule_name_or_id: firewall rule name or id :param insert_after: rule name or id that should precede added rule :param insert_before: rule name or id that should succeed added rule :param dict filters: optional filters :raises: DuplicateResource on multiple matches :raises: NotFoundException if firewall policy or any of the firewall rules (inserted, after, before) is not found. :return: updated firewall policy :rtype: FirewallPolicy """ if not filters: filters = {} firewall_policy = self.network.find_firewall_policy( name_or_id, ignore_missing=False, **filters ) firewall_rule = self.network.find_firewall_rule( rule_name_or_id, ignore_missing=False ) # short-circuit if rule already in firewall_rules list # the API can't do any re-ordering of existing rules if firewall_rule['id'] in firewall_policy['firewall_rules']: self.log.debug( 'Firewall rule %s already associated with firewall policy %s', rule_name_or_id, name_or_id, ) return firewall_policy pos_params = {} if insert_after is not None: pos_params['insert_after'] = self.network.find_firewall_rule( insert_after, ignore_missing=False )['id'] if insert_before is not None: pos_params['insert_before'] = self.network.find_firewall_rule( insert_before, ignore_missing=False )['id'] return self.network.insert_rule_into_policy( firewall_policy['id'], firewall_rule['id'], **pos_params ) def remove_rule_from_policy( self, name_or_id, rule_name_or_id, filters=None ): """ Remove firewall rule from firewall policy's firewall_rules list. Short-circuits and returns firewall policy early if firewall rule is already absent from the firewall_rules list. 
:param name_or_id: firewall policy name or id :param rule_name_or_id: firewall rule name or id :param dict filters: optional filters :raises: DuplicateResource on multiple matches :raises: NotFoundException if firewall policy is not found :return: updated firewall policy :rtype: FirewallPolicy """ if not filters: filters = {} firewall_policy = self.network.find_firewall_policy( name_or_id, ignore_missing=False, **filters ) firewall_rule = self.network.find_firewall_rule(rule_name_or_id) if not firewall_rule: # short-circuit: if firewall rule is not found, # return current firewall policy self.log.debug( 'Firewall rule %s not found for removing', rule_name_or_id ) return firewall_policy if firewall_rule['id'] not in firewall_policy['firewall_rules']: # short-circuit: if firewall rule id is not associated, # log it to debug and return current firewall policy self.log.debug( 'Firewall rule %s not associated with firewall policy %s', rule_name_or_id, name_or_id, ) return firewall_policy return self.network.remove_rule_from_policy( firewall_policy['id'], firewall_rule['id'] ) @_utils.valid_kwargs( 'admin_state_up', 'description', 'egress_firewall_policy', 'ingress_firewall_policy', 'name', 'ports', 'project_id', 'shared', ) def create_firewall_group(self, **kwargs): """ Creates firewall group. The keys egress_firewall_policy and ingress_firewall_policy are looked up and mapped as egress_firewall_policy_id and ingress_firewall_policy_id respectively. Port name or ids list is transformed to port ids list before the POST request. :param bool admin_state_up: State of firewall group. Will block all traffic if set to False. Defaults to True. :param description: Human-readable description. :param egress_firewall_policy: Name or id of egress firewall policy. :param ingress_firewall_policy: Name or id of ingress firewall policy. :param name: Human-readable name. :param list[str] ports: List of associated ports (name or id) :param project_id: Project id. 
:param shared: Visibility to other projects. Defaults to False. :raises: BadRequestException if parameters are malformed :raises: DuplicateResource on multiple matches :raises: NotFoundException if (ingress-, egress-) firewall policy or a port is not found. :returns: The created network ``FirewallGroup`` object. """ self._lookup_ingress_egress_firewall_policy_ids(kwargs) if 'ports' in kwargs: kwargs['ports'] = self._get_port_ids(kwargs['ports']) return self.network.create_firewall_group(**kwargs) def delete_firewall_group(self, name_or_id, filters=None): """ Deletes firewall group. Prints debug message in case to-be-deleted resource was not found. :param name_or_id: firewall group name or id :param dict filters: optional filters :raises: DuplicateResource on multiple matches :returns: True if resource is successfully deleted, False otherwise. :rtype: bool """ if not filters: filters = {} try: firewall_group = self.network.find_firewall_group( name_or_id, ignore_missing=False, **filters ) self.network.delete_firewall_group( firewall_group, ignore_missing=False ) except exceptions.NotFoundException: self.log.debug( 'Firewall group %s not found for deleting', name_or_id ) return False return True # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this def get_firewall_group(self, name_or_id, filters=None): """ Retrieves firewall group. :param name_or_id: firewall group name or id :param dict filters: optional filters :raises: DuplicateResource on multiple matches :returns: A network ``FirewallGroup`` object if found, else None. """ if not filters: filters = {} return self.network.find_firewall_group( name_or_id, ignore_missing=True, **filters ) def list_firewall_groups(self, filters=None): """ Lists firewall groups. :returns: A list of network ``FirewallGroup`` objects. 
""" if not filters: filters = {} return list(self.network.firewall_groups(**filters)) @_utils.valid_kwargs( 'admin_state_up', 'description', 'egress_firewall_policy', 'ingress_firewall_policy', 'name', 'ports', 'project_id', 'shared', ) def update_firewall_group(self, name_or_id, filters=None, **kwargs): """ Updates firewall group. To unset egress- or ingress firewall policy, set egress_firewall_policy or ingress_firewall_policy to None. You can also set egress_firewall_policy_id and ingress_firewall_policy_id directly, which will skip the policy lookups. :param name_or_id: firewall group name or id :param dict filters: optional filters :param kwargs: firewall group update parameters See create_firewall_group docstring for valid parameters. :returns: The updated network ``FirewallGroup`` object. :raises: BadRequestException if parameters are malformed :raises: DuplicateResource on multiple matches :raises: NotFoundException if firewall group, a firewall policy (egress, ingress) or port is not found """ if not filters: filters = {} firewall_group = self.network.find_firewall_group( name_or_id, ignore_missing=False, **filters ) self._lookup_ingress_egress_firewall_policy_ids(kwargs) if 'ports' in kwargs: kwargs['ports'] = self._get_port_ids(kwargs['ports']) return self.network.update_firewall_group(firewall_group, **kwargs) def _lookup_ingress_egress_firewall_policy_ids(self, firewall_group): """ Transforms firewall_group dict IN-PLACE. Takes the value of the keys egress_firewall_policy and ingress_firewall_policy, looks up the policy ids and maps them to egress_firewall_policy_id and ingress_firewall_policy_id. Old keys which were used for the lookup are deleted. 
:param dict firewall_group: firewall group dict :raises: DuplicateResource on multiple matches :raises: NotFoundException if a firewall policy is not found """ for key in ('egress_firewall_policy', 'ingress_firewall_policy'): if key not in firewall_group: continue if firewall_group[key] is None: val = None else: val = self.network.find_firewall_policy( firewall_group[key], ignore_missing=False )['id'] firewall_group[key + '_id'] = val del firewall_group[key] @_utils.valid_kwargs( "name", "description", "shared", "default", "project_id" ) def create_qos_policy(self, **kwargs): """Create a QoS policy. :param string name: Name of the QoS policy being created. :param string description: Description of created QoS policy. :param bool shared: Set the QoS policy as shared. :param bool default: Set the QoS policy as default for project. :param string project_id: Specify the project ID this QoS policy will be created on (admin-only). :returns: The created network ``QosPolicy`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) default = kwargs.pop("default", None) if default is not None: if self._has_neutron_extension('qos-default'): kwargs['is_default'] = default else: self.log.debug( "'qos-default' extension is not available on " "target cloud" ) return self.network.create_qos_policy(**kwargs) @_utils.valid_kwargs( "name", "description", "shared", "default", "project_id" ) def update_qos_policy(self, name_or_id, **kwargs): """Update an existing QoS policy. :param string name_or_id: Name or ID of the QoS policy to update. :param string policy_name: The new name of the QoS policy. :param string description: The new description of the QoS policy. :param bool shared: If True, the QoS policy will be set as shared. :param bool default: If True, the QoS policy will be set as default for project. 
:returns: The updated network ``QosPolicyRule`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) default = kwargs.pop("default", None) if default is not None: if self._has_neutron_extension('qos-default'): kwargs['is_default'] = default else: self.log.debug( "'qos-default' extension is not available on " "target cloud" ) if not kwargs: self.log.debug("No QoS policy data to update") return curr_policy = self.network.find_qos_policy(name_or_id) if not curr_policy: raise exceptions.SDKException( "QoS policy %s not found." % name_or_id ) return self.network.update_qos_policy(curr_policy, **kwargs) def delete_qos_policy(self, name_or_id): """Delete a QoS policy. :param name_or_id: Name or ID of the policy being deleted. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(name_or_id) if not policy: self.log.debug("QoS policy %s not found for deleting", name_or_id) return False self.network.delete_qos_policy(policy) return True # TODO(stephenfin): Deprecate this in favour of the 'list' function def search_qos_bandwidth_limit_rules( self, policy_name_or_id, rule_id=None, filters=None, ): """Search QoS bandwidth limit rules :param string policy_name_or_id: Name or ID of the QoS policy to which rules should be associated. :param string rule_id: ID of searched rule. :param filters: a dict containing additional filters to use. e.g. {'max_kbps': 1000} :returns: A list of network ``QoSBandwidthLimitRule`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. 
""" rules = self.list_qos_bandwidth_limit_rules(policy_name_or_id, filters) return _utils._filter_list(rules, rule_id, filters) def list_qos_bandwidth_limit_rules(self, policy_name_or_id, filters=None): """List all available QoS bandwidth limit rules. :param string policy_name_or_id: Name or ID of the QoS policy from from rules should be listed. :param filters: (optional) A dict of filter conditions to push down :returns: A list of network ``QoSBandwidthLimitRule`` objects. :raises: ``:class:`~openstack.exceptions.BadRequestException``` if QoS policy will not be found. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list( self.network.qos_bandwidth_limit_rules( qos_policy=policy, **filters ) ) def get_qos_bandwidth_limit_rule(self, policy_name_or_id, rule_id): """Get a QoS bandwidth limit rule by name or ID. :param string policy_name_or_id: Name or ID of the QoS policy to which rule should be associated. :param rule_id: ID of the rule. :returns: A network ``QoSBandwidthLimitRule`` object if found, else None. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) return self.network.get_qos_bandwidth_limit_rule(rule_id, policy) @_utils.valid_kwargs("max_burst_kbps", "direction") def create_qos_bandwidth_limit_rule( self, policy_name_or_id, max_kbps, **kwargs, ): """Create a QoS bandwidth limit rule. 
:param string policy_name_or_id: Name or ID of the QoS policy to which rule should be associated. :param int max_kbps: Maximum bandwidth limit value (in kilobits per second). :param int max_burst_kbps: Maximum burst value (in kilobits). :param string direction: Ingress or egress. The direction in which the traffic will be limited. :returns: The created network ``QoSBandwidthLimitRule`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) if kwargs.get("direction") is not None: if not self._has_neutron_extension('qos-bw-limit-direction'): kwargs.pop("direction") self.log.debug( "'qos-bw-limit-direction' extension is not available on " "target cloud" ) kwargs['max_kbps'] = max_kbps return self.network.create_qos_bandwidth_limit_rule(policy, **kwargs) @_utils.valid_kwargs("max_kbps", "max_burst_kbps", "direction") def update_qos_bandwidth_limit_rule( self, policy_name_or_id, rule_id, **kwargs ): """Update a QoS bandwidth limit rule. :param string policy_name_or_id: Name or ID of the QoS policy to which rule is associated. :param string rule_id: ID of rule to update. :param int max_kbps: Maximum bandwidth limit value (in kilobits per second). :param int max_burst_kbps: Maximum burst value (in kilobits). :param string direction: Ingress or egress. The direction in which the traffic will be limited. :returns: The updated network ``QoSBandwidthLimitRule`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy( policy_name_or_id, ignore_missing=True ) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) if kwargs.get("direction") is not None: if not self._has_neutron_extension('qos-bw-limit-direction'): kwargs.pop("direction") self.log.debug( "'qos-bw-limit-direction' extension is not available on " "target cloud" ) if not kwargs: self.log.debug("No QoS bandwidth limit rule data to update") return curr_rule = self.network.get_qos_bandwidth_limit_rule( qos_rule=rule_id, qos_policy=policy ) if not curr_rule: raise exceptions.SDKException( "QoS bandwidth_limit_rule {rule_id} not found in policy " "{policy_id}".format(rule_id=rule_id, policy_id=policy['id']) ) return self.network.update_qos_bandwidth_limit_rule( qos_rule=curr_rule, qos_policy=policy, **kwargs ) def delete_qos_bandwidth_limit_rule(self, policy_name_or_id, rule_id): """Delete a QoS bandwidth limit rule. :param string policy_name_or_id: Name or ID of the QoS policy to which rule is associated. :param string rule_id: ID of rule to update. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) try: self.network.delete_qos_bandwidth_limit_rule( rule_id, policy, ignore_missing=False ) except exceptions.NotFoundException: self.log.debug( "QoS bandwidth limit rule {rule_id} not found in policy " "{policy_id}. 
Ignoring.".format( rule_id=rule_id, policy_id=policy['id'] ) ) return False return True # TODO(stephenfin): Deprecate this in favour of the 'list' function def search_qos_dscp_marking_rules( self, policy_name_or_id, rule_id=None, filters=None, ): """Search QoS DSCP marking rules :param string policy_name_or_id: Name or ID of the QoS policy to which rules should be associated. :param string rule_id: ID of searched rule. :param filters: a dict containing additional filters to use. e.g. {'dscp_mark': 32} :returns: A list of network ``QoSDSCPMarkingRule`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ rules = self.list_qos_dscp_marking_rules(policy_name_or_id, filters) return _utils._filter_list(rules, rule_id, filters) def list_qos_dscp_marking_rules(self, policy_name_or_id, filters=None): """List all available QoS DSCP marking rules. :param string policy_name_or_id: Name or ID of the QoS policy from from rules should be listed. :param filters: (optional) A dict of filter conditions to push down :returns: A list of network ``QoSDSCPMarkingRule`` objects. :raises: ``:class:`~openstack.exceptions.BadRequestException``` if QoS policy will not be found. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy( policy_name_or_id, ignore_missing=True ) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list(self.network.qos_dscp_marking_rules(policy, **filters)) def get_qos_dscp_marking_rule(self, policy_name_or_id, rule_id): """Get a QoS DSCP marking rule by name or ID. :param string policy_name_or_id: Name or ID of the QoS policy to which rule should be associated. 
:param rule_id: ID of the rule. :returns: A network ``QoSDSCPMarkingRule`` object if found, else None. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) return self.network.get_qos_dscp_marking_rule(rule_id, policy) def create_qos_dscp_marking_rule( self, policy_name_or_id, dscp_mark, ): """Create a QoS DSCP marking rule. :param string policy_name_or_id: Name or ID of the QoS policy to which rule should be associated. :param int dscp_mark: DSCP mark value :returns: The created network ``QoSDSCPMarkingRule`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) return self.network.create_qos_dscp_marking_rule( policy, dscp_mark=dscp_mark ) @_utils.valid_kwargs("dscp_mark") def update_qos_dscp_marking_rule( self, policy_name_or_id, rule_id, **kwargs ): """Update a QoS DSCP marking rule. :param string policy_name_or_id: Name or ID of the QoS policy to which rule is associated. :param string rule_id: ID of rule to update. :param int dscp_mark: DSCP mark value :returns: The updated network ``QoSDSCPMarkingRule`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) if not kwargs: self.log.debug("No QoS DSCP marking rule data to update") return curr_rule = self.network.get_qos_dscp_marking_rule(rule_id, policy) if not curr_rule: raise exceptions.SDKException( "QoS dscp_marking_rule {rule_id} not found in policy " "{policy_id}".format(rule_id=rule_id, policy_id=policy['id']) ) return self.network.update_qos_dscp_marking_rule( curr_rule, policy, **kwargs ) def delete_qos_dscp_marking_rule(self, policy_name_or_id, rule_id): """Delete a QoS DSCP marking rule. :param string policy_name_or_id: Name or ID of the QoS policy to which rule is associated. :param string rule_id: ID of rule to update. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) try: self.network.delete_qos_dscp_marking_rule( rule_id, policy, ignore_missing=False ) except exceptions.NotFoundException: self.log.debug( "QoS DSCP marking rule {rule_id} not found in policy " "{policy_id}. Ignoring.".format( rule_id=rule_id, policy_id=policy['id'] ) ) return False return True # TODO(stephenfin): Deprecate this in favour of the 'list' function def search_qos_minimum_bandwidth_rules( self, policy_name_or_id, rule_id=None, filters=None, ): """Search QoS minimum bandwidth rules :param string policy_name_or_id: Name or ID of the QoS policy to which rules should be associated. 
:param string rule_id: ID of searched rule. :param filters: a dict containing additional filters to use. e.g. {'min_kbps': 1000} :returns: A list of network ``QoSMinimumBandwidthRule`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ rules = self.list_qos_minimum_bandwidth_rules( policy_name_or_id, filters ) return _utils._filter_list(rules, rule_id, filters) def list_qos_minimum_bandwidth_rules( self, policy_name_or_id, filters=None ): """List all available QoS minimum bandwidth rules. :param string policy_name_or_id: Name or ID of the QoS policy from from rules should be listed. :param filters: (optional) A dict of filter conditions to push down :returns: A list of network ``QoSMinimumBandwidthRule`` objects. :raises: ``:class:`~openstack.exceptions.BadRequestException``` if QoS policy will not be found. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) # Translate None from search interface to empty {} for kwargs below if not filters: filters = {} return list( self.network.qos_minimum_bandwidth_rules(policy, **filters) ) def get_qos_minimum_bandwidth_rule(self, policy_name_or_id, rule_id): """Get a QoS minimum bandwidth rule by name or ID. :param string policy_name_or_id: Name or ID of the QoS policy to which rule should be associated. :param rule_id: ID of the rule. :returns: A network ``QoSMinimumBandwidthRule`` object if found, else None. 
""" if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) return self.network.get_qos_minimum_bandwidth_rule(rule_id, policy) @_utils.valid_kwargs("direction") def create_qos_minimum_bandwidth_rule( self, policy_name_or_id, min_kbps, **kwargs, ): """Create a QoS minimum bandwidth limit rule. :param string policy_name_or_id: Name or ID of the QoS policy to which rule should be associated. :param int min_kbps: Minimum bandwidth value (in kilobits per second). :param string direction: Ingress or egress. The direction in which the traffic will be available. :returns: The created network ``QoSMinimumBandwidthRule`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) kwargs['min_kbps'] = min_kbps return self.network.create_qos_minimum_bandwidth_rule(policy, **kwargs) @_utils.valid_kwargs("min_kbps", "direction") def update_qos_minimum_bandwidth_rule( self, policy_name_or_id, rule_id, **kwargs ): """Update a QoS minimum bandwidth rule. :param string policy_name_or_id: Name or ID of the QoS policy to which rule is associated. :param string rule_id: ID of rule to update. :param int min_kbps: Minimum bandwidth value (in kilobits per second). :param string direction: Ingress or egress. The direction in which the traffic will be available. :returns: The updated network ``QoSMinimumBandwidthRule`` object. 
:raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) if not kwargs: self.log.debug("No QoS minimum bandwidth rule data to update") return curr_rule = self.network.get_qos_minimum_bandwidth_rule( rule_id, policy ) if not curr_rule: raise exceptions.SDKException( "QoS minimum_bandwidth_rule {rule_id} not found in policy " "{policy_id}".format(rule_id=rule_id, policy_id=policy['id']) ) return self.network.update_qos_minimum_bandwidth_rule( curr_rule, policy, **kwargs ) def delete_qos_minimum_bandwidth_rule(self, policy_name_or_id, rule_id): """Delete a QoS minimum bandwidth rule. :param string policy_name_or_id: Name or ID of the QoS policy to which rule is associated. :param string rule_id: ID of rule to delete. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not self._has_neutron_extension('qos'): raise exc.OpenStackCloudUnavailableExtension( 'QoS extension is not available on target cloud' ) policy = self.network.find_qos_policy(policy_name_or_id) if not policy: raise exceptions.NotFoundException( "QoS policy {name_or_id} not Found.".format( name_or_id=policy_name_or_id ) ) try: self.network.delete_qos_minimum_bandwidth_rule( rule_id, policy, ignore_missing=False ) except exceptions.NotFoundException: self.log.debug( "QoS minimum bandwidth rule {rule_id} not found in policy " "{policy_id}. Ignoring.".format( rule_id=rule_id, policy_id=policy['id'] ) ) return False return True def add_router_interface(self, router, subnet_id=None, port_id=None): """Attach a subnet to an internal router interface. Either a subnet ID or port ID must be specified for the internal interface. 
Supplying both will result in an error. :param dict router: The dict object of the router being changed :param string subnet_id: The ID of the subnet to use for the interface :param string port_id: The ID of the port to use for the interface :returns: The raw response body from the request. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ return self.network.add_interface_to_router( router=router, subnet_id=subnet_id, port_id=port_id ) def remove_router_interface(self, router, subnet_id=None, port_id=None): """Detach a subnet from an internal router interface. At least one of subnet_id or port_id must be supplied. If you specify both subnet and port ID, the subnet ID must correspond to the subnet ID of the first IP address on the port specified by the port ID. Otherwise an error occurs. :param dict router: The dict object of the router being changed :param string subnet_id: The ID of the subnet to use for the interface :param string port_id: The ID of the port to use for the interface :returns: None on success :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if not subnet_id and not port_id: raise ValueError( "At least one of subnet_id or port_id must be supplied." ) self.network.remove_interface_from_router( router=router, subnet_id=subnet_id, port_id=port_id ) def list_router_interfaces(self, router, interface_type=None): """List all interfaces for a router. :param dict router: A router dict object. :param string interface_type: One of None, "internal", or "external". Controls whether all, internal interfaces or external interfaces are returned. :returns: A list of network ``Port`` objects. """ # Find only router interface and gateway ports, ignore L3 HA ports etc. 
ports = list(self.network.ports(device_id=router['id'])) router_interfaces = ( [ port for port in ports if ( port['device_owner'] in [ 'network:router_interface', 'network:router_interface_distributed', 'network:ha_router_replicated_interface', ] ) ] if not interface_type or interface_type == 'internal' else [] ) router_gateways = ( [ port for port in ports if port['device_owner'] == 'network:router_gateway' ] if not interface_type or interface_type == 'external' else [] ) return router_interfaces + router_gateways def create_router( self, name=None, admin_state_up=True, ext_gateway_net_id=None, enable_snat=None, ext_fixed_ips=None, project_id=None, availability_zone_hints=None, ): """Create a logical router. :param string name: The router name. :param bool admin_state_up: The administrative state of the router. :param string ext_gateway_net_id: Network ID for the external gateway. :param bool enable_snat: Enable Source NAT (SNAT) attribute. :param ext_fixed_ips: List of dictionaries of desired IP and/or subnet on the external network. Example:: [ { "subnet_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b", "ip_address": "192.168.10.2" } ] :param string project_id: Project ID for the router. :param types.ListType availability_zone_hints: A list of availability zone hints. :returns: The created network ``Router`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" router = {'admin_state_up': admin_state_up} if project_id is not None: router['project_id'] = project_id if name: router['name'] = name ext_gw_info = self._build_external_gateway_info( ext_gateway_net_id, enable_snat, ext_fixed_ips ) if ext_gw_info: router['external_gateway_info'] = ext_gw_info if availability_zone_hints is not None: if not isinstance(availability_zone_hints, list): raise exceptions.SDKException( "Parameter 'availability_zone_hints' must be a list" ) if not self._has_neutron_extension('router_availability_zone'): raise exc.OpenStackCloudUnavailableExtension( 'router_availability_zone extension is not available on ' 'target cloud' ) router['availability_zone_hints'] = availability_zone_hints return self.network.create_router(**router) def update_router( self, name_or_id, name=None, admin_state_up=None, ext_gateway_net_id=None, enable_snat=None, ext_fixed_ips=None, routes=None, ): """Update an existing logical router. :param string name_or_id: The name or UUID of the router to update. :param string name: The new router name. :param bool admin_state_up: The administrative state of the router. :param string ext_gateway_net_id: The network ID for the external gateway. :param bool enable_snat: Enable Source NAT (SNAT) attribute. :param ext_fixed_ips: List of dictionaries of desired IP and/or subnet on the external network. Example:: [ { "subnet_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b", "ip_address": "192.168.10.2" } ] :param list routes: A list of dictionaries with destination and nexthop parameters. To clear all routes pass an empty list ([]). Example:: [ { "destination": "179.24.1.0/24", "nexthop": "172.24.3.99" } ] :returns: The updated network ``Router`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" router = {} if name: router['name'] = name if admin_state_up is not None: router['admin_state_up'] = admin_state_up ext_gw_info = self._build_external_gateway_info( ext_gateway_net_id, enable_snat, ext_fixed_ips ) if ext_gw_info: router['external_gateway_info'] = ext_gw_info if routes is not None: if self._has_neutron_extension('extraroute'): router['routes'] = routes else: self.log.warning( 'extra routes extension is not available on target cloud' ) if not router: self.log.debug("No router data to update") return curr_router = self.get_router(name_or_id) if not curr_router: raise exceptions.SDKException("Router %s not found." % name_or_id) return self.network.update_router(curr_router, **router) def delete_router(self, name_or_id): """Delete a logical router. If a name, instead of a unique UUID, is supplied, it is possible that we could find more than one matching router since names are not required to be unique. An error will be raised in this case. :param name_or_id: Name or ID of the router being deleted. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ router = self.network.find_router(name_or_id, ignore_missing=True) if not router: self.log.debug("Router %s not found for deleting", name_or_id) return False self.network.delete_router(router) return True def create_subnet( self, network_name_or_id, cidr=None, ip_version=4, enable_dhcp=False, subnet_name=None, tenant_id=None, allocation_pools=None, gateway_ip=None, disable_gateway_ip=False, dns_nameservers=None, host_routes=None, ipv6_ra_mode=None, ipv6_address_mode=None, prefixlen=None, use_default_subnetpool=False, subnetpool_name_or_id=None, **kwargs, ): """Create a subnet on a specified network. :param string network_name_or_id: The unique name or ID of the attached network. If a non-unique name is supplied, an exception is raised. :param string cidr: The CIDR. 
Only one of ``cidr``, ``use_default_subnetpool`` and ``subnetpool_name_or_id`` may be specified at the same time. :param int ip_version: The IP version, which is 4 or 6. :param bool enable_dhcp: Set to ``True`` if DHCP is enabled and ``False`` if disabled. Default is ``False``. :param string subnet_name: The name of the subnet. :param string tenant_id: The ID of the tenant who owns the network. Only administrative users can specify a tenant ID other than their own. :param allocation_pools: A list of dictionaries of the start and end addresses for the allocation pools. For example:: [ { "start": "192.168.199.2", "end": "192.168.199.254" } ] :param string gateway_ip: The gateway IP address. When you specify both allocation_pools and gateway_ip, you must ensure that the gateway IP does not overlap with the specified allocation pools. :param bool disable_gateway_ip: Set to ``True`` if gateway IP address is disabled and ``False`` if enabled. It is not allowed with gateway_ip. Default is ``False``. :param dns_nameservers: A list of DNS name servers for the subnet. For example:: [ "8.8.8.7", "8.8.8.8" ] :param host_routes: A list of host route dictionaries for the subnet. For example:: [ { "destination": "0.0.0.0/0", "nexthop": "123.456.78.9" }, { "destination": "192.168.0.0/24", "nexthop": "192.168.0.1" } ] :param string ipv6_ra_mode: IPv6 Router Advertisement mode. Valid values are: 'dhcpv6-stateful', 'dhcpv6-stateless', or 'slaac'. :param string ipv6_address_mode: IPv6 address mode. Valid values are: 'dhcpv6-stateful', 'dhcpv6-stateless', or 'slaac'. :param string prefixlen: The prefix length to use for subnet allocation from a subnetpool. :param bool use_default_subnetpool: Use the default subnetpool for ``ip_version`` to obtain a CIDR. Only one of ``cidr``, ``use_default_subnetpool`` and ``subnetpool_name_or_id`` may be specified at the same time. :param string subnetpool_name_or_id: The unique name or id of the subnetpool to obtain a CIDR from. 
Only one of ``cidr``, ``use_default_subnetpool`` and ``subnetpool_name_or_id`` may be specified at the same time. :param kwargs: Key value pairs to be passed to the Neutron API. :returns: The created network ``Subnet`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if tenant_id is not None: filters = {'tenant_id': tenant_id} else: filters = None network = self.get_network(network_name_or_id, filters) if not network: raise exceptions.SDKException( "Network %s not found." % network_name_or_id ) if disable_gateway_ip and gateway_ip: raise exceptions.SDKException( 'arg:disable_gateway_ip is not allowed with arg:gateway_ip' ) uses_subnetpool = use_default_subnetpool or subnetpool_name_or_id if not cidr and not uses_subnetpool: raise exceptions.SDKException( 'arg:cidr is required when a subnetpool is not used' ) if cidr and uses_subnetpool: raise exceptions.SDKException( 'arg:cidr and subnetpool may not be used at the same time' ) if use_default_subnetpool and subnetpool_name_or_id: raise exceptions.SDKException( 'arg:use_default_subnetpool and arg:subnetpool_id may not be ' 'used at the same time' ) subnetpool = None if subnetpool_name_or_id: subnetpool = self.get_subnetpool(subnetpool_name_or_id) if not subnetpool: raise exceptions.SDKException( "Subnetpool %s not found." % subnetpool_name_or_id ) # Be friendly on ip_version and allow strings if isinstance(ip_version, str): try: ip_version = int(ip_version) except ValueError: raise exceptions.SDKException('ip_version must be an integer') # The body of the neutron message for the subnet we wish to create. # This includes attributes that are required or have defaults. subnet = dict( { 'network_id': network['id'], 'ip_version': ip_version, 'enable_dhcp': enable_dhcp, }, **kwargs, ) # Add optional attributes to the message. 
if cidr: subnet['cidr'] = cidr if subnet_name: subnet['name'] = subnet_name if tenant_id: subnet['tenant_id'] = tenant_id if allocation_pools: subnet['allocation_pools'] = allocation_pools if gateway_ip: subnet['gateway_ip'] = gateway_ip if disable_gateway_ip: subnet['gateway_ip'] = None if dns_nameservers: subnet['dns_nameservers'] = dns_nameservers if host_routes: subnet['host_routes'] = host_routes if ipv6_ra_mode: subnet['ipv6_ra_mode'] = ipv6_ra_mode if ipv6_address_mode: subnet['ipv6_address_mode'] = ipv6_address_mode if prefixlen: subnet['prefixlen'] = prefixlen if use_default_subnetpool: subnet['use_default_subnetpool'] = True if subnetpool: subnet['subnetpool_id'] = subnetpool["id"] return self.network.create_subnet(**subnet) def delete_subnet(self, name_or_id): """Delete a subnet. If a name, instead of a unique UUID, is supplied, it is possible that we could find more than one matching subnet since names are not required to be unique. An error will be raised in this case. :param name_or_id: Name or ID of the subnet being deleted. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ subnet = self.network.find_subnet(name_or_id, ignore_missing=True) if not subnet: self.log.debug("Subnet %s not found for deleting", name_or_id) return False self.network.delete_subnet(subnet) return True def update_subnet( self, name_or_id, subnet_name=None, enable_dhcp=None, gateway_ip=None, disable_gateway_ip=None, allocation_pools=None, dns_nameservers=None, host_routes=None, ): """Update an existing subnet. :param string name_or_id: Name or ID of the subnet to update. :param string subnet_name: The new name of the subnet. :param bool enable_dhcp: Set to ``True`` if DHCP is enabled and ``False`` if disabled. :param string gateway_ip: The gateway IP address. 
When you specify both allocation_pools and gateway_ip, you must ensure that the gateway IP does not overlap with the specified allocation pools. :param bool disable_gateway_ip: Set to ``True`` if gateway IP address is disabled and ``False`` if enabled. It is not allowed with gateway_ip. Default is ``False``. :param allocation_pools: A list of dictionaries of the start and end addresses for the allocation pools. For example:: [ { "start": "192.168.199.2", "end": "192.168.199.254" } ] :param dns_nameservers: A list of DNS name servers for the subnet. For example:: [ "8.8.8.7", "8.8.8.8" ] :param host_routes: A list of host route dictionaries for the subnet. For example:: [ { "destination": "0.0.0.0/0", "nexthop": "123.456.78.9" }, { "destination": "192.168.0.0/24", "nexthop": "192.168.0.1" } ] :returns: The updated network ``Subnet`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ subnet = {} if subnet_name: subnet['name'] = subnet_name if enable_dhcp is not None: subnet['enable_dhcp'] = enable_dhcp if gateway_ip: subnet['gateway_ip'] = gateway_ip if disable_gateway_ip: subnet['gateway_ip'] = None if allocation_pools: subnet['allocation_pools'] = allocation_pools if dns_nameservers: subnet['dns_nameservers'] = dns_nameservers if host_routes: subnet['host_routes'] = host_routes if not subnet: self.log.debug("No subnet data to update") return if disable_gateway_ip and gateway_ip: raise exceptions.SDKException( 'arg:disable_gateway_ip is not allowed with arg:gateway_ip' ) curr_subnet = self.get_subnet(name_or_id) if not curr_subnet: raise exceptions.SDKException("Subnet %s not found." 
% name_or_id) return self.network.update_subnet(curr_subnet, **subnet) @_utils.valid_kwargs( 'name', 'admin_state_up', 'mac_address', 'fixed_ips', 'subnet_id', 'ip_address', 'security_groups', 'allowed_address_pairs', 'extra_dhcp_opts', 'device_owner', 'device_id', 'binding:vnic_type', 'binding:profile', 'port_security_enabled', 'qos_policy_id', 'binding:host_id', 'project_id', 'description', 'dns_domain', 'dns_name', 'numa_affinity_policy', 'propagate_uplink_status', 'mac_learning_enabled', ) def create_port(self, network_id, **kwargs): """Create a port :param network_id: The ID of the network. (Required) :param name: A symbolic name for the port. (Optional) :param admin_state_up: The administrative status of the port, which is up (true, default) or down (false). (Optional) :param mac_address: The MAC address. (Optional) :param fixed_ips: List of ip_addresses and subnet_ids. See subnet_id and ip_address. (Optional) For example:: [ { "ip_address": "10.29.29.13", "subnet_id": "a78484c4-c380-4b47-85aa-21c51a2d8cbd" }, ... ] :param subnet_id: If you specify only a subnet ID, OpenStack Networking allocates an available IP from that subnet to the port. (Optional) If you specify both a subnet ID and an IP address, OpenStack Networking tries to allocate the specified address to the port. :param ip_address: If you specify both a subnet ID and an IP address, OpenStack Networking tries to allocate the specified address to the port. :param security_groups: List of security group UUIDs. (Optional) :param allowed_address_pairs: Allowed address pairs list (Optional) For example:: [ { "ip_address": "23.23.23.1", "mac_address": "fa:16:3e:c4:cd:3f" }, ... ] :param extra_dhcp_opts: Extra DHCP options. (Optional). For example:: [ { "opt_name": "opt name1", "opt_value": "value1" }, ... ] :param device_owner: The ID of the entity that uses this port. For example, a DHCP agent. (Optional) :param device_id: The ID of the device that uses this port. For example, a virtual server. 
(Optional) :param binding vnic_type: The type of the created port. (Optional) :param port_security_enabled: The security port state created on the network. (Optional) :param qos_policy_id: The ID of the QoS policy to apply for port. (Optional) :param project_id: The project in which to create the port. (Optional) :param description: Description of the port. (Optional) :param dns_domain: DNS domain relevant for the port. (Optional) :param dns_name: DNS name of the port. (Optional) :param numa_affinity_policy: the numa affinitiy policy. May be "None", "required", "preferred" or "legacy". (Optional) :param propagate_uplink_status: If the uplink status of the port should be propagated. (Optional) :param mac_learning_enabled: If mac learning should be enabled on the port. (Optional) :returns: The created network ``Port`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ kwargs['network_id'] = network_id return self.network.create_port(**kwargs) @_utils.valid_kwargs( 'name', 'admin_state_up', 'fixed_ips', 'security_groups', 'allowed_address_pairs', 'extra_dhcp_opts', 'device_owner', 'device_id', 'binding:vnic_type', 'binding:profile', 'port_security_enabled', 'qos_policy_id', 'binding:host_id', ) def update_port(self, name_or_id, **kwargs): """Update a port Note: to unset an attribute use None value. To leave an attribute untouched just omit it. :param name_or_id: name or ID of the port to update. (Required) :param name: A symbolic name for the port. (Optional) :param admin_state_up: The administrative status of the port, which is up (true) or down (false). (Optional) :param fixed_ips: List of ip_addresses and subnet_ids. (Optional) If you specify only a subnet ID, OpenStack Networking allocates an available IP from that subnet to the port. If you specify both a subnet ID and an IP address, OpenStack Networking tries to allocate the specified address to the port. 
For example:: [ { "ip_address": "10.29.29.13", "subnet_id": "a78484c4-c380-4b47-85aa-21c51a2d8cbd" }, ... ] :param security_groups: List of security group UUIDs. (Optional) :param allowed_address_pairs: Allowed address pairs list (Optional) For example:: [ { "ip_address": "23.23.23.1", "mac_address": "fa:16:3e:c4:cd:3f" }, ... ] :param extra_dhcp_opts: Extra DHCP options. (Optional). For example:: [ { "opt_name": "opt name1", "opt_value": "value1" }, ... ] :param device_owner: The ID of the entity that uses this port. For example, a DHCP agent. (Optional) :param device_id: The ID of the resource this port is attached to. :param binding vnic_type: The type of the created port. (Optional) :param port_security_enabled: The security port state created on the network. (Optional) :param qos_policy_id: The ID of the QoS policy to apply for port. :returns: The updated network ``Port`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ port = self.get_port(name_or_id=name_or_id) if port is None: raise exceptions.SDKException( f"failed to find port '{name_or_id}'" ) return self.network.update_port(port, **kwargs) def delete_port(self, name_or_id): """Delete a port :param name_or_id: ID or name of the port to delete. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ port = self.network.find_port(name_or_id) if port is None: self.log.debug("Port %s not found for deleting", name_or_id) return False self.network.delete_port(port) return True def _get_port_ids(self, name_or_id_list, filters=None): """ Takes a list of port names or ids, retrieves ports and returns a list with port ids only. 
:param list[str] name_or_id_list: list of port names or ids :param dict filters: optional filters :raises: SDKException on multiple matches :raises: NotFoundException if a port is not found :return: list of port ids :rtype: list[str] """ ids_list = [] for name_or_id in name_or_id_list: port = self.get_port(name_or_id, filters) if not port: raise exceptions.NotFoundException( f'Port {name_or_id} not found' ) ids_list.append(port['id']) return ids_list def _build_external_gateway_info( self, ext_gateway_net_id, enable_snat, ext_fixed_ips ): info = {} if ext_gateway_net_id: info['network_id'] = ext_gateway_net_id # Only send enable_snat if it is explicitly set. if enable_snat is not None: info['enable_snat'] = enable_snat if ext_fixed_ips: info['external_fixed_ips'] = ext_fixed_ips if info: return info return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_network_common.py0000664000175000017500000025500400000000000023104 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import ipaddress import threading import time import warnings from openstack.cloud import _utils from openstack.cloud import exc from openstack.cloud import meta from openstack.cloud import openstackcloud from openstack import exceptions from openstack import proxy from openstack import utils from openstack import warnings as os_warnings class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): """Shared networking functions used by Network and Compute classes.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._external_ipv4_names = self.config.get_external_ipv4_networks() self._internal_ipv4_names = self.config.get_internal_ipv4_networks() self._external_ipv6_names = self.config.get_external_ipv6_networks() self._internal_ipv6_names = self.config.get_internal_ipv6_networks() self._nat_destination = self.config.get_nat_destination() self._nat_source = self.config.get_nat_source() self._default_network = self.config.get_default_network() self._use_external_network = self.config.config.get( 'use_external_network', True ) self._use_internal_network = self.config.config.get( 'use_internal_network', True ) self._networks_lock = threading.Lock() self._reset_network_caches() self.private = self.config.config.get('private', False) self._floating_ip_source = self.config.config.get('floating_ip_source') if self._floating_ip_source: if self._floating_ip_source.lower() == 'none': self._floating_ip_source = None else: self._floating_ip_source = self._floating_ip_source.lower() self.secgroup_source = self.config.config['secgroup_source'] # networks def use_external_network(self): return self._use_external_network def use_internal_network(self): return self._use_internal_network def _reset_network_caches(self): # Variables to prevent us from going through the network finding # logic again if we've done it once. This is different from just # the cached value, since "None" is a valid value to find. 
with self._networks_lock: self._external_ipv4_networks = [] self._external_ipv4_floating_networks = [] self._internal_ipv4_networks = [] self._external_ipv6_networks = [] self._internal_ipv6_networks = [] self._nat_destination_network = None self._nat_source_network = None self._default_network_network = None self._network_list_stamp = False def _set_interesting_networks(self): external_ipv4_networks = [] external_ipv4_floating_networks = [] internal_ipv4_networks = [] external_ipv6_networks = [] internal_ipv6_networks = [] nat_destination = None nat_source = None default_network = None all_subnets = None # Filter locally because we have an or condition try: # TODO(mordred): Rackspace exposes neutron but it does not # work. I think that overriding what the service catalog # reports should be a thing os-client-config should handle # in a vendor profile - but for now it does not. That means # this search_networks can just totally fail. If it does # though, that's fine, clearly the neutron introspection is # not going to work. 
if self.has_service('network'): all_networks = list(self.network.networks()) else: all_networks = [] except exceptions.SDKException: self._network_list_stamp = True return for network in all_networks: # External IPv4 networks if ( network['name'] in self._external_ipv4_names or network['id'] in self._external_ipv4_names ): external_ipv4_networks.append(network) elif ( ( network.is_router_external or network.provider_physical_network ) and network['name'] not in self._internal_ipv4_names and network['id'] not in self._internal_ipv4_names ): external_ipv4_networks.append(network) # Internal networks if ( network['name'] in self._internal_ipv4_names or network['id'] in self._internal_ipv4_names ): internal_ipv4_networks.append(network) elif ( not network.is_router_external and not network.provider_physical_network and network['name'] not in self._external_ipv4_names and network['id'] not in self._external_ipv4_names ): internal_ipv4_networks.append(network) # External networks if ( network['name'] in self._external_ipv6_names or network['id'] in self._external_ipv6_names ): external_ipv6_networks.append(network) elif ( network.is_router_external and network['name'] not in self._internal_ipv6_names and network['id'] not in self._internal_ipv6_names ): external_ipv6_networks.append(network) # Internal networks if ( network['name'] in self._internal_ipv6_names or network['id'] in self._internal_ipv6_names ): internal_ipv6_networks.append(network) elif ( not network.is_router_external and network['name'] not in self._external_ipv6_names and network['id'] not in self._external_ipv6_names ): internal_ipv6_networks.append(network) # External Floating IPv4 networks if self._nat_source in (network['name'], network['id']): if nat_source: raise exceptions.SDKException( 'Multiple networks were found matching ' '{nat_net} which is the network configured ' 'to be the NAT source. Please check your ' 'cloud resources. 
It is probably a good idea ' 'to configure this network by ID rather than ' 'by name.'.format(nat_net=self._nat_source) ) external_ipv4_floating_networks.append(network) nat_source = network elif self._nat_source is None: if network.is_router_external: external_ipv4_floating_networks.append(network) nat_source = nat_source or network # NAT Destination if self._nat_destination in (network['name'], network['id']): if nat_destination: raise exceptions.SDKException( 'Multiple networks were found matching ' '{nat_net} which is the network configured ' 'to be the NAT destination. Please check your ' 'cloud resources. It is probably a good idea ' 'to configure this network by ID rather than ' 'by name.'.format(nat_net=self._nat_destination) ) nat_destination = network elif self._nat_destination is None: # TODO(mordred) need a config value for floating # ips for this cloud so that we can skip this # No configured nat destination, we have to figured # it out. if all_subnets is None: try: if self.has_service('network'): all_subnets = list(self.network.subnets()) else: all_subnets = [] except exceptions.SDKException: # Thanks Rackspace broken neutron all_subnets = [] for subnet in all_subnets: # TODO(mordred) trap for detecting more than # one network with a gateway_ip without a config if ( 'gateway_ip' in subnet and subnet['gateway_ip'] and network['id'] == subnet['network_id'] ): nat_destination = network break # Default network if self._default_network in (network['name'], network['id']): if default_network: raise exceptions.SDKException( 'Multiple networks were found matching ' '{default_net} which is the network ' 'configured to be the default interface ' 'network. Please check your cloud resources. ' 'It is probably a good idea ' 'to configure this network by ID rather than ' 'by name.'.format(default_net=self._default_network) ) default_network = network # Validate config vs. 
reality for net_name in self._external_ipv4_names: if net_name not in [net['name'] for net in external_ipv4_networks]: raise exceptions.SDKException( "Networks: {network} was provided for external IPv4 " "access and those networks could not be found".format( network=net_name ) ) for net_name in self._internal_ipv4_names: if net_name not in [net['name'] for net in internal_ipv4_networks]: raise exceptions.SDKException( "Networks: {network} was provided for internal IPv4 " "access and those networks could not be found".format( network=net_name ) ) for net_name in self._external_ipv6_names: if net_name not in [net['name'] for net in external_ipv6_networks]: raise exceptions.SDKException( "Networks: {network} was provided for external IPv6 " "access and those networks could not be found".format( network=net_name ) ) for net_name in self._internal_ipv6_names: if net_name not in [net['name'] for net in internal_ipv6_networks]: raise exceptions.SDKException( "Networks: {network} was provided for internal IPv6 " "access and those networks could not be found".format( network=net_name ) ) if self._nat_destination and not nat_destination: raise exceptions.SDKException( 'Network {network} was configured to be the ' 'destination for inbound NAT but it could not be ' 'found'.format(network=self._nat_destination) ) if self._nat_source and not nat_source: raise exceptions.SDKException( 'Network {network} was configured to be the ' 'source for inbound NAT but it could not be ' 'found'.format(network=self._nat_source) ) if self._default_network and not default_network: raise exceptions.SDKException( 'Network {network} was configured to be the ' 'default network interface but it could not be ' 'found'.format(network=self._default_network) ) self._external_ipv4_networks = external_ipv4_networks self._external_ipv4_floating_networks = external_ipv4_floating_networks self._internal_ipv4_networks = internal_ipv4_networks self._external_ipv6_networks = external_ipv6_networks 
self._internal_ipv6_networks = internal_ipv6_networks self._nat_destination_network = nat_destination self._nat_source_network = nat_source self._default_network_network = default_network def _find_interesting_networks(self): if self._networks_lock.acquire(): try: if self._network_list_stamp: return if ( not self._use_external_network and not self._use_internal_network ): # Both have been flagged as skip - don't do a list return if not self.has_service('network'): return self._set_interesting_networks() self._network_list_stamp = True finally: self._networks_lock.release() def get_nat_destination(self): """Return the network that is configured to be the NAT destination. :returns: A network ``Network`` object if one is found """ self._find_interesting_networks() return self._nat_destination_network def get_nat_source(self): """Return the network that is configured to be the NAT destination. :returns: A network ``Network`` object if one is found """ self._find_interesting_networks() return self._nat_source_network def get_default_network(self): """Return the network that is configured to be the default interface. :returns: A network ``Network`` object if one is found """ self._find_interesting_networks() return self._default_network_network def get_external_networks(self): """Return the networks that are configured to route northbound. This should be avoided in favor of the specific ipv4/ipv6 method, but is here for backwards compatibility. :returns: A list of network ``Network`` objects if any are found """ self._find_interesting_networks() return list(self._external_ipv4_networks) + list( self._external_ipv6_networks ) def get_internal_networks(self): """Return the networks that are configured to not route northbound. This should be avoided in favor of the specific ipv4/ipv6 method, but is here for backwards compatibility. 
:returns: A list of network ``Network`` objects if any are found """ self._find_interesting_networks() return list(self._internal_ipv4_networks) + list( self._internal_ipv6_networks ) def get_external_ipv4_networks(self): """Return the networks that are configured to route northbound. :returns: A list of network ``Network`` objects if any are found """ self._find_interesting_networks() return self._external_ipv4_networks def get_external_ipv4_floating_networks(self): """Return the networks that are configured to route northbound. :returns: A list of network ``Network`` objects if any are found """ self._find_interesting_networks() return self._external_ipv4_floating_networks def get_internal_ipv4_networks(self): """Return the networks that are configured to not route northbound. :returns: A list of network ``Network`` objects if any are found """ self._find_interesting_networks() return self._internal_ipv4_networks def get_external_ipv6_networks(self): """Return the networks that are configured to route northbound. :returns: A list of network ``Network`` objects if any are found """ self._find_interesting_networks() return self._external_ipv6_networks def get_internal_ipv6_networks(self): """Return the networks that are configured to not route northbound. :returns: A list of network ``Network`` objects if any are found """ self._find_interesting_networks() return self._internal_ipv6_networks # floating IPs def search_floating_ip_pools(self, name=None, filters=None): pools = self.list_floating_ip_pools() return _utils._filter_list(pools, name, filters) # With Neutron, there are some cases in which full server side filtering is # not possible (e.g. nested attributes or list of objects) so we also need # to use the client-side filtering # The same goes for all neutron-related search/get methods! def search_floating_ips(self, id=None, filters=None): # `filters` could be a jmespath expression which Neutron server doesn't # understand, obviously. 
warnings.warn( "search_floating_ips is deprecated. Use search_resource instead.", os_warnings.OpenStackDeprecationWarning, ) if self._use_neutron_floating() and isinstance(filters, dict): return list(self.network.ips(**filters)) else: floating_ips = self.list_floating_ips() return _utils._filter_list(floating_ips, id, filters) def _neutron_list_floating_ips(self, filters=None): if not filters: filters = {} data = list(self.network.ips(**filters)) return data def _nova_list_floating_ips(self): try: data = proxy._json_response(self.compute.get('/os-floating-ips')) except exceptions.NotFoundException: return [] return self._get_and_munchify('floating_ips', data) def get_floating_ip(self, id, filters=None): """Get a floating IP by ID :param id: ID of the floating IP. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A floating IP ``openstack.network.v2.floating_ip.FloatingIP`` or None if no matching floating IP is found. """ return _utils._get_entity(self, 'floating_ip', id, filters) def list_floating_ips(self, filters=None): """List all available floating IPs. :param filters: (optional) dict of filter conditions to push down :returns: A list of floating IP ``openstack.network.v2.floating_ip.FloatingIP``. """ if not filters: filters = {} if self._use_neutron_floating(): try: return self._neutron_list_floating_ips(filters) except exceptions.NotFoundException as e: # Nova-network don't support server-side floating ips # filtering, so it's safer to return an empty list than # to fallback to Nova which may return more results that # expected. if filters: self.log.error( "Neutron returned NotFound for floating IPs, which " "means this cloud doesn't have neutron floating ips. 
" "openstacksdk can't fallback to trying Nova since " "nova doesn't support server-side filtering when " "listing floating ips and filters were given. " "If you do not think openstacksdk should be " "attempting to list floating IPs on neutron, it is " "possible to control the behavior by setting " "floating_ip_source to 'nova' or None for cloud " "%(cloud)r in 'clouds.yaml'.", { 'cloud': self.name, }, ) # We can't fallback to nova because we push-down filters. # We got a 404 which means neutron doesn't exist. If the # user return [] self.log.debug( "Something went wrong talking to neutron API: " "'%(msg)s'. Trying with Nova.", {'msg': str(e)}, ) # Fall-through, trying with Nova else: if filters: raise ValueError( "nova-network doesn't support server-side floating IPs " "filtering. Use the 'search_floating_ips' method instead" ) floating_ips = self._nova_list_floating_ips() return self._normalize_floating_ips(floating_ips) def list_floating_ip_pools(self): """List all available floating IP pools. NOTE: This function supports the nova-net view of the world. nova-net has been deprecated, so it's highly recommended to switch to using neutron. `get_external_ipv4_floating_networks` is what you should almost certainly be using. :returns: A list of floating IP pool objects """ data = proxy._json_response( self.compute.get('os-floating-ip-pools'), error_message="Error fetching floating IP pool list", ) pools = self._get_and_munchify('floating_ip_pools', data) return [{'name': p['name']} for p in pools] def get_floating_ip_by_id(self, id): """Get a floating ip by ID :param id: ID of the floating ip. :returns: A floating ip `:class:`~openstack.network.v2.floating_ip.FloatingIP`. 
""" error_message = f"Error getting floating ip with ID {id}" if self._use_neutron_floating(): fip = self.network.get_ip(id) return fip else: data = proxy._json_response( self.compute.get(f'/os-floating-ips/{id}'), error_message=error_message, ) return self._normalize_floating_ip( self._get_and_munchify('floating_ip', data) ) def _neutron_available_floating_ips( self, network=None, project_id=None, server=None ): """Get a floating IP from a network. Return a list of available floating IPs or allocate a new one and return it in a list of 1 element. :param network: A single network name or ID, or a list of them. :param server: (server) Server the Floating IP is for :returns: a list of floating IP addresses. :raises: :class:`~openstack.exceptions.BadRequestException` if an external network that meets the specified criteria cannot be found. """ if project_id is None: # Make sure we are only listing floatingIPs allocated the current # tenant. This is the default behaviour of Nova project_id = self.current_project_id if network: if isinstance(network, str): network = [network] # Use given list to get first matching external network floating_network_id = None for net in network: for ext_net in self.get_external_ipv4_floating_networks(): if net in (ext_net['name'], ext_net['id']): floating_network_id = ext_net['id'] break if floating_network_id: break if floating_network_id is None: raise exceptions.NotFoundException( f"unable to find external network {network}" ) else: floating_network_id = self._get_floating_network_id() filters = { 'port_id': None, 'floating_network_id': floating_network_id, 'project_id': project_id, } floating_ips = self.list_floating_ips() available_ips = _utils._filter_list( floating_ips, name_or_id=None, filters=filters ) if available_ips: return available_ips # No available IP found or we didn't try # allocate a new Floating IP f_ip = self._neutron_create_floating_ip( network_id=floating_network_id, server=server ) return [f_ip] def 
_nova_available_floating_ips(self, pool=None): """Get available floating IPs from a floating IP pool. Return a list of available floating IPs or allocate a new one and return it in a list of 1 element. :param pool: Nova floating IP pool name. :returns: a list of floating IP addresses. :raises: :class:`~openstack.exceptions.BadRequestException` if a floating IP pool is not specified and cannot be found. """ with _utils.openstacksdk_exceptions( f"Unable to create floating IP in pool {pool}" ): if pool is None: pools = self.list_floating_ip_pools() if not pools: raise exceptions.NotFoundException( "unable to find a floating ip pool" ) pool = pools[0]['name'] filters = {'instance_id': None, 'pool': pool} floating_ips = self._nova_list_floating_ips() available_ips = _utils._filter_list( floating_ips, name_or_id=None, filters=filters ) if available_ips: return available_ips # No available IP found or we did not try. # Allocate a new Floating IP f_ip = self._nova_create_floating_ip(pool=pool) return [f_ip] def _find_floating_network_by_router(self): """Find the network providing floating ips by looking at routers.""" for router in self.network.routers(): if router['admin_state_up']: network_id = router.get('external_gateway_info', {}).get( 'network_id' ) if network_id: return network_id def available_floating_ip(self, network=None, server=None): """Get a floating IP from a network or a pool. Return the first available floating IP or allocate a new one. :param network: Name or ID of the network. :param server: Server the IP is for if known :returns: a (normalized) structure with a floating IP address description. """ if self._use_neutron_floating(): try: f_ips = self._neutron_available_floating_ips( network=network, server=server ) return f_ips[0] except exceptions.NotFoundException as e: self.log.debug( "Something went wrong talking to neutron API: " "'%(msg)s'. 
Trying with Nova.", {'msg': str(e)}, ) # Fall-through, trying with Nova f_ips = self._normalize_floating_ips( self._nova_available_floating_ips(pool=network) ) return f_ips[0] def _get_floating_network_id(self): # Get first existing external IPv4 network networks = self.get_external_ipv4_floating_networks() if networks: floating_network_id = networks[0]['id'] else: floating_network = self._find_floating_network_by_router() if floating_network: floating_network_id = floating_network else: raise exceptions.NotFoundException( "unable to find an external network" ) return floating_network_id def create_floating_ip( self, network=None, server=None, fixed_address=None, nat_destination=None, port=None, wait=False, timeout=60, ): """Allocate a new floating IP from a network or a pool. :param network: Name or ID of the network that the floating IP should come from. :param server: (optional) Server dict for the server to create the IP for and to which it should be attached. :param fixed_address: (optional) Fixed IP to attach the floating ip to. :param nat_destination: (optional) Name or ID of the network that the fixed IP to attach the floating IP to should be on. :param port: (optional) The port ID that the floating IP should be attached to. Specifying a port conflicts with specifying a server, fixed_address or nat_destination. :param wait: (optional) Whether to wait for the IP to be active. Defaults to False. Only applies if a server is provided. :param timeout: (optional) How long to wait for the IP to be active. Defaults to 60. Only applies if a server is provided. :returns: a floating IP address :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" if self._use_neutron_floating(): try: return self._neutron_create_floating_ip( network_name_or_id=network, server=server, fixed_address=fixed_address, nat_destination=nat_destination, port=port, wait=wait, timeout=timeout, ) except exceptions.NotFoundException as e: self.log.debug( "Something went wrong talking to neutron API: " "'%(msg)s'. Trying with Nova.", {'msg': str(e)}, ) # Fall-through, trying with Nova if port: raise exceptions.SDKException( "This cloud uses nova-network which does not support " "arbitrary floating-ip/port mappings. Please nudge " "your cloud provider to upgrade the networking stack " "to neutron, or alternately provide the server, " "fixed_address and nat_destination arguments as appropriate" ) # Else, we are using Nova network f_ips = self._normalize_floating_ips( [self._nova_create_floating_ip(pool=network)] ) return f_ips[0] def _submit_create_fip(self, kwargs): # Split into a method to aid in test mocking return self.network.create_ip(**kwargs) def _neutron_create_floating_ip( self, network_name_or_id=None, server=None, fixed_address=None, nat_destination=None, port=None, wait=False, timeout=60, network_id=None, ): if not network_id: if network_name_or_id: try: network = self.network.find_network(network_name_or_id) except exceptions.NotFoundException: raise exceptions.NotFoundException( "unable to find network for floating ips with ID " "{}".format(network_name_or_id) ) network_id = network['id'] else: network_id = self._get_floating_network_id() kwargs = { 'floating_network_id': network_id, } if not port: if server: (port_obj, fixed_ip_address) = self._nat_destination_port( server, fixed_address=fixed_address, nat_destination=nat_destination, ) if port_obj: port = port_obj['id'] if fixed_ip_address: kwargs['fixed_ip_address'] = fixed_ip_address if port: kwargs['port_id'] = port fip = self._submit_create_fip(kwargs) fip_id = fip['id'] if port: # The FIP is only going to become active in this context # when we've attached it to 
something, which only occurs # if we've provided a port as a parameter if wait: try: for count in utils.iterate_timeout( timeout, "Timeout waiting for the floating IP to be ACTIVE", wait=min(5, timeout), ): fip = self.get_floating_ip(fip_id) if fip and fip['status'] == 'ACTIVE': break except exceptions.ResourceTimeout: self.log.error( "Timed out on floating ip %(fip)s becoming active. " "Deleting", {'fip': fip_id}, ) try: self.delete_floating_ip(fip_id) except Exception as e: self.log.error( "FIP LEAK: Attempted to delete floating ip " "%(fip)s but received %(exc)s exception: " "%(err)s", {'fip': fip_id, 'exc': e.__class__, 'err': str(e)}, ) raise if fip['port_id'] != port: if server: raise exceptions.SDKException( "Attempted to create FIP on port {port} for server " "{server} but FIP has port {port_id}".format( port=port, port_id=fip['port_id'], server=server['id'], ) ) else: raise exceptions.SDKException( "Attempted to create FIP on port {port} " "but something went wrong".format(port=port) ) return fip def _nova_create_floating_ip(self, pool=None): with _utils.openstacksdk_exceptions( f"Unable to create floating IP in pool {pool}" ): if pool is None: pools = self.list_floating_ip_pools() if not pools: raise exceptions.NotFoundException( "unable to find a floating ip pool" ) pool = pools[0]['name'] data = proxy._json_response( self.compute.post('/os-floating-ips', json=dict(pool=pool)) ) pool_ip = self._get_and_munchify('floating_ip', data) # TODO(mordred) Remove this - it's just for compat data = proxy._json_response( self.compute.get( '/os-floating-ips/{id}'.format(id=pool_ip['id']) ) ) return self._get_and_munchify('floating_ip', data) def delete_floating_ip(self, floating_ip_id, retry=1): """Deallocate a floating IP from a project. :param floating_ip_id: a floating IP address ID. :param retry: number of times to retry. Optional, defaults to 1, which is in addition to the initial delete call. A value of 0 will also cause no checking of results to occur. 
:returns: True if the IP address has been deleted, False if the IP address was not found. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ for count in range(0, max(0, retry) + 1): result = self._delete_floating_ip(floating_ip_id) if (retry == 0) or not result: return result # neutron sometimes returns success when deleting a floating # ip. That's awesome. SO - verify that the delete actually # worked. Some clouds will set the status to DOWN rather than # deleting the IP immediately. This is, of course, a bit absurd. f_ip = self.get_floating_ip(id=floating_ip_id) if not f_ip or f_ip['status'] == 'DOWN': return True raise exceptions.SDKException( "Attempted to delete Floating IP {ip} with ID {id} a total of " "{retry} times. Although the cloud did not indicate any errors " "the floating IP is still in existence. Aborting further " "operations.".format( id=floating_ip_id, ip=f_ip['floating_ip_address'], retry=retry + 1, ) ) def _delete_floating_ip(self, floating_ip_id): if self._use_neutron_floating(): try: return self._neutron_delete_floating_ip(floating_ip_id) except exceptions.NotFoundException as e: self.log.debug( "Something went wrong talking to neutron API: " "'%(msg)s'. Trying with Nova.", {'msg': str(e)}, ) return self._nova_delete_floating_ip(floating_ip_id) def _neutron_delete_floating_ip(self, floating_ip_id): try: self.network.delete_ip(floating_ip_id, ignore_missing=False) except exceptions.NotFoundException: return False return True def _nova_delete_floating_ip(self, floating_ip_id): try: proxy._json_response( self.compute.delete(f'/os-floating-ips/{floating_ip_id}'), error_message='Unable to delete floating IP {fip_id}'.format( fip_id=floating_ip_id ), ) except exceptions.NotFoundException: return False return True def delete_unattached_floating_ips(self, retry=1): """Safely delete unattached floating ips. If the cloud can safely purge any unattached floating ips without race conditions, do so. 
Safely here means a specific thing. It means that you are not running this while another process that might do a two step create/attach is running. You can safely run this method while another process is creating servers and attaching floating IPs to them if either that process is using add_auto_ip from shade, or is creating the floating IPs by passing in a server to the create_floating_ip call. :param retry: number of times to retry. Optional, defaults to 1, which is in addition to the initial delete call. A value of 0 will also cause no checking of results to occur. :returns: Number of Floating IPs deleted, False if none :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ processed = [] if self._use_neutron_floating(): for ip in self.list_floating_ips(): if not bool(ip.port_id): processed.append( self.delete_floating_ip( floating_ip_id=ip['id'], retry=retry ) ) return len(processed) if all(processed) else False def _attach_ip_to_server( self, server, floating_ip, fixed_address=None, wait=False, timeout=60, skip_attach=False, nat_destination=None, ): """Attach a floating IP to a server. :param server: Server dict :param floating_ip: Floating IP dict to attach :param fixed_address: (optional) fixed address to which attach the floating IP to. :param wait: (optional) Wait for the address to appear as assigned to the server. Defaults to False. :param timeout: (optional) Seconds to wait, defaults to 60. See the ``wait`` parameter. :param skip_attach: (optional) Skip the actual attach and just do the wait. Defaults to False. :param nat_destination: The fixed network the server's port for the FIP to attach to will come from. :returns: The server ``openstack.compute.v2.server.Server`` :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" # Short circuit if we're asking to attach an IP that's already # attached ext_ip = meta.get_server_ip(server, ext_tag='floating', public=True) if not ext_ip and floating_ip['port_id']: # When we came here from reuse_fip and created FIP it might be # already attached, but the server info might be also # old to check whether it belongs to us now, thus refresh # the server data and try again. There are some clouds, which # explicitely forbids FIP assign call if it is already assigned. server = self.compute.get_server(server['id']) ext_ip = meta.get_server_ip( server, ext_tag='floating', public=True ) if ext_ip == floating_ip['floating_ip_address']: return server if self._use_neutron_floating(): if not skip_attach: try: self._neutron_attach_ip_to_server( server=server, floating_ip=floating_ip, fixed_address=fixed_address, nat_destination=nat_destination, ) except exceptions.NotFoundException as e: self.log.debug( "Something went wrong talking to neutron API: " "'%(msg)s'. Trying with Nova.", {'msg': str(e)}, ) # Fall-through, trying with Nova else: # Nova network self._nova_attach_ip_to_server( server_id=server['id'], floating_ip_id=floating_ip['id'], fixed_address=fixed_address, ) if wait: # Wait for the address to be assigned to the server server_id = server['id'] for _ in utils.iterate_timeout( timeout, "Timeout waiting for the floating IP to be attached.", wait=min(5, timeout), ): server = self.compute.get_server(server_id) ext_ip = meta.get_server_ip( server, ext_tag='floating', public=True ) if ext_ip == floating_ip['floating_ip_address']: return server return server def _neutron_attach_ip_to_server( self, server, floating_ip, fixed_address=None, nat_destination=None ): # Find an available port (port, fixed_address) = self._nat_destination_port( server, fixed_address=fixed_address, nat_destination=nat_destination, ) if not port: raise exceptions.SDKException( "unable to find a port for server {}".format(server['id']) ) floating_ip_args = {'port_id': 
port['id']} if fixed_address is not None: floating_ip_args['fixed_ip_address'] = fixed_address return self.network.update_ip(floating_ip, **floating_ip_args) def _nova_attach_ip_to_server( self, server_id, floating_ip_id, fixed_address=None ): f_ip = self.get_floating_ip(id=floating_ip_id) if f_ip is None: raise exceptions.SDKException( f"unable to find floating IP {floating_ip_id}" ) error_message = "Error attaching IP {ip} to instance {id}".format( ip=floating_ip_id, id=server_id ) body = {'address': f_ip['floating_ip_address']} if fixed_address: body['fixed_address'] = fixed_address return proxy._json_response( self.compute.post( f'/servers/{server_id}/action', json=dict(addFloatingIp=body), ), error_message=error_message, ) def detach_ip_from_server(self, server_id, floating_ip_id): """Detach a floating IP from a server. :param server_id: ID of a server. :param floating_ip_id: Id of the floating IP to detach. :returns: True if the IP has been detached, or False if the IP wasn't attached to any server. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ if self._use_neutron_floating(): try: return self._neutron_detach_ip_from_server( server_id=server_id, floating_ip_id=floating_ip_id ) except exceptions.NotFoundException as e: self.log.debug( "Something went wrong talking to neutron API: " "'%(msg)s'. 
Trying with Nova.", {'msg': str(e)}, ) # Fall-through, trying with Nova # Nova network self._nova_detach_ip_from_server( server_id=server_id, floating_ip_id=floating_ip_id ) def _neutron_detach_ip_from_server(self, server_id, floating_ip_id): f_ip = self.get_floating_ip(id=floating_ip_id) if f_ip is None or not bool(f_ip.port_id): return False try: self.network.update_ip(floating_ip_id, port_id=None) except exceptions.SDKException: raise exceptions.SDKException( "Error detaching IP {ip} from " "server {server_id}".format( ip=floating_ip_id, server_id=server_id ) ) return True def _nova_detach_ip_from_server(self, server_id, floating_ip_id): f_ip = self.get_floating_ip(id=floating_ip_id) if f_ip is None: raise exceptions.SDKException( f"unable to find floating IP {floating_ip_id}" ) error_message = "Error detaching IP {ip} from instance {id}".format( ip=floating_ip_id, id=server_id ) return proxy._json_response( self.compute.post( f'/servers/{server_id}/action', json=dict( removeFloatingIp=dict(address=f_ip['floating_ip_address']) ), ), error_message=error_message, ) return True def _add_ip_from_pool( self, server, network, fixed_address=None, reuse=True, wait=False, timeout=60, nat_destination=None, ): """Add a floating IP to a server from a given pool This method reuses available IPs, when possible, or allocate new IPs to the current tenant. The floating IP is attached to the given fixed address or to the first server port/fixed address :param server: Server dict :param network: Name or ID of the network. :param fixed_address: a fixed address :param reuse: Try to reuse existing ips. Defaults to True. :param wait: (optional) Wait for the address to appear as assigned to the server. Defaults to False. :param timeout: (optional) Seconds to wait, defaults to 60. See the ``wait`` parameter. :param nat_destination: (optional) the name of the network of the port to associate with the floating ip. 
:returns: the updated server ``openstack.compute.v2.server.Server`` """ if reuse: f_ip = self.available_floating_ip(network=network) else: start_time = time.time() f_ip = self.create_floating_ip( server=server, network=network, nat_destination=nat_destination, fixed_address=fixed_address, wait=wait, timeout=timeout, ) timeout = timeout - (time.time() - start_time) server = self.compute.get_server(server.id) # We run attach as a second call rather than in the create call # because there are code flows where we will not have an attached # FIP yet. However, even if it was attached in the create, we run # the attach function below to get back the server dict refreshed # with the FIP information. return self._attach_ip_to_server( server=server, floating_ip=f_ip, fixed_address=fixed_address, wait=wait, timeout=timeout, nat_destination=nat_destination, ) def add_ip_list( self, server, ips, wait=False, timeout=60, fixed_address=None, nat_destination=None, ): """Attach a list of IPs to a server. :param server: a server object :param ips: list of floating IP addresses or a single address :param wait: (optional) Wait for the address to appear as assigned to the server. Defaults to False. :param timeout: (optional) Seconds to wait, defaults to 60. See the ``wait`` parameter. :param fixed_address: (optional) Fixed address of the server to attach the IP to :param nat_destination: (optional) Name or ID of the network that the fixed IP to attach the floating IP should be on :returns: The updated server ``openstack.compute.v2.server.Server`` :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" if type(ips) is list: ips = [ips] for ip in ips: f_ip = self.get_floating_ip( id=None, filters={'floating_ip_address': ip} ) server = self._attach_ip_to_server( server=server, floating_ip=f_ip, wait=wait, timeout=timeout, fixed_address=fixed_address, nat_destination=nat_destination, ) return server def add_auto_ip(self, server, wait=False, timeout=60, reuse=True): """Add a floating IP to a server. This method is intended for basic usage. For advanced network architecture (e.g. multiple external networks or servers with multiple interfaces), use other floating IP methods. This method can reuse available IPs, or allocate new IPs to the current project. :param server: a server dictionary. :param reuse: Whether or not to attempt to reuse IPs, defaults to True. :param wait: (optional) Wait for the address to appear as assigned to the server. Defaults to False. :param timeout: (optional) Seconds to wait, defaults to 60. See the ``wait`` parameter. :param reuse: Try to reuse existing ips. Defaults to True. :returns: Floating IP address attached to server. """ server = self._add_auto_ip( server, wait=wait, timeout=timeout, reuse=reuse ) return server['interface_ip'] or None def _add_auto_ip(self, server, wait=False, timeout=60, reuse=True): skip_attach = False created = False if reuse: f_ip = self.available_floating_ip(server=server) else: start_time = time.time() f_ip = self.create_floating_ip( server=server, wait=wait, timeout=timeout ) timeout = timeout - (time.time() - start_time) if server: # This gets passed in for both nova and neutron # but is only meaningful for the neutron logic branch skip_attach = True created = True try: # We run attach as a second call rather than in the create call # because there are code flows where we will not have an attached # FIP yet. However, even if it was attached in the create, we run # the attach function below to get back the server dict refreshed # with the FIP information. 
return self._attach_ip_to_server( server=server, floating_ip=f_ip, wait=wait, timeout=timeout, skip_attach=skip_attach, ) except exceptions.ResourceTimeout: if self._use_neutron_floating() and created: # We are here because we created an IP on the port # It failed. Delete so as not to leak an unmanaged # resource self.log.error( "Timeout waiting for floating IP to become " "active. Floating IP %(ip)s:%(id)s was created for " "server %(server)s but is being deleted due to " "activation failure.", { 'ip': f_ip['floating_ip_address'], 'id': f_ip['id'], 'server': server['id'], }, ) try: self.delete_floating_ip(f_ip['id']) except Exception as e: self.log.error( "FIP LEAK: Attempted to delete floating ip " "%(fip)s but received %(exc)s exception: %(err)s", {'fip': f_ip['id'], 'exc': e.__class__, 'err': str(e)}, ) raise e raise def add_ips_to_server( self, server, auto_ip=True, ips=None, ip_pool=None, wait=False, timeout=60, reuse=True, fixed_address=None, nat_destination=None, ): if ip_pool: server = self._add_ip_from_pool( server, ip_pool, reuse=reuse, wait=wait, timeout=timeout, fixed_address=fixed_address, nat_destination=nat_destination, ) elif ips: server = self.add_ip_list( server, ips, wait=wait, timeout=timeout, fixed_address=fixed_address, nat_destination=nat_destination, ) elif auto_ip: if self._needs_floating_ip(server, nat_destination): server = self._add_auto_ip( server, wait=wait, timeout=timeout, reuse=reuse ) return server def _needs_floating_ip(self, server, nat_destination): """Figure out if auto_ip should add a floating ip to this server. If the server has a floating ip it does not need another one. If the server does not have a fixed ip address it does not need a floating ip. If self.private then the server does not need a floating ip. If the cloud runs nova, and the server has a private address and not a public address, then the server needs a floating ip. 
If the server has a fixed ip address and no floating ip address and the cloud has a network from which floating IPs come that is connected via a router to the network from which the fixed ip address came, then the server needs a floating ip. If the server has a fixed ip address and no floating ip address and the cloud does not have a network from which floating ips come, or it has one but that network is not connected to the network from which the server's fixed ip address came via a router, then the server does not need a floating ip. """ if not self._has_floating_ips(): return False if server['addresses'] is None: # fetch missing server details, e.g. because # meta.add_server_interfaces() was not called server = self.compute.get_server(server) if server['public_v4'] or any( [ any( [ address['OS-EXT-IPS:type'] == 'floating' for address in addresses ] ) for addresses in (server['addresses'] or {}).values() ] ): return False if not server['private_v4'] and not any( [ any( [ address['OS-EXT-IPS:type'] == 'fixed' for address in addresses ] ) for addresses in (server['addresses'] or {}).values() ] ): return False if self.private: return False if not self.has_service('network'): return True # No floating ip network - no FIPs try: self._get_floating_network_id() except exceptions.SDKException: return False (port_obj, fixed_ip_address) = self._nat_destination_port( server, nat_destination=nat_destination ) if not port_obj or not fixed_ip_address: return False return True def _nat_destination_port( self, server, fixed_address=None, nat_destination=None ): """Returns server port that is on a nat_destination network Find a port attached to the server which is on a network which has a subnet which can be the destination of NAT. Such a network is referred to in shade as a "nat_destination" network. So this then is a function which returns a port on such a network that is associated with the given server. :param server: Server dict. 
:param fixed_address: Fixed ip address of the port :param nat_destination: Name or ID of the network of the port. """ ports = list(self.network.ports(device_id=server['id'])) if not ports: return (None, None) port = None if not fixed_address: if len(ports) > 1: if nat_destination: nat_network = self.network.find_network(nat_destination) if not nat_network: raise exceptions.SDKException( 'NAT Destination {nat_destination} was configured' ' but not found on the cloud. Please check your' ' config and your cloud and try again.'.format( nat_destination=nat_destination ) ) else: nat_network = self.get_nat_destination() if not nat_network: raise exceptions.SDKException( 'Multiple ports were found for server {server}' ' but none of the networks are a valid NAT' ' destination, so it is impossible to add a' ' floating IP. If you have a network that is a valid' ' destination for NAT and we could not find it,' ' please file a bug. But also configure the' ' nat_destination property of the networks list in' ' your clouds.yaml file. If you do not have a' ' clouds.yaml file, please make one - your setup' ' is complicated.'.format(server=server['id']) ) maybe_ports = [] for maybe_port in ports: if maybe_port['network_id'] == nat_network['id']: maybe_ports.append(maybe_port) if not maybe_ports: raise exceptions.SDKException( 'No port on server {server} was found matching' ' your NAT destination network {dest}. Please ' ' check your config'.format( server=server['id'], dest=nat_network['name'] ) ) ports = maybe_ports # Select the most recent available IPv4 address # To do this, sort the ports in reverse order by the created_at # field which is a string containing an ISO DateTime (which # thankfully sort properly) This way the most recent port created, # if there are more than one, will be the arbitrary port we # select. 
for port in sorted( ports, key=lambda p: p.get('created_at', 0), reverse=True ): for address in port.get('fixed_ips', list()): try: ip = ipaddress.ip_address(address['ip_address']) except Exception: continue if ip.version == 4: fixed_address = address['ip_address'] return port, fixed_address raise exceptions.SDKException( "unable to find a free fixed IPv4 address for server " "{}".format(server['id']) ) # unfortunately a port can have more than one fixed IP: # we can't use the search_ports filtering for fixed_address as # they are contained in a list. e.g. # # "fixed_ips": [ # { # "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062", # "ip_address": "172.24.4.2" # } # ] # # Search fixed_address for p in ports: for fixed_ip in p['fixed_ips']: if fixed_address == fixed_ip['ip_address']: return (p, fixed_address) return (None, None) def _has_floating_ips(self): if not self._floating_ip_source: return False else: return self._floating_ip_source in ('nova', 'neutron') def _use_neutron_floating(self): return ( self.has_service('network') and self._floating_ip_source == 'neutron' ) def _normalize_floating_ips(self, ips): """Normalize the structure of floating IPs Unfortunately, not all the Neutron floating_ip attributes are available with Nova and not all Nova floating_ip attributes are available with Neutron. This function extract attributes that are common to Nova and Neutron floating IP resource. If the whole structure is needed inside openstacksdk there are private methods that returns "original" objects (e.g. _neutron_allocate_floating_ip) :param list ips: A list of Neutron floating IPs. :returns: A list of normalized dicts with the following attributes:: [ { "id": "this-is-a-floating-ip-id", "fixed_ip_address": "192.0.2.10", "floating_ip_address": "198.51.100.10", "network": "this-is-a-net-or-pool-id", "attached": True, "status": "ACTIVE" }, ... 
] """ return [self._normalize_floating_ip(ip) for ip in ips] def _normalize_floating_ip(self, ip): # Copy incoming floating ip because of shared dicts in unittests # Only import munch when we really need it location = self._get_current_location(project_id=ip.get('owner')) # This copy is to keep things from getting epically weird in tests ip = ip.copy() ret = utils.Munch(location=location) fixed_ip_address = ip.pop('fixed_ip_address', ip.pop('fixed_ip', None)) floating_ip_address = ip.pop('floating_ip_address', ip.pop('ip', None)) network_id = ip.pop( 'floating_network_id', ip.pop('network', ip.pop('pool', None)) ) project_id = ip.pop('tenant_id', '') project_id = ip.pop('project_id', project_id) instance_id = ip.pop('instance_id', None) router_id = ip.pop('router_id', None) id = ip.pop('id') port_id = ip.pop('port_id', None) created_at = ip.pop('created_at', None) updated_at = ip.pop('updated_at', None) # Note - description may not always be on the underlying cloud. # Normalizing it here is easy - what do we do when people want to # set a description? 
description = ip.pop('description', '') revision_number = ip.pop('revision_number', None) if self._use_neutron_floating(): attached = bool(port_id) status = ip.pop('status', 'UNKNOWN') else: attached = bool(instance_id) # In neutron's terms, Nova floating IPs are always ACTIVE status = 'ACTIVE' ret = utils.Munch( attached=attached, fixed_ip_address=fixed_ip_address, floating_ip_address=floating_ip_address, id=id, location=self._get_current_location(project_id=project_id), network=network_id, port=port_id, router=router_id, status=status, created_at=created_at, updated_at=updated_at, description=description, revision_number=revision_number, properties=ip.copy(), ) # Backwards compat if not self.strict_mode: ret['port_id'] = port_id ret['router_id'] = router_id ret['project_id'] = project_id ret['tenant_id'] = project_id ret['floating_network_id'] = network_id for key, val in ret['properties'].items(): ret.setdefault(key, val) return ret # security groups def search_security_groups(self, name_or_id=None, filters=None): # `filters` could be a dict or a jmespath (str) groups = self.list_security_groups( filters=filters if isinstance(filters, dict) else None ) return _utils._filter_list(groups, name_or_id, filters) def list_security_groups(self, filters=None): """List all available security groups. :param filters: (optional) dict of filter conditions to push down :returns: A list of security group ``openstack.network.v2.security_group.SecurityGroup``. 
""" # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) if not filters: filters = {} data = [] # Handle neutron security groups if self._use_neutron_secgroups(): # pass filters dict to the list to filter as much as possible on # the server side return list(self.network.security_groups(**filters)) # Handle nova security groups else: data = proxy._json_response( self.compute.get('/os-security-groups', params=filters) ) return self._normalize_secgroups( self._get_and_munchify('security_groups', data) ) def get_security_group(self, name_or_id, filters=None): """Get a security group by name or ID. :param name_or_id: Name or ID of the security group. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A security group ``openstack.network.v2.security_group.SecurityGroup`` or None if no matching security group is found. """ return _utils._get_entity(self, 'security_group', name_or_id, filters) def get_security_group_by_id(self, id): """Get a security group by ID :param id: ID of the security group. :returns: A security group ``openstack.network.v2.security_group.SecurityGroup``. 
""" if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) error_message = f"Error getting security group with ID {id}" if self._use_neutron_secgroups(): return self.network.get_security_group(id) else: data = proxy._json_response( self.compute.get(f'/os-security-groups/{id}'), error_message=error_message, ) return self._normalize_secgroup( self._get_and_munchify('security_group', data) ) def create_security_group( self, name, description, project_id=None, stateful=None ): """Create a new security group :param string name: A name for the security group. :param string description: Describes the security group. :param string project_id: Specify the project ID this security group will be created on (admin-only). :param string stateful: Whether the security group is stateful or not. :returns: A ``openstack.network.v2.security_group.SecurityGroup`` representing the new security group. :raises: :class:`~openstack.exceptions.SDKException` on operation error. :raises: OpenStackCloudUnavailableFeature if security groups are not supported on this cloud. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) data = [] security_group_json = {'name': name, 'description': description} if stateful is not None: security_group_json['stateful'] = stateful if project_id is not None: security_group_json['tenant_id'] = project_id if self._use_neutron_secgroups(): return self.network.create_security_group(**security_group_json) else: data = proxy._json_response( self.compute.post( '/os-security-groups', json={'security_group': security_group_json}, ) ) return self._normalize_secgroup( self._get_and_munchify('security_group', data) ) def delete_security_group(self, name_or_id): """Delete a security group :param string name_or_id: The name or unique ID of the security group. :returns: True if delete succeeded, False otherwise. 
:raises: :class:`~openstack.exceptions.SDKException` on operation error. :raises: OpenStackCloudUnavailableFeature if security groups are not supported on this cloud. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) # TODO(mordred): Let's come back and stop doing a GET before we do # the delete. secgroup = self.get_security_group(name_or_id) if secgroup is None: self.log.debug( 'Security group %s not found for deleting', name_or_id ) return False if self._use_neutron_secgroups(): self.network.delete_security_group( secgroup['id'], ignore_missing=False ) return True else: proxy._json_response( self.compute.delete( '/os-security-groups/{id}'.format(id=secgroup['id']) ) ) return True @_utils.valid_kwargs('name', 'description', 'stateful') def update_security_group(self, name_or_id, **kwargs): """Update a security group :param string name_or_id: Name or ID of the security group to update. :param string name: New name for the security group. :param string description: New description for the security group. :returns: A ``openstack.network.v2.security_group.SecurityGroup`` describing the updated security group. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) group = self.get_security_group(name_or_id) if group is None: raise exceptions.SDKException( "Security group %s not found." 
% name_or_id ) if self._use_neutron_secgroups(): return self.network.update_security_group(group['id'], **kwargs) else: for key in ('name', 'description'): kwargs.setdefault(key, group[key]) data = proxy._json_response( self.compute.put( '/os-security-groups/{id}'.format(id=group['id']), json={'security_group': kwargs}, ) ) return self._normalize_secgroup( self._get_and_munchify('security_group', data) ) def create_security_group_rule( self, secgroup_name_or_id, port_range_min=None, port_range_max=None, protocol=None, remote_ip_prefix=None, remote_group_id=None, remote_address_group_id=None, direction='ingress', ethertype='IPv4', project_id=None, description=None, ): """Create a new security group rule :param string secgroup_name_or_id: The security group name or ID to associate with this security group rule. If a non-unique group name is given, an exception is raised. :param int port_range_min: The minimum port number in the range that is matched by the security group rule. If the protocol is TCP or UDP, this value must be less than or equal to the port_range_max attribute value. If nova is used by the cloud provider for security groups, then a value of None will be transformed to -1. :param int port_range_max: The maximum port number in the range that is matched by the security group rule. The port_range_min attribute constrains the port_range_max attribute. If nova is used by the cloud provider for security groups, then a value of None will be transformed to -1. :param string protocol: The protocol that is matched by the security group rule. Valid values are None, tcp, udp, and icmp. :param string remote_ip_prefix: The remote IP prefix to be associated with this security group rule. This attribute matches the specified IP prefix as the source IP address of the IP packet. :param string remote_group_id: The remote group ID to be associated with this security group rule. 
:param string remote_address_group_id: The remote address group ID to be associated with this security group rule. :param string direction: Ingress or egress: The direction in which the security group rule is applied. For a compute instance, an ingress security group rule is applied to incoming (ingress) traffic for that instance. An egress rule is applied to traffic leaving the instance. :param string ethertype: Must be IPv4 or IPv6, and addresses represented in CIDR must match the ingress or egress rules. :param string project_id: Specify the project ID this security group will be created on (admin-only). :param string description: Description of the rule, max 255 characters. :returns: A ``openstack.network.v2.security_group.SecurityGroup`` representing the new security group rule. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) secgroup = self.get_security_group(secgroup_name_or_id) if not secgroup: raise exceptions.SDKException( "Security group %s not found." % secgroup_name_or_id ) if self._use_neutron_secgroups(): # NOTE: Nova accepts -1 port numbers, but Neutron accepts None # as the equivalent value. rule_def = { 'security_group_id': secgroup['id'], 'port_range_min': ( None if port_range_min == -1 else port_range_min ), 'port_range_max': ( None if port_range_max == -1 else port_range_max ), 'protocol': protocol, 'remote_ip_prefix': remote_ip_prefix, 'remote_group_id': remote_group_id, 'remote_address_group_id': remote_address_group_id, 'direction': direction, 'ethertype': ethertype, } if project_id is not None: rule_def['tenant_id'] = project_id if description is not None: rule_def["description"] = description return self.network.create_security_group_rule(**rule_def) else: # NOTE: Neutron accepts None for protocol. Nova does not. 
if protocol is None: raise exceptions.SDKException('Protocol must be specified') if direction == 'egress': self.log.debug( 'Rule creation failed: Nova does not support egress rules' ) raise exceptions.SDKException('No support for egress rules') # NOTE: Neutron accepts None for ports, but Nova requires -1 # as the equivalent value for ICMP. # # For TCP/UDP, if both are None, Neutron allows this and Nova # represents this as all ports (1-65535). Nova does not accept # None values, so to hide this difference, we will automatically # convert to the full port range. If only a single port value is # specified, it will error as normal. if protocol == 'icmp': if port_range_min is None: port_range_min = -1 if port_range_max is None: port_range_max = -1 elif protocol in ['tcp', 'udp']: if port_range_min is None and port_range_max is None: port_range_min = 1 port_range_max = 65535 security_group_rule_dict = dict( security_group_rule=dict( parent_group_id=secgroup['id'], ip_protocol=protocol, from_port=port_range_min, to_port=port_range_max, cidr=remote_ip_prefix, group_id=remote_group_id, ) ) if project_id is not None: security_group_rule_dict['security_group_rule'][ 'tenant_id' ] = project_id data = proxy._json_response( self.compute.post( '/os-security-group-rules', json=security_group_rule_dict ) ) return self._normalize_secgroup_rule( self._get_and_munchify('security_group_rule', data) ) def delete_security_group_rule(self, rule_id): """Delete a security group rule :param string rule_id: The unique ID of the security group rule. :returns: True if delete succeeded, False otherwise. :raises: :class:`~openstack.exceptions.SDKException` on operation error. :raises: OpenStackCloudUnavailableFeature if security groups are not supported on this cloud. 
""" # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) if self._use_neutron_secgroups(): self.network.delete_security_group_rule( rule_id, ignore_missing=False ) return True else: try: exceptions.raise_from_response( self.compute.delete(f'/os-security-group-rules/{rule_id}') ) except exceptions.NotFoundException: return False return True def _has_secgroups(self): if not self.secgroup_source: return False else: return self.secgroup_source.lower() in ('nova', 'neutron') def _use_neutron_secgroups(self): return ( self.has_service('network') and self.secgroup_source == 'neutron' ) def _normalize_secgroups(self, groups): """Normalize the structure of security groups This makes security group dicts, as returned from nova, look like the security group dicts as returned from neutron. This does not make them look exactly the same, but it's pretty close. :param list groups: A list of security group dicts. :returns: A list of normalized dicts. 
""" ret = [] for group in groups: ret.append(self._normalize_secgroup(group)) return ret # TODO(stephenfin): Remove this once we get rid of support for nova # secgroups def _normalize_secgroup(self, group): ret = utils.Munch() # Copy incoming group because of shared dicts in unittests group = group.copy() # Discard noise self._remove_novaclient_artifacts(group) rules = self._normalize_secgroup_rules( group.pop('security_group_rules', group.pop('rules', [])) ) project_id = group.pop('tenant_id', '') project_id = group.pop('project_id', project_id) ret['location'] = self._get_current_location(project_id=project_id) ret['id'] = group.pop('id') ret['name'] = group.pop('name') ret['security_group_rules'] = rules ret['description'] = group.pop('description') ret['properties'] = group if self._use_neutron_secgroups(): ret['stateful'] = group.pop('stateful', True) # Backwards compat with Neutron if not self.strict_mode: ret['tenant_id'] = project_id ret['project_id'] = project_id for key, val in ret['properties'].items(): ret.setdefault(key, val) return ret # TODO(stephenfin): Remove this once we get rid of support for nova # secgroups def _normalize_secgroup_rules(self, rules): """Normalize the structure of nova security group rules Note that nova uses -1 for non-specific port values, but neutron represents these with None. :param list rules: A list of security group rule dicts. :returns: A list of normalized dicts. 
""" ret = [] for rule in rules: ret.append(self._normalize_secgroup_rule(rule)) return ret # TODO(stephenfin): Remove this once we get rid of support for nova # secgroups def _normalize_secgroup_rule(self, rule): ret = utils.Munch() # Copy incoming rule because of shared dicts in unittests rule = rule.copy() ret['id'] = rule.pop('id') ret['direction'] = rule.pop('direction', 'ingress') ret['ethertype'] = rule.pop('ethertype', 'IPv4') port_range_min = rule.get( 'port_range_min', rule.pop('from_port', None) ) if port_range_min == -1: port_range_min = None if port_range_min is not None: port_range_min = int(port_range_min) ret['port_range_min'] = port_range_min port_range_max = rule.pop('port_range_max', rule.pop('to_port', None)) if port_range_max == -1: port_range_max = None if port_range_min is not None: port_range_min = int(port_range_min) ret['port_range_max'] = port_range_max ret['protocol'] = rule.pop('protocol', rule.pop('ip_protocol', None)) ret['remote_ip_prefix'] = rule.pop( 'remote_ip_prefix', rule.pop('ip_range', {}).get('cidr', None) ) ret['security_group_id'] = rule.pop( 'security_group_id', rule.pop('parent_group_id', None) ) ret['remote_group_id'] = rule.pop('remote_group_id', None) project_id = rule.pop('tenant_id', '') project_id = rule.pop('project_id', project_id) ret['location'] = self._get_current_location(project_id=project_id) ret['properties'] = rule # Backwards compat with Neutron if not self.strict_mode: ret['tenant_id'] = project_id ret['project_id'] = project_id for key, val in ret['properties'].items(): ret.setdefault(key, val) return ret def _remove_novaclient_artifacts(self, item): # Remove novaclient artifacts item.pop('links', None) item.pop('NAME_ATTR', None) item.pop('HUMAN_ID', None) item.pop('human_id', None) item.pop('request_ids', None) item.pop('x_openstack_request_ids', None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/cloud/_object_store.py0000664000175000017500000004773700000000000022541 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import concurrent.futures import urllib.parse import keystoneauth1.exceptions from openstack.cloud import _utils from openstack.cloud import openstackcloud from openstack import exceptions OBJECT_CONTAINER_ACLS = { 'public': '.r:*,.rlistings', 'private': '', } class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin): # TODO(stephenfin): Remove 'full_listing' as it's a noop def list_containers(self, full_listing=True, prefix=None): """List containers. :param full_listing: Ignored. Present for backwards compat :param prefix: Only objects with this prefix will be returned. (optional) :returns: A list of object store ``Container`` objects. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ return list(self.object_store.containers(prefix=prefix)) def search_containers(self, name=None, filters=None): """Search containers. :param string name: Container name. :param filters: A dict containing additional filters to use. OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of object store ``Container`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException`: If something goes wrong during the OpenStack API call. 
""" containers = self.list_containers() return _utils._filter_list(containers, name, filters) def get_container(self, name, skip_cache=False): """Get metadata about a container. :param str name: Name of the container to get metadata for. :param bool skip_cache: Ignore the cache of container metadata for this container. Defaults to ``False``. :returns: An object store ``Container`` object if found, else None. """ if skip_cache or name not in self._container_cache: try: container = self.object_store.get_container_metadata(name) self._container_cache[name] = container except exceptions.HttpException as ex: if ex.response.status_code == 404: return None raise return self._container_cache[name] def create_container(self, name, public=False): """Create an object-store container. :param str name: Name of the container to create. :param bool public: Whether to set this container to be public. Defaults to ``False``. :returns: The created object store ``Container`` object. """ container = self.get_container(name) if container: return container attrs = dict(name=name) if public: attrs['read_ACL'] = OBJECT_CONTAINER_ACLS['public'] container = self.object_store.create_container(**attrs) return self.get_container(name, skip_cache=True) def delete_container(self, name): """Delete an object-store container. :param str name: Name of the container to delete. """ try: self.object_store.delete_container(name, ignore_missing=False) self._container_cache.pop(name, None) return True except exceptions.NotFoundException: return False except exceptions.ConflictException: raise exceptions.SDKException( 'Attempt to delete container {container} failed. The' ' container is not empty. Please delete the objects' ' inside it before deleting the container'.format( container=name ) ) def update_container(self, name, headers): """Update the metadata in a container. :param str name: Name of the container to update. :param dict headers: Key/Value headers to set on the container. 
""" self.object_store.set_container_metadata( name, refresh=False, **headers ) def set_container_access(self, name, access, refresh=False): """Set the access control list on a container. :param str name: Name of the container. :param str access: ACL string to set on the container. Can also be ``public`` or ``private`` which will be translated into appropriate ACL strings. :param refresh: Flag to trigger refresh of the container properties """ if access not in OBJECT_CONTAINER_ACLS: raise exceptions.SDKException( "Invalid container access specified: %s. Must be one of %s" % (access, list(OBJECT_CONTAINER_ACLS.keys())) ) return self.object_store.set_container_metadata( name, read_ACL=OBJECT_CONTAINER_ACLS[access], refresh=refresh ) def get_container_access(self, name): """Get the control list from a container. :param str name: Name of the container. :returns: The contol list for the container. :raises: :class:`~openstack.exceptions.SDKException` if the container was not found or container access could not be determined. """ container = self.get_container(name, skip_cache=True) if not container: raise exceptions.SDKException("Container not found: %s" % name) acl = container.read_ACL or '' for key, value in OBJECT_CONTAINER_ACLS.items(): # Convert to string for the comparison because swiftclient # returns byte values as bytes sometimes and apparently == # on bytes doesn't work like you'd think if str(acl) == str(value): return key raise exceptions.SDKException( "Could not determine container access for ACL: %s." % acl ) def get_object_capabilities(self): """Get infomation about the object-storage service The object-storage service publishes a set of capabilities that include metadata about maximum values and thresholds. :returns: An object store ``Info`` object. """ return self.object_store.get_info() def get_object_segment_size(self, segment_size): """Get a segment size that will work given capabilities. :param segment_size: :returns: A segment size. 
""" return self.object_store.get_object_segment_size(segment_size) def is_object_stale( self, container, name, filename, file_md5=None, file_sha256=None ): """Check to see if an object matches the hashes of a file. :param container: Name of the container. :param name: Name of the object. :param filename: Path to the file. :param file_md5: Pre-calculated md5 of the file contents. Defaults to None which means calculate locally. :param file_sha256: Pre-calculated sha256 of the file contents. Defaults to None which means calculate locally. """ return self.object_store.is_object_stale( container, name, filename, file_md5=file_md5, file_sha256=file_sha256, ) def create_directory_marker_object(self, container, name, **headers): """Create a zero-byte directory marker object .. note:: This method is not needed in most cases. Modern swift does not require directory marker objects. However, some swift installs may need these. When using swift Static Web and Web Listings to serve static content one may need to create a zero-byte object to represent each "directory". Doing so allows Web Listings to generate an index of the objects inside of it, and allows Static Web to render index.html "files" that are "inside" the directory. :param container: The name of the container. :param name: Name for the directory marker object within the container. :param headers: These will be passed through to the object creation API as HTTP Headers. :returns: The created object store ``Object`` object. """ headers['content-type'] = 'application/directory' return self.create_object( container, name, data='', generate_checksums=False, **headers ) def create_object( self, container, name, filename=None, md5=None, sha256=None, segment_size=None, use_slo=True, metadata=None, generate_checksums=None, data=None, **headers, ): """Create a file object. Automatically uses large-object segments if needed. :param container: The name of the container to store the file in. 
This container will be created if it does not exist already. :param name: Name for the object within the container. :param filename: The path to the local file whose contents will be uploaded. Mutually exclusive with data. :param data: The content to upload to the object. Mutually exclusive with filename. :param md5: A hexadecimal md5 of the file. (Optional), if it is known and can be passed here, it will save repeating the expensive md5 process. It is assumed to be accurate. :param sha256: A hexadecimal sha256 of the file. (Optional) See md5. :param segment_size: Break the uploaded object into segments of this many bytes. (Optional) Shade will attempt to discover the maximum value for this from the server if it is not specified, or will use a reasonable default. :param headers: These will be passed through to the object creation API as HTTP Headers. :param use_slo: If the object is large enough to need to be a Large Object, use a static rather than dynamic object. Static Objects will delete segment objects when the manifest object is deleted. (optional, defaults to True) :param generate_checksums: Whether to generate checksums on the client side that get added to headers for later prevention of double uploads of identical data. (optional, defaults to True) :param metadata: This dict will get changed into headers that set metadata of the object :returns: The created object store ``Object`` object. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ return self.object_store.create_object( container, name, filename=filename, data=data, md5=md5, sha256=sha256, use_slo=use_slo, generate_checksums=generate_checksums, metadata=metadata, **headers, ) def update_object(self, container, name, metadata=None, **headers): """Update the metadata of an object :param container: The name of the container the object is in :param name: Name for the object within the container. 
:param metadata: This dict will get changed into headers that set metadata of the object :param headers: These will be passed through to the object update API as HTTP Headers. :returns: None :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ meta = metadata.copy() or {} meta.update(**headers) self.object_store.set_object_metadata(name, container, **meta) def list_objects(self, container, full_listing=True, prefix=None): """List objects. :param container: Name of the container to list objects in. :param full_listing: Ignored. Present for backwards compat :param prefix: Only objects with this prefix will be returned. (optional) :returns: A list of object store ``Object`` objects. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ return list( self.object_store.objects(container=container, prefix=prefix) ) def search_objects(self, container, name=None, filters=None): """Search objects. :param string name: Object name. :param filters: A dict containing additional filters to use. OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A list of object store ``Object`` objects matching the search criteria. :raises: :class:`~openstack.exceptions.SDKException`: If something goes wrong during the OpenStack API call. """ objects = self.list_objects(container) return _utils._filter_list(objects, name, filters) def delete_object(self, container, name, meta=None): """Delete an object from a container. :param string container: Name of the container holding the object. :param string name: Name of the object to delete. :param dict meta: Metadata for the object in question. (optional, will be fetched if not provided) :returns: True if delete succeeded, False if the object was not found. :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" try: self.object_store.delete_object( name, ignore_missing=False, container=container, ) return True except exceptions.SDKException: return False def delete_autocreated_image_objects( self, container=None, segment_prefix=None, ): """Delete all objects autocreated for image uploads. This method should generally not be needed, as shade should clean up the objects it uses for object-based image creation. If something goes wrong and it is found that there are leaked objects, this method can be used to delete any objects that shade has created on the user's behalf in service of image uploads. :param str container: Name of the container. Defaults to 'images'. :param str segment_prefix: Prefix for the image segment names to delete. If not given, all image upload segments present are deleted. :returns: True if deletion was successful, else False. """ return self.object_store._delete_autocreated_image_objects( container, segment_prefix=segment_prefix ) def get_object_metadata(self, container, name): """Get object metadata. :param container: :param name: :returns: The object metadata. """ return self.object_store.get_object_metadata(name, container).metadata def get_object_raw(self, container, obj, query_string=None, stream=False): """Get a raw response object for an object. :param string container: Name of the container. :param string obj: Name of the object. :param string query_string: Query args for uri. (delimiter, prefix, etc.) :param bool stream: Whether to stream the response or not. :returns: A `requests.Response` :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" endpoint = self._get_object_endpoint(container, obj, query_string) return self.object_store.get(endpoint, stream=stream) def _get_object_endpoint(self, container, obj=None, query_string=None): endpoint = urllib.parse.quote(container) if obj: endpoint = '{endpoint}/{object}'.format( endpoint=endpoint, object=urllib.parse.quote(obj) ) if query_string: endpoint = '{endpoint}?{query_string}'.format( endpoint=endpoint, query_string=query_string ) return endpoint def stream_object( self, container, obj, query_string=None, resp_chunk_size=1024, ): """Download the content via a streaming iterator. :param string container: Name of the container. :param string obj: Name of the object. :param string query_string: Query args for uri. (delimiter, prefix, etc.) :param int resp_chunk_size: Chunk size of data to read. Only used if the results are :returns: An iterator over the content or None if the object is not found. :raises: :class:`~openstack.exceptions.SDKException` on operation error. """ try: yield from self.object_store.stream_object( obj, container, chunk_size=resp_chunk_size ) except exceptions.NotFoundException: return def get_object( self, container, obj, query_string=None, resp_chunk_size=1024, outfile=None, stream=False, ): """Get the headers and body of an object :param string container: Name of the container. :param string obj: Name of the object. :param string query_string: Query args for uri. (delimiter, prefix, etc.) :param int resp_chunk_size: Chunk size of data to read. Only used if the results are being written to a file or stream is True. (optional, defaults to 1k) :param outfile: Write the object to a file instead of returning the contents. If this option is given, body in the return tuple will be None. outfile can either be a file path given as a string, or a File like object. :returns: Tuple (headers, body) of the object, or None if the object is not found (404). :raises: :class:`~openstack.exceptions.SDKException` on operation error. 
""" try: obj = self.object_store.get_object( obj, container=container, resp_chunk_size=resp_chunk_size, outfile=outfile, remember_content=(outfile is None), ) headers = {k.lower(): v for k, v in obj._last_headers.items()} return (headers, obj.data) except exceptions.NotFoundException: return None def _wait_for_futures(self, futures, raise_on_error=True): """Collect results or failures from a list of running future tasks.""" results = [] retries = [] # Check on each result as its thread finishes for completed in concurrent.futures.as_completed(futures): try: result = completed.result() exceptions.raise_from_response(result) results.append(result) except ( keystoneauth1.exceptions.RetriableConnectionFailure, exceptions.HttpException, ) as e: error_text = "Exception processing async task: {}".format( str(e) ) if raise_on_error: self.log.exception(error_text) raise else: self.log.debug(error_text) # If we get an exception, put the result into a list so we # can try again retries.append(completed.result()) return results, retries ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_orchestration.py0000664000175000017500000002304000000000000022720 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from openstack.cloud import _utils from openstack.cloud import openstackcloud from openstack import exceptions from openstack.orchestration.util import event_utils class OrchestrationCloudMixin(openstackcloud._OpenStackCloudMixin): def get_template_contents( self, template_file=None, template_url=None, template_object=None, files=None, ): return self.orchestration.get_template_contents( template_file=template_file, template_url=template_url, template_object=template_object, files=files, ) def create_stack( self, name, tags=None, template_file=None, template_url=None, template_object=None, files=None, rollback=True, wait=False, timeout=3600, environment_files=None, **parameters ): """Create a stack. :param string name: Name of the stack. :param tags: List of tag(s) of the stack. (optional) :param string template_file: Path to the template. :param string template_url: URL of template. :param string template_object: URL to retrieve template object. :param dict files: dict of additional file content to include. :param boolean rollback: Enable rollback on create failure. :param boolean wait: Whether to wait for the delete to finish. :param int timeout: Stack create timeout in seconds. :param environment_files: Paths to environment files to apply. Other arguments will be passed as stack parameters which will take precedence over any parameters specified in the environments. Only one of template_file, template_url, template_object should be specified. 
:returns: a dict containing the stack description :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call """ params = dict( tags=tags, is_rollback_disabled=not rollback, timeout_mins=timeout // 60, parameters=parameters, ) params.update( self.orchestration.read_env_and_templates( template_file=template_file, template_url=template_url, template_object=template_object, files=files, environment_files=environment_files, ) ) self.orchestration.create_stack(name=name, **params) if wait: event_utils.poll_for_events(self, stack_name=name, action='CREATE') return self.get_stack(name) def update_stack( self, name_or_id, template_file=None, template_url=None, template_object=None, files=None, rollback=True, tags=None, wait=False, timeout=3600, environment_files=None, **parameters ): """Update a stack. :param string name_or_id: Name or ID of the stack to update. :param string template_file: Path to the template. :param string template_url: URL of template. :param string template_object: URL to retrieve template object. :param dict files: dict of additional file content to include. :param boolean rollback: Enable rollback on update failure. :param boolean wait: Whether to wait for the delete to finish. :param int timeout: Stack update timeout in seconds. :param environment_files: Paths to environment files to apply. Other arguments will be passed as stack parameters which will take precedence over any parameters specified in the environments. Only one of template_file, template_url, template_object should be specified. 
:returns: a dict containing the stack description :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API calls """ params = dict( tags=tags, is_rollback_disabled=not rollback, timeout_mins=timeout // 60, parameters=parameters, ) params.update( self.orchestration.read_env_and_templates( template_file=template_file, template_url=template_url, template_object=template_object, files=files, environment_files=environment_files, ) ) if wait: # find the last event to use as the marker events = event_utils.get_events( self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1} ) marker = events[0].id if events else None # Not to cause update of ID field pass stack as dict self.orchestration.update_stack(stack={'id': name_or_id}, **params) if wait: event_utils.poll_for_events( self, name_or_id, action='UPDATE', marker=marker ) return self.get_stack(name_or_id) def delete_stack(self, name_or_id, wait=False): """Delete a stack :param string name_or_id: Stack name or ID. :param boolean wait: Whether to wait for the delete to finish :returns: True if delete succeeded, False if the stack was not found. 
:raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call """ stack = self.get_stack(name_or_id, resolve_outputs=False) if stack is None: self.log.debug("Stack %s not found for deleting", name_or_id) return False if wait: # find the last event to use as the marker events = event_utils.get_events( self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1} ) marker = events[0].id if events else None self.orchestration.delete_stack(stack) if wait: try: event_utils.poll_for_events( self, stack_name=name_or_id, action='DELETE', marker=marker ) except exceptions.HttpException: pass stack = self.get_stack(name_or_id, resolve_outputs=False) if stack and stack['stack_status'] == 'DELETE_FAILED': raise exceptions.SDKException( "Failed to delete stack {id}: {reason}".format( id=name_or_id, reason=stack['stack_status_reason'] ) ) return True def search_stacks(self, name_or_id=None, filters=None): """Search stacks. :param name_or_id: Name or ID of the desired stack. :param filters: a dict containing additional filters to use. e.g. {'stack_status': 'CREATE_COMPLETE'} :returns: a list of ``openstack.orchestration.v1.stack.Stack`` containing the stack description. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ stacks = self.list_stacks() return _utils._filter_list(stacks, name_or_id, filters) def list_stacks(self, **query): """List all stacks. :param dict query: Query parameters to limit stacks. :returns: a list of :class:`openstack.orchestration.v1.stack.Stack` objects containing the stack description. :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call. """ return list(self.orchestration.stacks(**query)) def get_stack(self, name_or_id, filters=None, resolve_outputs=True): """Get exactly one stack. :param name_or_id: Name or ID of the desired stack. :param filters: a dict containing additional filters to use. 
e.g. {'stack_status': 'CREATE_COMPLETE'} :param resolve_outputs: If True, then outputs for this stack will be resolved :returns: a :class:`openstack.orchestration.v1.stack.Stack` containing the stack description :raises: :class:`~openstack.exceptions.SDKException` if something goes wrong during the OpenStack API call or if multiple matches are found. """ def _search_one_stack(name_or_id=None, filters=None): # stack names are mandatory and enforced unique in the project # so a StackGet can always be used for name or ID. try: stack = self.orchestration.find_stack( name_or_id, ignore_missing=False, resolve_outputs=resolve_outputs, ) if stack.status == 'DELETE_COMPLETE': return [] except exceptions.NotFoundException: return [] return _utils._filter_list([stack], name_or_id, filters) return _utils._get_entity(self, _search_one_stack, name_or_id, filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_shared_file_system.py0000664000175000017500000000162400000000000023711 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack.cloud import openstackcloud class SharedFileSystemCloudMixin(openstackcloud._OpenStackCloudMixin): def list_share_availability_zones(self): """List all availability zones for the Shared File Systems service. :returns: A list of Shared File Systems Availability Zones. 
""" return list(self.share.availability_zones()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/_utils.py0000664000175000017500000004011100000000000021172 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import fnmatch import inspect import re import uuid from decorator import decorator import jmespath import netifaces from openstack import _log from openstack import exceptions def _dictify_resource(resource): if isinstance(resource, list): return [_dictify_resource(r) for r in resource] else: if hasattr(resource, 'toDict'): return resource.toDict() else: return resource def _filter_list(data, name_or_id, filters): """Filter a list by name/ID and arbitrary meta data. :param list data: The list of dictionary data to filter. It is expected that each dictionary contains an 'id' and 'name' key if a value for name_or_id is given. :param string name_or_id: The name or ID of the entity being filtered. Can be a glob pattern, such as 'nb01*'. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Invalid filters will be ignored. 
""" # The logger is openstack.cloud.fmmatch to allow a user/operator to # configure logging not to communicate about fnmatch misses # (they shouldn't be too spammy, but one never knows) log = _log.setup_logging('openstack.fnmatch') if name_or_id: # name_or_id might already be unicode name_or_id = str(name_or_id) identifier_matches = [] bad_pattern = False try: fn_reg = re.compile(fnmatch.translate(name_or_id)) except re.error: # If the fnmatch re doesn't compile, then we don't care, # but log it in case the user DID pass a pattern but did # it poorly and wants to know what went wrong with their # search fn_reg = None for e in data: e_id = str(e.get('id', None)) e_name = str(e.get('name', None)) if (e_id and e_id == name_or_id) or ( e_name and e_name == name_or_id ): identifier_matches.append(e) else: # Only try fnmatch if we don't match exactly if not fn_reg: # If we don't have a pattern, skip this, but set the flag # so that we log the bad pattern bad_pattern = True continue if (e_id and fn_reg.match(e_id)) or ( e_name and fn_reg.match(e_name) ): identifier_matches.append(e) if not identifier_matches and bad_pattern: log.debug("Bad pattern passed to fnmatch", exc_info=True) data = identifier_matches if not filters: return data if isinstance(filters, str): return jmespath.search(filters, data) def _dict_filter(f, d): if not d: return False for key in f.keys(): if key not in d: log.warning( "Invalid filter: %s is not an attribute of %s.%s", key, e.__class__.__module__, e.__class__.__qualname__, ) # we intentionally skip this since the user was trying to # filter on _something_, but we don't know what that # _something_ was raise AttributeError(key) if isinstance(f[key], dict): if not _dict_filter(f[key], d.get(key, None)): return False elif d.get(key, None) != f[key]: return False return True filtered = [] for e in data: if _dict_filter(filters, e): filtered.append(e) return filtered def _get_entity(cloud, resource, name_or_id, filters, **kwargs): """Return a single 
entity from the list returned by a given method. :param object cloud: The controller class (Example: the main OpenStackCloud object). :param string or callable resource: The string that identifies the resource to use to lookup the get_<>_by_id or search_s methods (Example: network) or a callable to invoke. :param string name_or_id: The name or ID of the entity being filtered or an object or dict. If this is an object/dict with an 'id' attr/key, we return it and bypass resource lookup. :param filters: A dictionary of meta data to use for further filtering. OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" """ # Sometimes in the control flow of openstacksdk, we already have an object # fetched. Rather than then needing to pull the name or id out of that # object, pass it in here and rely on caching to prevent us from making # an additional call, it's simple enough to test to see if we got an # object and just short-circuit return it. if hasattr(name_or_id, 'id') or ( isinstance(name_or_id, dict) and 'id' in name_or_id ): return name_or_id # If a uuid is passed short-circuit it calling the # get__by_id method if getattr(cloud, 'use_direct_get', False) and _is_uuid_like(name_or_id): get_resource = getattr(cloud, 'get_%s_by_id' % resource, None) if get_resource: return get_resource(name_or_id) search = ( resource if callable(resource) else getattr(cloud, 'search_%ss' % resource, None) ) if search: entities = search(name_or_id, filters, **kwargs) if entities: if len(entities) > 1: raise exceptions.SDKException( "Multiple matches found for %s" % name_or_id ) return entities[0] return None def localhost_supports_ipv6(): """Determine whether the local host supports IPv6 We look for a default route that supports the IPv6 address family, and assume that if it is present, this host has globally routable IPv6 connectivity. 
""" try: return netifaces.AF_INET6 in netifaces.gateways()['default'] except AttributeError: return False def valid_kwargs(*valid_args): # This decorator checks if argument passed as **kwargs to a function are # present in valid_args. # # Typically, valid_kwargs is used when we want to distinguish between # None and omitted arguments and we still want to validate the argument # list. # # Example usage: # # @valid_kwargs('opt_arg1', 'opt_arg2') # def my_func(self, mandatory_arg1, mandatory_arg2, **kwargs): # ... # @decorator def func_wrapper(func, *args, **kwargs): argspec = inspect.getfullargspec(func) for k in kwargs: if k not in argspec.args[1:] and k not in valid_args: raise TypeError( "{f}() got an unexpected keyword argument " "'{arg}'".format(f=inspect.stack()[1][3], arg=k) ) return func(*args, **kwargs) return func_wrapper @contextlib.contextmanager def openstacksdk_exceptions(error_message=None): """Context manager for dealing with openstack exceptions. :param string error_message: String to use for the exception message content on non-SDKException exception. Useful for avoiding wrapping SDKException exceptions within themselves. Code called from within the context may throw such exceptions without having to catch and reraise them. Non-SDKException exceptions thrown within the context will be wrapped and the exception message will be appended to the given error message. """ try: yield except exceptions.SDKException: raise except Exception as e: if error_message is None: error_message = str(e) raise exceptions.SDKException(error_message) def safe_dict_min(key, data): """Safely find the minimum for a given key in a list of dict objects. This will find the minimum integer value for specific dictionary key across a list of dictionaries. The values for the given key MUST be integers, or string representations of an integer. The dictionary key does not have to be present in all (or any) of the elements/dicts within the data set. 
:param string key: The dictionary key to search for the minimum value. :param list data: List of dicts to use for the data set. :returns: None if the field was not found in any elements, or the minimum value for the field otherwise. """ min_value = None for d in data: if (key in d) and (d[key] is not None): try: val = int(d[key]) except ValueError: raise exceptions.SDKException( "Search for minimum value failed. " "Value for {key} is not an integer: {value}".format( key=key, value=d[key] ) ) if (min_value is None) or (val < min_value): min_value = val return min_value def safe_dict_max(key, data): """Safely find the maximum for a given key in a list of dict objects. This will find the maximum integer value for specific dictionary key across a list of dictionaries. The values for the given key MUST be integers, or string representations of an integer. The dictionary key does not have to be present in all (or any) of the elements/dicts within the data set. :param string key: The dictionary key to search for the maximum value. :param list data: List of dicts to use for the data set. :returns: None if the field was not found in any elements, or the maximum value for the field otherwise. """ max_value = None for d in data: if (key in d) and (d[key] is not None): try: val = int(d[key]) except ValueError: raise exceptions.SDKException( "Search for maximum value failed. " "Value for {key} is not an integer: {value}".format( key=key, value=d[key] ) ) if (max_value is None) or (val > max_value): max_value = val return max_value def parse_range(value): """Parse a numerical range string. Breakdown a range expression into its operater and numerical parts. This expression must be a string. 
Valid values must be an integer string, optionally preceeded by one of the following operators:: - "<" : Less than - ">" : Greater than - "<=" : Less than or equal to - ">=" : Greater than or equal to Some examples of valid values and function return values:: - "1024" : returns (None, 1024) - "<5" : returns ("<", 5) - ">=100" : returns (">=", 100) :param string value: The range expression to be parsed. :returns: A tuple with the operator string (or None if no operator was given) and the integer value. None is returned if parsing failed. """ if value is None: return None range_exp = re.match(r'(<|>|<=|>=){0,1}(\d+)$', value) if range_exp is None: return None op = range_exp.group(1) num = int(range_exp.group(2)) return (op, num) def range_filter(data, key, range_exp): """Filter a list by a single range expression. :param list data: List of dictionaries to be searched. :param string key: Key name to search within the data set. :param string range_exp: The expression describing the range of values. :returns: A list subset of the original data set. :raises: :class:`~openstack.exceptions.SDKException` on invalid range expressions. """ filtered = [] range_exp = str(range_exp).upper() if range_exp == "MIN": key_min = safe_dict_min(key, data) if key_min is None: return [] for d in data: if int(d[key]) == key_min: filtered.append(d) return filtered elif range_exp == "MAX": key_max = safe_dict_max(key, data) if key_max is None: return [] for d in data: if int(d[key]) == key_max: filtered.append(d) return filtered # Not looking for a min or max, so a range or exact value must # have been supplied. val_range = parse_range(range_exp) # If parsing the range fails, it must be a bad value. 
if val_range is None: raise exceptions.SDKException(f"Invalid range value: {range_exp}") op = val_range[0] if op: # Range matching for d in data: d_val = int(d[key]) if op == '<': if d_val < val_range[1]: filtered.append(d) elif op == '>': if d_val > val_range[1]: filtered.append(d) elif op == '<=': if d_val <= val_range[1]: filtered.append(d) elif op == '>=': if d_val >= val_range[1]: filtered.append(d) return filtered else: # Exact number match for d in data: if int(d[key]) == val_range[1]: filtered.append(d) return filtered def generate_patches_from_kwargs(operation, **kwargs): """Given a set of parameters, returns a list with the valid patch values. :param string operation: The operation to perform. :param list kwargs: Dict of parameters. :returns: A list with the right patch values. """ patches = [] for k, v in kwargs.items(): patch = {'op': operation, 'value': v, 'path': '/%s' % k} patches.append(patch) return sorted(patches) class FileSegment: """File-like object to pass to requests.""" def __init__(self, filename, offset, length): self.filename = filename self.offset = offset self.length = length self.pos = 0 self._file = open(filename, 'rb') self.seek(0) def tell(self): return self._file.tell() - self.offset def seek(self, offset, whence=0): if whence == 0: self._file.seek(self.offset + offset, whence) elif whence == 1: self._file.seek(offset, whence) elif whence == 2: self._file.seek(self.offset + self.length - offset, 0) def read(self, size=-1): remaining = self.length - self.pos if remaining <= 0: return b'' to_read = remaining if size < 0 else min(size, remaining) chunk = self._file.read(to_read) self.pos += len(chunk) return chunk def reset(self): self._file.seek(self.offset, 0) def _format_uuid_string(string): return ( string.replace('urn:', '') .replace('uuid:', '') .strip('{}') .replace('-', '') .lower() ) def _is_uuid_like(val): """Returns validation of a value as a UUID. :param val: Value to verify :type val: string :returns: bool .. 
versionchanged:: 1.1.1 Support non-lowercase UUIDs. """ try: return str(uuid.UUID(val)).replace('-', '') == _format_uuid_string(val) except (TypeError, ValueError, AttributeError): return False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2333076 openstacksdk-4.0.0/openstack/cloud/cmd/0000775000175000017500000000000000000000000020067 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/cmd/__init__.py0000664000175000017500000000000000000000000022166 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/cmd/inventory.py0000664000175000017500000000473300000000000022505 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse import json import sys import yaml import openstack.cloud import openstack.cloud.inventory from openstack import exceptions def output_format_dict(data, use_yaml): if use_yaml: return yaml.safe_dump(data, default_flow_style=False) else: return json.dumps(data, sort_keys=True, indent=2) def parse_args(): parser = argparse.ArgumentParser(description='OpenStack Inventory Module') parser.add_argument( '--refresh', action='store_true', help='Refresh cached information' ) group = parser.add_mutually_exclusive_group(required=True) group.add_argument( '--list', action='store_true', help='List active servers' ) group.add_argument('--host', help='List details about the specific host') parser.add_argument( '--private', action='store_true', default=False, help='Use private IPs for interface_ip', ) parser.add_argument( '--cloud', default=None, help='Return data for one cloud only' ) parser.add_argument( '--yaml', action='store_true', default=False, help='Output data in nicely readable yaml', ) parser.add_argument( '--debug', action='store_true', default=False, help='Enable debug output', ) return parser.parse_args() def main(): args = parse_args() try: openstack.enable_logging(debug=args.debug) inventory = openstack.cloud.inventory.OpenStackInventory( refresh=args.refresh, private=args.private, cloud=args.cloud ) if args.list: output = inventory.list_hosts() elif args.host: output = inventory.get_host(args.host) print(output_format_dict(output, args.yaml)) except exceptions.SDKException as e: sys.stderr.write(e.message + '\n') sys.exit(1) sys.exit(0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/exc.py0000664000175000017500000000312000000000000020451 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack import exceptions OpenStackCloudException = exceptions.SDKException class OpenStackCloudUnavailableExtension(OpenStackCloudException): pass class OpenStackCloudUnavailableFeature(OpenStackCloudException): pass # Backwards compat. These are deprecated and should not be used in new code. class OpenStackCloudCreateException(OpenStackCloudException): def __init__(self, resource, resource_id, extra_data=None, **kwargs): super().__init__( message="Error creating {resource}: {resource_id}".format( resource=resource, resource_id=resource_id ), extra_data=extra_data, **kwargs ) self.resource_id = resource_id OpenStackCloudTimeout = exceptions.ResourceTimeout OpenStackCloudHTTPError = exceptions.HttpException OpenStackCloudBadRequest = exceptions.BadRequestException OpenStackCloudURINotFound = exceptions.NotFoundException OpenStackCloudResourceNotFound = OpenStackCloudURINotFound ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/inventory.py0000664000175000017500000000570100000000000021736 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from openstack.cloud import _utils from openstack.config import loader from openstack import connection from openstack import exceptions __all__ = ['OpenStackInventory'] class OpenStackInventory: # Put this here so the capability can be detected with hasattr on the class extra_config = None def __init__( self, config_files=None, refresh=False, private=False, config_key=None, config_defaults=None, cloud=None, use_direct_get=False, ): if config_files is None: config_files = [] config = loader.OpenStackConfig( config_files=loader.CONFIG_FILES + config_files ) self.extra_config = config.get_extra_config( config_key, config_defaults ) if cloud is None: self.clouds = [ connection.Connection(config=cloud_region) for cloud_region in config.get_all() ] else: self.clouds = [connection.Connection(config=config.get_one(cloud))] if private: for cloud in self.clouds: cloud.private = True # Handle manual invalidation of entire persistent cache if refresh: for cloud in self.clouds: cloud._cache.invalidate() def list_hosts( self, expand=True, fail_on_cloud_config=True, all_projects=False ): hostvars = [] for cloud in self.clouds: try: # Cycle on servers for server in cloud.list_servers( detailed=expand, all_projects=all_projects ): hostvars.append(server) except exceptions.SDKException: # Don't fail on one particular cloud as others may work if fail_on_cloud_config: raise return hostvars def search_hosts(self, name_or_id=None, filters=None, expand=True): hosts = self.list_hosts(expand=expand) return _utils._filter_list(hosts, name_or_id, filters) def get_host(self, 
name_or_id, filters=None, expand=True): if expand: func = self.search_hosts else: func = functools.partial(self.search_hosts, expand=False) return _utils._get_entity(self, func, name_or_id, filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/meta.py0000664000175000017500000005722000000000000020632 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ipaddress import socket from openstack import _log from openstack import exceptions from openstack import utils NON_CALLABLES = (str, bool, dict, int, float, list, type(None)) def find_nova_interfaces( addresses, ext_tag=None, key_name=None, version=4, mac_addr=None ): ret = [] for k, v in iter(addresses.items()): if key_name is not None and k != key_name: # key_name is specified and it doesn't match the current network. # Continue with the next one continue for interface_spec in v: if ext_tag is not None: if 'OS-EXT-IPS:type' not in interface_spec: # ext_tag is specified, but this interface has no tag # We could actually return right away as this means that # this cloud doesn't support OS-EXT-IPS. Nevertheless, # it would be better to perform an explicit check # but this needs cloud to be passed to this function. 
continue elif interface_spec['OS-EXT-IPS:type'] != ext_tag: # Type doesn't match, continue with next one continue if mac_addr is not None: if 'OS-EXT-IPS-MAC:mac_addr' not in interface_spec: # mac_addr is specified, but this interface has no mac_addr # We could actually return right away as this means that # this cloud doesn't support OS-EXT-IPS-MAC. Nevertheless, # it would be better to perform an explicit check # but this needs cloud to be passed to this function. continue elif interface_spec['OS-EXT-IPS-MAC:mac_addr'] != mac_addr: # MAC doesn't match, continue with next one continue if interface_spec['version'] == version: ret.append(interface_spec) return ret def find_nova_addresses( addresses, ext_tag=None, key_name=None, version=4, mac_addr=None ): interfaces = find_nova_interfaces( addresses, ext_tag, key_name, version, mac_addr ) floating_addrs = [] fixed_addrs = [] for i in interfaces: if i.get('OS-EXT-IPS:type') == 'floating': floating_addrs.append(i['addr']) else: fixed_addrs.append(i['addr']) return floating_addrs + fixed_addrs def get_server_ip(server, public=False, cloud_public=True, **kwargs): """Get an IP from the Nova addresses dict :param server: The server to pull the address from :param public: Whether the address we're looking for should be considered 'public' and therefore reachabiliity tests should be used. (defaults to False) :param cloud_public: Whether the cloud has been configured to use private IPs from servers as the interface_ip. This inverts the public reachability logic, as in this case it's the private ip we expect shade to be able to reach """ addrs = find_nova_addresses(server['addresses'], **kwargs) return find_best_address(addrs, public=public, cloud_public=cloud_public) def get_server_private_ip(server, cloud=None): """Find the private IP address If Neutron is available, search for a port on a network where `router:external` is False and `shared` is False. This combination indicates a private network with private IP addresses. 
This port should have the private IP. If Neutron is not available, or something goes wrong communicating with it, as a fallback, try the list of addresses associated with the server dict, looking for an IP type tagged as 'fixed' in the network named 'private'. Last resort, ignore the IP type and just look for an IP on the 'private' network (e.g., Rackspace). """ if cloud and not cloud.use_internal_network(): return None # Try to get a floating IP interface. If we have one then return the # private IP address associated with that floating IP for consistency. fip_ints = find_nova_interfaces(server['addresses'], ext_tag='floating') fip_mac = None if fip_ints: fip_mac = fip_ints[0].get('OS-EXT-IPS-MAC:mac_addr') # Short circuit the ports/networks search below with a heavily cached # and possibly pre-configured network name if cloud: int_nets = cloud.get_internal_ipv4_networks() for int_net in int_nets: int_ip = get_server_ip( server, key_name=int_net['name'], ext_tag='fixed', cloud_public=not cloud.private, mac_addr=fip_mac, ) if int_ip is not None: return int_ip # Try a second time without the fixed tag. This is for old nova-network # results that do not have the fixed/floating tag. for int_net in int_nets: int_ip = get_server_ip( server, key_name=int_net['name'], cloud_public=not cloud.private, mac_addr=fip_mac, ) if int_ip is not None: return int_ip ip = get_server_ip( server, ext_tag='fixed', key_name='private', mac_addr=fip_mac ) if ip: return ip # Last resort, and Rackspace return get_server_ip(server, key_name='private') def get_server_external_ipv4(cloud, server): """Find an externally routable IP for the server. 
There are 5 different scenarios we have to account for: * Cloud has externally routable IP from neutron but neutron APIs don't work (only info available is in nova server record) (rackspace) * Cloud has externally routable IP from neutron (runabove, ovh) * Cloud has externally routable IP from neutron AND supports optional private tenant networks (vexxhost, unitedstack) * Cloud only has private tenant network provided by neutron and requires floating-ip for external routing (dreamhost, hp) * Cloud only has private tenant network provided by nova-network and requires floating-ip for external routing (auro) :param cloud: the cloud we're working with :param server: the server dict from which we want to get an IPv4 address :return: a string containing the IPv4 address or None """ if not cloud.use_external_network(): return None if server['accessIPv4']: return server['accessIPv4'] # Short circuit the ports/networks search below with a heavily cached # and possibly pre-configured network name ext_nets = cloud.get_external_ipv4_networks() for ext_net in ext_nets: ext_ip = get_server_ip( server, key_name=ext_net['name'], public=True, cloud_public=not cloud.private, ) if ext_ip is not None: return ext_ip # Try to get a floating IP address # Much as I might find floating IPs annoying, if it has one, that's # almost certainly the one that wants to be used ext_ip = get_server_ip( server, ext_tag='floating', public=True, cloud_public=not cloud.private ) if ext_ip is not None: return ext_ip # The cloud doesn't support Neutron or Neutron can't be contacted. The # server might have fixed addresses that are reachable from outside the # cloud (e.g. 
Rax) or have plain ol' floating IPs # Try to get an address from a network named 'public' ext_ip = get_server_ip( server, key_name='public', public=True, cloud_public=not cloud.private ) if ext_ip is not None: return ext_ip # Nothing else works, try to find a globally routable IP address for interfaces in server['addresses'].values(): for interface in interfaces: try: ip = ipaddress.ip_address(interface['addr']) except Exception: # Skip any error, we're looking for a working ip - if the # cloud returns garbage, it wouldn't be the first weird thing # but it still doesn't meet the requirement of "be a working # ip address" continue if ip.version == 4 and not ip.is_private: return str(ip) return None def find_best_address(addresses, public=False, cloud_public=True): do_check = public == cloud_public if not addresses: return None if len(addresses) == 1: return addresses[0] if len(addresses) > 1 and do_check: # We only want to do this check if the address is supposed to be # reachable. Otherwise we're just debug log spamming on every listing # of private ip addresses for address in addresses: try: for count in utils.iterate_timeout( 5, "Timeout waiting for %s" % address, wait=0.1 ): # Return the first one that is reachable try: for res in socket.getaddrinfo( address, 22, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, ): family, socktype, proto, _, sa = res connect_socket = socket.socket( family, socktype, proto ) connect_socket.settimeout(1) connect_socket.connect(sa) return address except OSError: # Sometimes a "no route to address" type error # will fail fast, but can often come alive # when retried. continue except Exception: pass # Give up and return the first - none work as far as we can tell if do_check: log = _log.setup_logging('openstack') log.debug( "The cloud returned multiple addresses %s:, and we could not " "connect to port 22 on either. 
That might be what you wanted, " "but we have no clue what's going on, so we picked the first one " "%s" % (addresses, addresses[0]) ) return addresses[0] def get_server_external_ipv6(server): """Get an IPv6 address reachable from outside the cloud. This function assumes that if a server has an IPv6 address, that address is reachable from outside the cloud. :param server: the server from which we want to get an IPv6 address :return: a string containing the IPv6 address or None """ # Don't return ipv6 interfaces if forcing IPv4 if server['accessIPv6']: return server['accessIPv6'] addresses = find_nova_addresses(addresses=server['addresses'], version=6) return find_best_address(addresses, public=True) def get_server_default_ip(cloud, server): """Get the configured 'default' address It is possible in clouds.yaml to configure for a cloud a network that is the 'default_interface'. This is the network that should be used to talk to instances on the network. :param cloud: the cloud we're working with :param server: the server dict from which we want to get the default IPv4 address :return: a string containing the IPv4 address or None """ ext_net = cloud.get_default_network() if ext_net: if cloud._local_ipv6 and not cloud.force_ipv4: # try 6 first, fall back to four versions = [6, 4] else: versions = [4] for version in versions: ext_ip = get_server_ip( server, key_name=ext_net['name'], version=version, public=True, cloud_public=not cloud.private, ) if ext_ip is not None: return ext_ip return None def _get_interface_ip(cloud, server): """Get the interface IP for the server Interface IP is the IP that should be used for communicating with the server. 
It is: - the IP on the configured default_interface network - if cloud.private, the private ip if it exists - if the server has a public ip, the public ip """ default_ip = get_server_default_ip(cloud, server) if default_ip: return default_ip if cloud.private and server['private_v4']: return server['private_v4'] if server['public_v6'] and cloud._local_ipv6 and not cloud.force_ipv4: return server['public_v6'] else: return server['public_v4'] def get_groups_from_server(cloud, server, server_vars): groups = [] # NOTE(efried): This is hardcoded to 'compute' because this method is only # used from ComputeCloudMixin. region = cloud.config.get_region_name('compute') cloud_name = cloud.name # Create a group for the cloud groups.append(cloud_name) # Create a group on region groups.append(region) # And one by cloud_region groups.append(f"{cloud_name}_{region}") # Check if group metadata key in servers' metadata group = server['metadata'].get('group') if group: groups.append(group) for extra_group in server['metadata'].get('groups', '').split(','): if extra_group: groups.append(extra_group) groups.append('instance-%s' % server['id']) for key in ('flavor', 'image'): if 'name' in server_vars[key]: groups.append('{}-{}'.format(key, server_vars[key]['name'])) for key, value in iter(server['metadata'].items()): groups.append(f'meta-{key}_{value}') az = server_vars.get('az', None) if az: # Make groups for az, region_az and cloud_region_az groups.append(az) groups.append(f'{region}_{az}') groups.append(f'{cloud.name}_{region}_{az}') return groups def expand_server_vars(cloud, server): """Backwards compatibility function.""" return add_server_interfaces(cloud, server) def _make_address_dict(fip, port): address = dict(version=4, addr=fip['floating_ip_address']) address['OS-EXT-IPS:type'] = 'floating' address['OS-EXT-IPS-MAC:mac_addr'] = port['mac_address'] return address def _get_supplemental_addresses(cloud, server): fixed_ip_mapping = {} for name, network in 
server['addresses'].items(): for address in network: if address['version'] == 6: continue if address.get('OS-EXT-IPS:type') == 'floating': # We have a floating IP that nova knows about, do nothing return server['addresses'] fixed_ip_mapping[address['addr']] = name try: # Don't bother doing this before the server is active, it's a waste # of an API call while polling for a server to come up if ( cloud.has_service('network') and cloud._has_floating_ips() and server['status'] == 'ACTIVE' ): for port in cloud.search_ports( filters=dict(device_id=server['id']) ): # This SHOULD return one and only one FIP - but doing it as a # search/list lets the logic work regardless for fip in cloud.search_floating_ips( filters=dict(port_id=port['id']) ): fixed_net = fixed_ip_mapping.get(fip['fixed_ip_address']) if fixed_net is None: log = _log.setup_logging('openstack') log.debug( "The cloud returned floating ip %(fip)s attached" " to server %(server)s but the fixed ip associated" " with the floating ip in the neutron listing" " does not exist in the nova listing. Something" " is exceptionally broken.", dict(fip=fip['id'], server=server['id']), ) else: server['addresses'][fixed_net].append( _make_address_dict(fip, port) ) except exceptions.SDKException: # If something goes wrong with a cloud call, that's cool - this is # an attempt to provide additional data and should not block forward # progress pass return server['addresses'] def add_server_interfaces(cloud, server): """Add network interface information to server. Query the cloud as necessary to add information to the server record about the network information needed to interface with the server. Ensures that public_v4, public_v6, private_v4, private_v6, interface_ip, accessIPv4 and accessIPv6 are always set. """ # First, add an IP address. 
Set it to '' rather than None if it does # not exist to remain consistent with the pre-existing missing values server['addresses'] = _get_supplemental_addresses(cloud, server) server['public_v4'] = get_server_external_ipv4(cloud, server) or '' # If we're forcing IPv4, then don't report IPv6 interfaces which # are likely to be unconfigured. if cloud.force_ipv4: server['public_v6'] = '' else: server['public_v6'] = get_server_external_ipv6(server) or '' server['private_v4'] = get_server_private_ip(server, cloud) or '' server['interface_ip'] = _get_interface_ip(cloud, server) or '' # Some clouds do not set these, but they're a regular part of the Nova # server record. Since we know them, go ahead and set them. In the case # where they were set previous, we use the values, so this will not break # clouds that provide the information if cloud.private and server.private_v4: server['access_ipv4'] = server['private_v4'] else: server['access_ipv4'] = server['public_v4'] server['access_ipv6'] = server['public_v6'] return server def expand_server_security_groups(cloud, server): try: groups = cloud.list_server_security_groups(server) except exceptions.SDKException: groups = [] server['security_groups'] = groups or [] def get_hostvars_from_server(cloud, server, mounts=None): """Expand additional server information useful for ansible inventory. Variables in this function may make additional cloud queries to flesh out possibly interesting info, making it more expensive to call than expand_server_vars if caching is not set up. If caching is set up, the extra cost should be minimal. """ server_vars = obj_to_munch(add_server_interfaces(cloud, server)) flavor_id = server['flavor'].get('id') if flavor_id: # In newer nova, the flavor record can be kept around for flavors # that no longer exist. The id and name are not there. 
flavor_name = cloud.get_flavor_name(flavor_id) if flavor_name: server_vars['flavor']['name'] = flavor_name elif 'original_name' in server['flavor']: # Users might be have code still expecting name. That name is in # original_name. server_vars['flavor']['name'] = server['flavor']['original_name'] expand_server_security_groups(cloud, server) # OpenStack can return image as a string when you've booted from volume if str(server['image']) == server['image']: image_id = server['image'] server_vars['image'] = dict(id=image_id) else: image_id = server['image'].get('id', None) if image_id: image_name = cloud.get_image_name(image_id) if image_name: server_vars['image']['name'] = image_name # During the switch to returning sdk resource objects we need temporarily # to force convertion to dict. This will be dropped soon. if hasattr(server_vars['image'], 'to_dict'): server_vars['image'] = server_vars['image'].to_dict(computed=False) volumes = [] if cloud.has_service('volume'): try: for volume in cloud.get_volumes(server): # Make things easier to consume elsewhere volume['device'] = volume['attachments'][0]['device'] volumes.append(volume) except exceptions.SDKException: pass server_vars['volumes'] = volumes if mounts: for mount in mounts: for vol in server_vars['volumes']: if vol['display_name'] == mount['display_name']: if 'mount' in mount: vol['mount'] = mount['mount'] return server_vars def obj_to_munch(obj): """Turn an object with attributes into a dict suitable for serializing. Some of the things that are returned in OpenStack are objects with attributes. That's awesome - except when you want to expose them as JSON structures. We use this as the basis of get_hostvars_from_server above so that we can just have a plain dict of all of the values that exist in the nova metadata for a server. 
""" if obj is None: return None elif isinstance(obj, utils.Munch) or hasattr(obj, 'mock_add_spec'): # If we obj_to_munch twice, don't fail, just return the munch # Also, don't try to modify Mock objects - that way lies madness return obj elif isinstance(obj, dict): # The new request-id tracking spec: # https://specs.openstack.org/openstack/nova-specs/specs/juno/approved/log-request-id-mappings.html # adds a request-ids attribute to returned objects. It does this even # with dicts, which now become dict subclasses. So we want to convert # the dict we get, but we also want it to fall through to object # attribute processing so that we can also get the request_ids # data into our resulting object. instance = utils.Munch(obj) else: instance = utils.Munch() for key in dir(obj): try: value = getattr(obj, key) # some attributes can be defined as a @property, so we can't assure # to have a valid value # e.g. id in python-novaclient/tree/novaclient/v2/quotas.py except AttributeError: continue if isinstance(value, NON_CALLABLES) and not key.startswith('_'): instance[key] = value return instance obj_to_dict = obj_to_munch def obj_list_to_munch(obj_list): """Enumerate through lists of objects and return lists of dictonaries. Some of the objects returned in OpenStack are actually lists of objects, and in order to expose the data structures as JSON, we need to facilitate the conversion to lists of dictonaries. """ return [obj_to_munch(obj) for obj in obj_list] obj_list_to_dict = obj_list_to_munch def get_and_munchify(key, data): """Get the value associated to key and convert it. 
The value will be converted in a Munch object or a list of Munch objects based on the type """ result = data.get(key, []) if key else data if isinstance(result, list): return obj_list_to_munch(result) elif isinstance(result, dict): return obj_to_munch(result) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/openstackcloud.py0000664000175000017500000007143100000000000022722 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import atexit import concurrent.futures import copy import functools import queue import typing as ty import warnings import weakref import dogpile.cache import keystoneauth1.exceptions import requests.models import requestsexceptions from openstack import _log from openstack import _services_mixin from openstack.cloud import _utils from openstack.cloud import meta import openstack.config import openstack.config.cloud_region from openstack import exceptions from openstack import proxy from openstack import resource from openstack import utils from openstack import warnings as os_warnings class _OpenStackCloudMixin(_services_mixin.ServicesMixin): """Represent a connection to an OpenStack Cloud. OpenStackCloud is the entry point for all cloud operations, regardless of which OpenStack service those operations may ultimately come from. The operations on an OpenStackCloud are resource oriented rather than REST API operation oriented. 
For instance, one will request a Floating IP and that Floating IP will be actualized either via neutron or via nova depending on how this particular cloud has decided to arrange itself. """ _OBJECT_MD5_KEY = 'x-sdk-md5' _OBJECT_SHA256_KEY = 'x-sdk-sha256' _OBJECT_AUTOCREATE_KEY = 'x-sdk-autocreated' _OBJECT_AUTOCREATE_CONTAINER = 'images' # NOTE(shade) shade keys were x-object-meta-x-shade-md5 - we need to check # those in freshness checks so that a shade->sdk transition # doesn't result in a re-upload _SHADE_OBJECT_MD5_KEY = 'x-object-meta-x-shade-md5' _SHADE_OBJECT_SHA256_KEY = 'x-object-meta-x-shade-sha256' _SHADE_OBJECT_AUTOCREATE_KEY = 'x-object-meta-x-shade-autocreated' def __init__( self, cloud=None, config=None, session=None, app_name=None, app_version=None, extra_services=None, strict=False, use_direct_get=None, task_manager=None, rate_limit=None, oslo_conf=None, service_types=None, global_request_id=None, strict_proxies=False, pool_executor=None, **kwargs, ): """Create a connection to a cloud. A connection needs information about how to connect, how to authenticate and how to select the appropriate services to use. The recommended way to provide this information is by referencing a named cloud config from an existing `clouds.yaml` file. The cloud name ``envvars`` may be used to consume a cloud configured via ``OS_`` environment variables. A pre-existing :class:`~openstack.config.cloud_region.CloudRegion` object can be passed in lieu of a cloud name, for cases where the user already has a fully formed CloudRegion and just wants to use it. Similarly, if for some reason the user already has a :class:`~keystoneauth1.session.Session` and wants to use it, it may be passed in. :param str cloud: Name of the cloud from config to use. :param config: CloudRegion object representing the config for the region of the cloud in question. 
:type config: :class:`~openstack.config.cloud_region.CloudRegion` :param session: A session object compatible with :class:`~keystoneauth1.session.Session`. :type session: :class:`~keystoneauth1.session.Session` :param str app_name: Name of the application to be added to User Agent. :param str app_version: Version of the application to be added to User Agent. :param extra_services: List of :class:`~openstack.service_description.ServiceDescription` objects describing services that openstacksdk otherwise does not know about. :param bool use_direct_get: For get methods, make specific REST calls for server-side filtering instead of making list calls and filtering client-side. Default false. :param task_manager: Ignored. Exists for backwards compat during transition. Rate limit parameters should be passed directly to the `rate_limit` parameter. :param rate_limit: Client-side rate limit, expressed in calls per second. The parameter can either be a single float, or it can be a dict with keys as service-type and values as floats expressing the calls per second for that service. Defaults to None, which means no rate-limiting is performed. :param oslo_conf: An oslo.config CONF object. :type oslo_conf: :class:`~oslo_config.cfg.ConfigOpts` An oslo.config ``CONF`` object that has been populated with ``keystoneauth1.loading.register_adapter_conf_options`` in groups named by the OpenStack service's project name. :param service_types: A list/set of service types this Connection should support. All other service types will be disabled (will error if used). **Currently only supported in conjunction with the ``oslo_conf`` kwarg.** :param global_request_id: A Request-id to send with all interactions. :param strict_proxies: If True, check proxies on creation and raise ServiceDiscoveryException if the service is unavailable. :type strict_proxies: bool Throw an ``openstack.exceptions.ServiceDiscoveryException`` if the endpoint for a given service doesn't work. 
This is useful for OpenStack services using sdk to talk to other OpenStack services where it can be expected that the deployer config is correct and errors should be reported immediately. Default false. :param pool_executor: :type pool_executor: :class:`~futurist.Executor` A futurist ``Executor`` object to be used for concurrent background activities. Defaults to None in which case a ThreadPoolExecutor will be created if needed. :param kwargs: If a config is not provided, the rest of the parameters provided are assumed to be arguments to be passed to the CloudRegion constructor. """ super().__init__() if use_direct_get is not None: warnings.warn( "The 'use_direct_get' argument is deprecated for removal", os_warnings.RemovedInSDK50Warning, ) self.config = config self._extra_services = {} self._strict_proxies = strict_proxies if extra_services: for service in extra_services: self._extra_services[service.service_type] = service if not self.config: if oslo_conf: self.config = openstack.config.cloud_region.from_conf( oslo_conf, session=session, app_name=app_name, app_version=app_version, service_types=service_types, ) elif session: self.config = openstack.config.cloud_region.from_session( session=session, app_name=app_name, app_version=app_version, load_yaml_config=False, load_envvars=False, rate_limit=rate_limit, **kwargs, ) else: self.config = openstack.config.get_cloud_region( cloud=cloud, app_name=app_name, app_version=app_version, load_yaml_config=cloud is not None, load_envvars=cloud is not None, rate_limit=rate_limit, **kwargs, ) self._session = None self._proxies = {} self.__pool_executor = pool_executor self._global_request_id = global_request_id self.use_direct_get = use_direct_get or False self.strict_mode = strict self.log = _log.setup_logging('openstack') self.name = self.config.name self.auth = self.config.get_auth_args() self.default_interface = self.config.get_interface() self.force_ipv4 = self.config.force_ipv4 (self.verify, self.cert) = 
self.config.get_requests_verify_args() # Turn off urllib3 warnings about insecure certs if we have # explicitly configured requests to tell it we do not want # cert verification if not self.verify: self.log.debug( "Turning off Insecure SSL warnings since verify=False" ) category = requestsexceptions.InsecureRequestWarning if category: # InsecureRequestWarning references a Warning class or is None warnings.filterwarnings('ignore', category=category) self._disable_warnings = {} cache_expiration_time = int(self.config.get_cache_expiration_time()) cache_class = self.config.get_cache_class() cache_arguments = self.config.get_cache_arguments() self._cache_expirations = dict() if cache_class != 'dogpile.cache.null': self.cache_enabled = True else: self.cache_enabled = False # Uncoditionally create cache even with a "null" backend self._cache = self._make_cache( cache_class, cache_expiration_time, cache_arguments ) expirations = self.config.get_cache_expirations() for expire_key in expirations.keys(): self._cache_expirations[expire_key] = expirations[expire_key] self._api_cache_keys = set() self._container_cache = dict() self._file_hash_cache = dict() self._local_ipv6 = ( _utils.localhost_supports_ipv6() if not self.force_ipv4 else False ) # Register cleanup steps atexit.register(self.close) @property def session(self): if not self._session: self._session = self.config.get_session() # Hide a reference to the connection on the session to help with # backwards compatibility for folks trying to just pass # conn.session to a Resource method's session argument. 
self.session._sdk_connection = weakref.proxy(self) return self._session @property def _pool_executor(self): if not self.__pool_executor: self.__pool_executor = concurrent.futures.ThreadPoolExecutor( max_workers=5 ) return self.__pool_executor def close(self): """Release any resources held open.""" self.config.set_auth_cache() if self.__pool_executor: self.__pool_executor.shutdown() atexit.unregister(self.close) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def set_global_request_id(self, global_request_id): self._global_request_id = global_request_id def global_request(self, global_request_id): """Make a new Connection object with a global request id set. Take the existing settings from the current Connection and construct a new Connection object with the global_request_id overridden. .. code-block:: python from oslo_context import context cloud = openstack.connect(cloud='example') # Work normally servers = cloud.list_servers() cloud2 = cloud.global_request(context.generate_request_id()) # cloud2 sends all requests with global_request_id set servers = cloud2.list_servers() Additionally, this can be used as a context manager: .. code-block:: python from oslo_context import context c = openstack.connect(cloud='example') # Work normally servers = c.list_servers() with c.global_request(context.generate_request_id()) as c2: # c2 sends all requests with global_request_id set servers = c2.list_servers() :param global_request_id: The `global_request_id` to send. 
""" params = copy.deepcopy(self.config.config) cloud_region = openstack.config.cloud_region.from_session( session=self.session, app_name=self.config._app_name, app_version=self.config._app_version, discovery_cache=self.session._discovery_cache, **params, ) # Override the cloud name so that logging/location work right cloud_region._name = self.name cloud_region.config['profile'] = self.name # Use self.__class__ so that we return whatever this is, like if it's # a subclass in the case of shade wrapping sdk. new_conn = self.__class__(config=cloud_region) new_conn.set_global_request_id(global_request_id) return new_conn def _make_cache(self, cache_class, expiration_time, arguments): return dogpile.cache.make_region( function_key_generator=self._make_cache_key ).configure( cache_class, expiration_time=expiration_time, arguments=arguments ) def _make_cache_key(self, namespace, fn): fname = fn.__name__ if namespace is None: name_key = self.name else: name_key = f'{self.name}:{namespace}' def generate_key(*args, **kwargs): # TODO(frickler): make handling arg keys actually work arg_key = '' kw_keys = sorted(kwargs.keys()) kwargs_key = ','.join( [f'{k}:{kwargs[k]}' for k in kw_keys if k != 'cache'] ) ans = "_".join([str(name_key), fname, arg_key, kwargs_key]) return ans return generate_key def pprint(self, resource): """Wrapper around pprint that groks munch objects""" # import late since this is a utility function import pprint new_resource = _utils._dictify_resource(resource) pprint.pprint(new_resource) def pformat(self, resource): """Wrapper around pformat that groks munch objects""" # import late since this is a utility function import pprint new_resource = _utils._dictify_resource(resource) return pprint.pformat(new_resource) @property def _keystone_catalog(self): return self.session.auth.get_access(self.session).service_catalog @property def service_catalog(self): return self._keystone_catalog.catalog @property def auth_token(self): # Keystone's session will reuse a 
token if it is still valid. # We don't need to track validity here, just get_token() each time. return self.session.get_token() @property def current_user_id(self): """Get the id of the currently logged-in user from the token.""" return self.session.auth.get_access(self.session).user_id @property def current_project_id(self): """Get the current project ID. Returns the project_id of the current token scope. None means that the token is domain scoped or unscoped. :raises keystoneauth1.exceptions.auth.AuthorizationFailure: if a new token fetch fails. :raises keystoneauth1.exceptions.auth_plugins.MissingAuthPlugin: if a plugin is not available. """ return self.session.get_project_id() @property def current_project(self): """Return a ``utils.Munch`` describing the current project""" return self._get_project_info() def _get_project_info(self, project_id=None): project_info = utils.Munch( id=project_id, name=None, domain_id=None, domain_name=None, ) if not project_id or project_id == self.current_project_id: # If we don't have a project_id parameter, it means a user is # directly asking what the current state is. # Alternately, if we have one, that means we're calling this # from within a normalize function, which means the object has # a project_id associated with it. If the project_id matches # the project_id of our current token, that means we can supplement # the info with human readable info about names if we have them. # If they don't match, that means we're an admin who has pulled # an object from a different project, so adding info from the # current token would be wrong. 
auth_args = self.config.config.get('auth', {}) project_info['id'] = self.current_project_id project_info['name'] = auth_args.get('project_name') project_info['domain_id'] = auth_args.get('project_domain_id') project_info['domain_name'] = auth_args.get('project_domain_name') return project_info @property def current_location(self): """Return a ``utils.Munch`` explaining the current cloud location.""" return self._get_current_location() def _get_current_location(self, project_id=None, zone=None): return utils.Munch( cloud=self.name, # TODO(efried): This is wrong, but it only seems to be used in a # repr; can we get rid of it? region_name=self.config.get_region_name(), zone=zone, project=self._get_project_info(project_id), ) def range_search(self, data, filters): """Perform integer range searches across a list of dictionaries. Given a list of dictionaries, search across the list using the given dictionary keys and a range of integer values for each key. Only dictionaries that match ALL search filters across the entire original data set will be returned. It is not a requirement that each dictionary contain the key used for searching. Those without the key will be considered non-matching. The range values must be string values and is either a set of digits representing an integer for matching, or a range operator followed by a set of digits representing an integer for matching. If a range operator is not given, exact value matching will be used. Valid operators are one of: <,>,<=,>= :param data: List of dictionaries to be searched. :param filters: Dict describing the one or more range searches to perform. If more than one search is given, the result will be the members of the original data set that match ALL searches. An example of filtering by multiple ranges:: {"vcpus": "<=5", "ram": "<=2048", "disk": "1"} :returns: A list subset of the original data set. :raises: :class:`~openstack.exceptions.SDKException` on invalid range expressions. 
""" filtered: ty.List[object] = [] for key, range_value in filters.items(): # We always want to operate on the full data set so that # calculations for minimum and maximum are correct. results = _utils.range_filter(data, key, range_value) if not filtered: # First set of results filtered = results else: # The combination of all searches should be the intersection of # all result sets from each search. So adjust the current set # of filtered data by computing its intersection with the # latest result set. filtered = [r for r in results for f in filtered if r == f] return filtered def _get_and_munchify(self, key, data): """Wrapper around meta.get_and_munchify. Some of the methods expect a `meta` attribute to be passed in as part of the method signature. In those methods the meta param is overriding the meta module making the call to meta.get_and_munchify to fail. """ if isinstance(data, requests.models.Response): data = proxy._json_response(data) return meta.get_and_munchify(key, data) def get_name(self): return self.name def get_session_endpoint(self, service_key, **kwargs): if not kwargs: kwargs = {} try: return self.config.get_session_endpoint(service_key, **kwargs) except keystoneauth1.exceptions.catalog.EndpointNotFound as e: self.log.debug( "Endpoint not found in %s cloud: %s", self.name, str(e) ) endpoint = None except exceptions.SDKException: raise except Exception as e: raise exceptions.SDKException( "Error getting {service} endpoint on {cloud}:{region}: " "{error}".format( service=service_key, cloud=self.name, region=self.config.get_region_name(service_key), error=str(e), ) ) return endpoint def has_service(self, service_key, version=None): if not self.config.has_service(service_key): # TODO(mordred) add a stamp here so that we only report this once if not ( service_key in self._disable_warnings and self._disable_warnings[service_key] ): self.log.debug( "Disabling %(service_key)s entry in catalog per config", {'service_key': service_key}, ) 
self._disable_warnings[service_key] = True return False try: kwargs = dict() # If a specific version was requested - try it if version is not None: kwargs['min_version'] = version kwargs['max_version'] = version endpoint = self.get_session_endpoint(service_key, **kwargs) except exceptions.SDKException: return False if endpoint: return True else: return False def search_resources( self, resource_type, name_or_id, get_args=None, get_kwargs=None, list_args=None, list_kwargs=None, **filters, ): """Search resources Search resources matching certain conditions :param str resource_type: String representation of the expected resource as `service.resource` (i.e. "network.security_group"). :param str name_or_id: Name or ID of the resource :param list get_args: Optional args to be passed to the _get call. :param dict get_kwargs: Optional kwargs to be passed to the _get call. :param list list_args: Optional args to be passed to the _list call. :param dict list_kwargs: Optional kwargs to be passed to the _list call :param dict filters: Additional filters to be used for querying resources. """ get_args = get_args or () get_kwargs = get_kwargs or {} list_args = list_args or () list_kwargs = list_kwargs or {} # User used string notation. 
Try to find proper # resource (service_name, resource_name) = resource_type.split('.') if not hasattr(self, service_name): raise exceptions.SDKException( "service %s is not existing/enabled" % service_name ) service_proxy = getattr(self, service_name) try: resource_type = service_proxy._resource_registry[resource_name] except KeyError: raise exceptions.SDKException( "Resource %s is not known in service %s" % (resource_name, service_name) ) if name_or_id: # name_or_id is definitely not None try: resource_by_id = service_proxy._get( resource_type, name_or_id, *get_args, **get_kwargs ) return [resource_by_id] except exceptions.NotFoundException: pass if not filters: filters = {} if name_or_id: filters["name"] = name_or_id list_kwargs.update(filters) return list( service_proxy._list(resource_type, *list_args, **list_kwargs) ) def project_cleanup( self, dry_run=True, wait_timeout=120, status_queue=None, filters=None, resource_evaluation_fn=None, skip_resources=None, ): """Cleanup the project resources. Cleanup all resources in all services, which provide cleanup methods. :param bool dry_run: Cleanup or only list identified resources. :param int wait_timeout: Maximum amount of time given to each service to comlete the cleanup. :param queue status_queue: a threading queue object used to get current process status. The queue contain processed resources. :param dict filters: Additional filters for the cleanup (only resources matching all filters will be deleted, if there are no other dependencies). :param resource_evaluation_fn: A callback function, which will be invoked for each resurce and must return True/False depending on whether resource need to be deleted or not. :param skip_resources: List of specific resources whose cleanup should be skipped. 
""" dependencies = {} get_dep_fn_name = '_get_cleanup_dependencies' cleanup_fn_name = '_service_cleanup' if not status_queue: status_queue = queue.Queue() for service in self.config.get_enabled_services(): try: if hasattr(self, service): proxy = getattr(self, service) if ( proxy and hasattr(proxy, get_dep_fn_name) and hasattr(proxy, cleanup_fn_name) ): deps = getattr(proxy, get_dep_fn_name)() if deps: dependencies.update(deps) except ( exceptions.NotSupported, exceptions.ServiceDisabledException, ): # Cloud may include endpoint in catalog but not # implement the service or disable it pass dep_graph = utils.TinyDAG() for k, v in dependencies.items(): dep_graph.add_node(k) for dep in v['before']: dep_graph.add_node(dep) dep_graph.add_edge(k, dep) for dep in v.get('after', []): dep_graph.add_edge(dep, k) cleanup_resources: ty.Dict[str, resource.Resource] = {} for service in dep_graph.walk(timeout=wait_timeout): fn = None try: if hasattr(self, service): proxy = getattr(self, service) cleanup_fn = getattr(proxy, cleanup_fn_name, None) if cleanup_fn: fn = functools.partial( cleanup_fn, dry_run=dry_run, client_status_queue=status_queue, identified_resources=cleanup_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, skip_resources=skip_resources, ) except exceptions.ServiceDisabledException: # same reason as above pass if fn: self._pool_executor.submit( cleanup_task, dep_graph, service, fn ) else: dep_graph.node_done(service) for count in utils.iterate_timeout( timeout=wait_timeout, message="Timeout waiting for cleanup to finish", wait=1, ): if dep_graph.is_complete(): return def cleanup_task(graph, service, fn): try: fn() except Exception: log = _log.setup_logging('openstack.project_cleanup') log.exception('Error in the %s cleanup function' % service) finally: graph.node_done(service) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2333076 
openstacksdk-4.0.0/openstack/cloud/tests/0000775000175000017500000000000000000000000020466 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/cloud/tests/__init__.py0000664000175000017500000000000000000000000022565 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2373095 openstacksdk-4.0.0/openstack/clustering/0000775000175000017500000000000000000000000020375 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/__init__.py0000664000175000017500000000000000000000000022474 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/clustering_service.py0000664000175000017500000000143100000000000024645 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.clustering.v1 import _proxy from openstack import service_description class ClusteringService(service_description.ServiceDescription): """The clustering service.""" supported_versions = { '1': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2413113 openstacksdk-4.0.0/openstack/clustering/v1/0000775000175000017500000000000000000000000020723 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/__init__.py0000664000175000017500000000000000000000000023022 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/_async_resource.py0000664000175000017500000000354200000000000024464 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.clustering.v1 import action as _action from openstack import exceptions from openstack import resource class AsyncResource(resource.Resource): def delete(self, session, error_message=None): """Delete the remote resource based on this instance. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :return: An :class:`~openstack.clustering.v1.action.Action` instance. 
The ``fetch`` method will need to be used to populate the `Action` with status information. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_commit` is not set to ``True``. :raises: :exc:`~openstack.exceptions.NotFoundException` if the resource was not found. """ response = self._raw_delete(session) return self._delete_response(response, error_message) def _delete_response(self, response, error_message=None): exceptions.raise_from_response(response, error_message=error_message) location = response.headers['Location'] action_id = location.split('/')[-1] action = _action.Action.existing( id=action_id, connection=self._connection ) return action ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/_proxy.py0000664000175000017500000013730600000000000022627 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.clustering.v1 import action as _action from openstack.clustering.v1 import build_info from openstack.clustering.v1 import cluster as _cluster from openstack.clustering.v1 import cluster_attr as _cluster_attr from openstack.clustering.v1 import cluster_policy as _cluster_policy from openstack.clustering.v1 import event as _event from openstack.clustering.v1 import node as _node from openstack.clustering.v1 import policy as _policy from openstack.clustering.v1 import policy_type as _policy_type from openstack.clustering.v1 import profile as _profile from openstack.clustering.v1 import profile_type as _profile_type from openstack.clustering.v1 import receiver as _receiver from openstack.clustering.v1 import service as _service from openstack import proxy from openstack import resource class Proxy(proxy.Proxy): _resource_registry = { "action": _action.Action, "build_info": build_info.BuildInfo, "cluster": _cluster.Cluster, "cluster_attr": _cluster_attr.ClusterAttr, "cluster_policy": _cluster_policy.ClusterPolicy, "event": _event.Event, "node": _node.Node, "policy": _policy.Policy, "policy_type": _policy_type.PolicyType, "profile": _profile.Profile, "profile_type": _profile_type.ProfileType, "receiver": _receiver.Receiver, "service": _service.Service, } def get_build_info(self): """Get build info for service engine and API :returns: A dictionary containing the API and engine revision string. """ return self._get(build_info.BuildInfo, requires_id=False) def profile_types(self, **query): """Get a generator of profile types. :returns: A generator of objects that are of type :class:`~openstack.clustering.v1.profile_type.ProfileType` """ return self._list(_profile_type.ProfileType, **query) def get_profile_type(self, profile_type): """Get the details about a profile type. :param profile_type: The name of the profile_type to retrieve or an object of :class:`~openstack.clustering.v1.profile_type.ProfileType`. 
:returns: A :class:`~openstack.clustering.v1.profile_type.ProfileType` object. :raises: :class:`~openstack.exceptions.NotFoundException` when no profile_type matching the name could be found. """ return self._get(_profile_type.ProfileType, profile_type) def policy_types(self, **query): """Get a generator of policy types. :returns: A generator of objects that are of type :class:`~openstack.clustering.v1.policy_type.PolicyType` """ return self._list(_policy_type.PolicyType, **query) def get_policy_type(self, policy_type): """Get the details about a policy type. :param policy_type: The name of a poicy_type or an object of :class:`~openstack.clustering.v1.policy_type.PolicyType`. :returns: A :class:`~openstack.clustering.v1.policy_type.PolicyType` object. :raises: :class:`~openstack.exceptions.NotFoundException` when no policy_type matching the name could be found. """ return self._get(_policy_type.PolicyType, policy_type) def create_profile(self, **attrs): """Create a new profile from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.clustering.v1.profile.Profile`, it is comprised of the properties on the Profile class. :returns: The results of profile creation. :rtype: :class:`~openstack.clustering.v1.profile.Profile`. """ return self._create(_profile.Profile, **attrs) def delete_profile(self, profile, ignore_missing=True): """Delete a profile. :param profile: The value can be either the name or ID of a profile or a :class:`~openstack.clustering.v1.profile.Profile` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the profile could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent profile. :returns: ``None`` """ self._delete(_profile.Profile, profile, ignore_missing=ignore_missing) def find_profile(self, name_or_id, ignore_missing=True): """Find a single profile. 
:param str name_or_id: The name or ID of a profile. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.clustering.v1.profile.Profile` object or None """ return self._find( _profile.Profile, name_or_id, ignore_missing=ignore_missing ) def get_profile(self, profile): """Get a single profile. :param profile: The value can be the name or ID of a profile or a :class:`~openstack.clustering.v1.profile.Profile` instance. :returns: One :class:`~openstack.clustering.v1.profile.Profile` :raises: :class:`~openstack.exceptions.NotFoundException` when no profile matching the criteria could be found. """ return self._get(_profile.Profile, profile) def profiles(self, **query): """Retrieve a generator of profiles. :param kwargs query: Optional query parameters to be sent to restrict the profiles to be returned. Available parameters include: * name: The name of a profile. * type: The type name of a profile. * metadata: A list of key-value pairs that are associated with a profile. * sort: A list of sorting keys separated by commas. Each sorting key can optionally be attached with a sorting direction modifier which can be ``asc`` or ``desc``. * limit: Requests a specified size of returned items from the query. Returns a number of items up to the specified limit value. * marker: Specifies the ID of the last-seen item. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. * global_project: A boolean value indicating whether profiles from all projects will be returned. :returns: A generator of profile instances. """ return self._list(_profile.Profile, **query) def update_profile(self, profile, **attrs): """Update a profile. 
:param profile: Either the name or the ID of the profile, or an instance of :class:`~openstack.clustering.v1.profile.Profile`. :param attrs: The attributes to update on the profile represented by the ``value`` parameter. :returns: The updated profile. :rtype: :class:`~openstack.clustering.v1.profile.Profile` """ return self._update(_profile.Profile, profile, **attrs) def validate_profile(self, **attrs): """Validate a profile spec. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.clustering.v1.profile.ProfileValidate`, it is comprised of the properties on the Profile class. :returns: The results of profile validation. :rtype: :class:`~openstack.clustering.v1.profile.ProfileValidate`. """ return self._create(_profile.ProfileValidate, **attrs) # ====== CLUSTERS ====== def create_cluster(self, **attrs): """Create a new cluster from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.clustering.v1.cluster.Cluster`, it is comprised of the properties on the Cluster class. :returns: The results of cluster creation. :rtype: :class:`~openstack.clustering.v1.cluster.Cluster`. """ return self._create(_cluster.Cluster, **attrs) def delete_cluster(self, cluster, ignore_missing=True, force_delete=False): """Delete a cluster. :param cluster: The value can be either the name or ID of a cluster or a :class:`~openstack.cluster.v1.cluster.Cluster` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the cluster could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent cluster. :param bool force_delete: When set to ``True``, the cluster deletion will be forced immediately. :returns: The instance of the Cluster which was deleted. :rtype: :class:`~openstack.cluster.v1.cluster.Cluster`. 
""" if force_delete: server = self._get_resource(_cluster.Cluster, cluster) return server.force_delete(self) else: return self._delete( _cluster.Cluster, cluster, ignore_missing=ignore_missing ) def find_cluster(self, name_or_id, ignore_missing=True): """Find a single cluster. :param str name_or_id: The name or ID of a cluster. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.clustering.v1.cluster.Cluster` object or None """ return self._find( _cluster.Cluster, name_or_id, ignore_missing=ignore_missing ) def get_cluster(self, cluster): """Get a single cluster. :param cluster: The value can be the name or ID of a cluster or a :class:`~openstack.clustering.v1.cluster.Cluster` instance. :returns: One :class:`~openstack.clustering.v1.cluster.Cluster` :raises: :class:`~openstack.exceptions.NotFoundException` when no cluster matching the criteria could be found. """ return self._get(_cluster.Cluster, cluster) def clusters(self, **query): """Retrieve a generator of clusters. :param kwargs query: Optional query parameters to be sent to restrict the clusters to be returned. Available parameters include: * name: The name of a cluster. * status: The current status of a cluster. * sort: A list of sorting keys separated by commas. Each sorting key can optionally be attached with a sorting direction modifier which can be ``asc`` or ``desc``. * limit: Requests a specified size of returned items from the query. Returns a number of items up to the specified limit value. * marker: Specifies the ID of the last-seen item. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. 
* global_project: A boolean value indicating whether clusters from all projects will be returned. :returns: A generator of cluster instances. """ return self._list(_cluster.Cluster, **query) def update_cluster(self, cluster, **attrs): """Update a cluster. :param cluster: Either the name or the ID of the cluster, or an instance of :class:`~openstack.clustering.v1.cluster.Cluster`. :param attrs: The attributes to update on the cluster represented by the ``cluster`` parameter. :returns: The updated cluster. :rtype: :class:`~openstack.clustering.v1.cluster.Cluster` """ return self._update(_cluster.Cluster, cluster, **attrs) def get_cluster_metadata(self, cluster): """Return a dictionary of metadata for a cluster :param cluster: Either the ID of a cluster or a :class:`~openstack.clustering.v3.cluster.Cluster`. :returns: A :class:`~openstack.clustering.v3.cluster.Cluster` with the cluster's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.clustering.v3.cluster.Cluster` """ cluster = self._get_resource(_cluster.Cluster, cluster) return cluster.fetch_metadata(self) def set_cluster_metadata(self, cluster, **metadata): """Update metadata for a cluster :param cluster: Either the ID of a cluster or a :class:`~openstack.clustering.v3.cluster.Cluster`. :param kwargs metadata: Key/value pairs to be updated in the cluster's metadata. No other metadata is modified by this call. All keys and values are stored as Unicode. :returns: A :class:`~openstack.clustering.v3.cluster.Cluster` with the cluster's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.clustering.v3.cluster.Cluster` """ cluster = self._get_resource(_cluster.Cluster, cluster) return cluster.set_metadata(self, metadata=metadata) def delete_cluster_metadata(self, cluster, keys=None): """Delete metadata for a cluster :param cluster: Either the ID of a cluster or a :class:`~openstack.clustering.v3.cluster.Cluster`. :param list keys: The keys to delete. 
If left empty complete metadata will be removed. :rtype: ``None`` """ cluster = self._get_resource(_cluster.Cluster, cluster) if keys is not None: for key in keys: cluster.delete_metadata_item(self, key) else: cluster.delete_metadata(self) def add_nodes_to_cluster(self, cluster, nodes): """Add nodes to a cluster. :param cluster: Either the name or the ID of the cluster, or an instance of :class:`~openstack.clustering.v1.cluster.Cluster`. :param nodes: List of nodes to be added to the cluster. :returns: A dict containing the action initiated by this operation. """ if isinstance(cluster, _cluster.Cluster): obj = cluster else: obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) return obj.add_nodes(self, nodes) def remove_nodes_from_cluster(self, cluster, nodes, **params): """Remove nodes from a cluster. :param cluster: Either the name or the ID of the cluster, or an instance of :class:`~openstack.clustering.v1.cluster.Cluster`. :param nodes: List of nodes to be removed from the cluster. :param kwargs params: Optional query parameters to be sent to restrict the nodes to be returned. Available parameters include: * destroy_after_deletion: A boolean value indicating whether the deleted nodes to be destroyed right away. :returns: A dict containing the action initiated by this operation. """ if isinstance(cluster, _cluster.Cluster): obj = cluster else: obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) return obj.del_nodes(self, nodes, **params) def replace_nodes_in_cluster(self, cluster, nodes): """Replace the nodes in a cluster with specified nodes. :param cluster: Either the name or the ID of the cluster, or an instance of :class:`~openstack.clustering.v1.cluster.Cluster`. :param nodes: List of nodes to be deleted/added to the cluster. :returns: A dict containing the action initiated by this operation. 
""" if isinstance(cluster, _cluster.Cluster): obj = cluster else: obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) return obj.replace_nodes(self, nodes) def scale_out_cluster(self, cluster, count=None): """Inflate the size of a cluster. :param cluster: Either the name or the ID of the cluster, or an instance of :class:`~openstack.clustering.v1.cluster.Cluster`. :param count: Optional parameter specifying the number of nodes to be added. :returns: A dict containing the action initiated by this operation. """ if isinstance(cluster, _cluster.Cluster): obj = cluster else: obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) return obj.scale_out(self, count) def scale_in_cluster(self, cluster, count=None): """Shrink the size of a cluster. :param cluster: Either the name or the ID of the cluster, or an instance of :class:`~openstack.clustering.v1.cluster.Cluster`. :param count: Optional parameter specifying the number of nodes to be removed. :returns: A dict containing the action initiated by this operation. """ if isinstance(cluster, _cluster.Cluster): obj = cluster else: obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) return obj.scale_in(self, count) def resize_cluster(self, cluster, **params): """Resize of cluster. :param cluster: Either the name or the ID of the cluster, or an instance of :class:`~openstack.clustering.v1.cluster.Cluster`. :param dict params: A dictionary providing the parameters for the resize action. :returns: A dict containing the action initiated by this operation. """ if isinstance(cluster, _cluster.Cluster): obj = cluster else: obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) return obj.resize(self, **params) def attach_policy_to_cluster(self, cluster, policy, **params): """Attach a policy to a cluster. :param cluster: Either the name or the ID of the cluster, or an instance of :class:`~openstack.clustering.v1.cluster.Cluster`. :param policy: Either the name or the ID of a policy. 
:param dict params: A dictionary containing the properties for the policy to be attached. :returns: A dict containing the action initiated by this operation. """ if isinstance(cluster, _cluster.Cluster): obj = cluster else: obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) return obj.policy_attach(self, policy, **params) def detach_policy_from_cluster(self, cluster, policy): """Detach a policy from a cluster. :param cluster: Either the name or the ID of the cluster, or an instance of :class:`~openstack.clustering.v1.cluster.Cluster`. :param policy: Either the name or the ID of a policy. :returns: A dict containing the action initiated by this operation. """ if isinstance(cluster, _cluster.Cluster): obj = cluster else: obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) return obj.policy_detach(self, policy) def update_cluster_policy(self, cluster, policy, **params): """Change properties of a policy which is bound to the cluster. :param cluster: Either the name or the ID of the cluster, or an instance of :class:`~openstack.clustering.v1.cluster.Cluster`. :param policy: Either the name or the ID of a policy. :param dict params: A dictionary containing the new properties for the policy. :returns: A dict containing the action initiated by this operation. """ if isinstance(cluster, _cluster.Cluster): obj = cluster else: obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) return obj.policy_update(self, policy, **params) def collect_cluster_attrs(self, cluster, path, **query): """Collect attribute values across a cluster. :param cluster: The value can be either the ID of a cluster or a :class:`~openstack.clustering.v1.cluster.Cluster` instance. :param path: A Json path string specifying the attribute to collect. :param query: Optional query parameters to be sent to limit the resources being returned. :returns: A dictionary containing the list of attribute values. 
""" return self._list( _cluster_attr.ClusterAttr, cluster_id=cluster, path=path ) def check_cluster(self, cluster, **params): """Check a cluster. :param cluster: The value can be either the ID of a cluster or a :class:`~openstack.clustering.v1.cluster.Cluster` instance. :param dict params: A dictionary providing the parameters for the check action. :returns: A dictionary containing the action ID. """ obj = self._get_resource(_cluster.Cluster, cluster) return obj.check(self, **params) def recover_cluster(self, cluster, **params): """recover a cluster. :param cluster: The value can be either the ID of a cluster or a :class:`~openstack.clustering.v1.cluster.Cluster` instance. :param dict params: A dictionary providing the parameters for the recover action. :returns: A dictionary containing the action ID. """ obj = self._get_resource(_cluster.Cluster, cluster) return obj.recover(self, **params) def perform_operation_on_cluster(self, cluster, operation, **params): """Perform an operation on the specified cluster. :param cluster: The value can be either the ID of a cluster or a :class:`~openstack.clustering.v1.cluster.Cluster` instance. :param operation: A string specifying the operation to be performed. :param dict params: A dictionary providing the parameters for the operation. :returns: A dictionary containing the action ID. """ obj = self._get_resource(_cluster.Cluster, cluster) return obj.op(self, operation, **params) def create_node(self, **attrs): """Create a new node from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.clustering.v1.node.Node`, it is comprised of the properties on the ``Node`` class. :returns: The results of node creation. :rtype: :class:`~openstack.clustering.v1.node.Node`. """ return self._create(_node.Node, **attrs) def delete_node(self, node, ignore_missing=True, force_delete=False): """Delete a node. 
:param node: The value can be either the name or ID of a node or a :class:`~openstack.cluster.v1.node.Node` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the node could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent node. :param bool force_delete: When set to ``True``, the node deletion will be forced immediately. :returns: The instance of the Node which was deleted. :rtype: :class:`~openstack.cluster.v1.node.Node`. """ if force_delete: server = self._get_resource(_node.Node, node) return server.force_delete(self) else: return self._delete( _node.Node, node, ignore_missing=ignore_missing ) def find_node(self, name_or_id, ignore_missing=True): """Find a single node. :param str name_or_id: The name or ID of a node. :param bool ignore_missing: When set to "False" :class:`~openstack.exceptions.NotFoundException` will be raised when the specified node does not exist. when set to "True", None will be returned when attempting to find a nonexistent policy :returns: One :class:`~openstack.clustering.v1.node.Node` object or None. """ return self._find( _node.Node, name_or_id, ignore_missing=ignore_missing ) def get_node(self, node, details=False): """Get a single node. :param node: The value can be the name or ID of a node or a :class:`~openstack.clustering.v1.node.Node` instance. :param details: An optional argument that indicates whether the server should return more details when retrieving the node data. :returns: One :class:`~openstack.clustering.v1.node.Node` :raises: :class:`~openstack.exceptions.NotFoundException` when no node matching the name or ID could be found. """ # NOTE: When retrieving node with details (using NodeDetail resource), # the `node_id` is treated as part of the base_path thus a URI # property rather than a resource ID as assumed by the _get() method # in base proxy. 
if details: return self._get(_node.NodeDetail, requires_id=False, node_id=node) return self._get(_node.Node, node) def nodes(self, **query): """Retrieve a generator of nodes. :param kwargs query: Optional query parameters to be sent to restrict the nodes to be returned. Available parameters include: * cluster_id: A string including the name or ID of a cluster to which the resulted node(s) is a member. * name: The name of a node. * status: The current status of a node. * sort: A list of sorting keys separated by commas. Each sorting key can optionally be attached with a sorting direction modifier which can be ``asc`` or ``desc``. * limit: Requests at most the specified number of items be returned from the query. * marker: Specifies the ID of the last-seen node. Use the limit parameter to make an initial limited request and use the ID of the last-seen node from the response as the marker parameter value in a subsequent limited request. * global_project: A boolean value indicating whether nodes from all projects will be returned. :returns: A generator of node instances. """ return self._list(_node.Node, **query) def update_node(self, node, **attrs): """Update a node. :param node: Either the name or the ID of the node, or an instance of :class:`~openstack.clustering.v1.node.Node`. :param attrs: The attributes to update on the node represented by the ``node`` parameter. :returns: The updated node. :rtype: :class:`~openstack.clustering.v1.node.Node` """ return self._update(_node.Node, node, **attrs) def check_node(self, node, **params): """Check the health of the specified node. :param node: The value can be either the ID of a node or a :class:`~openstack.clustering.v1.node.Node` instance. :param dict params: A dictionary providing the parametes to the check action. :returns: A dictionary containing the action ID. 
""" obj = self._get_resource(_node.Node, node) return obj.check(self, **params) def recover_node(self, node, **params): """Recover the specified node into healthy status. :param node: The value can be either the ID of a node or a :class:`~openstack.clustering.v1.node.Node` instance. :param dict params: A dict supplying parameters to the recover action. :returns: A dictionary containing the action ID. """ obj = self._get_resource(_node.Node, node) return obj.recover(self, **params) def adopt_node(self, preview=False, **attrs): """Adopting an existing resource as a node. :param preview: A boolean indicating whether this is a "preview" operation which means only the profile to be used is returned rather than creating a node object using that profile. :param dict attrs: Keyword parameters for node adoption. Valid parameters include: * type: (Required) A string containing the profile type and version to be used for node adoption. For example, ``os.nova.sever-1.0``. * identity: (Required) A string including the name or ID of an OpenStack resource to be adopted as a Senlin node. * name: (Optional) The name of node to be created. Omitting this parameter will have the node named automatically. * snapshot: (Optional) A boolean indicating whether a snapshot of the target resource should be created if possible. Default is False. * metadata: (Optional) A dictionary of arbitrary key-value pairs to be associated with the adopted node. * overrides: (Optional) A dictionary of key-value pairs to be used to override attributes derived from the target resource. :returns: The result of node adoption. If `preview` is set to False (default), returns a :class:`~openstack.clustering.v1.node.Node` object, otherwise a Dict is returned containing the profile to be used for the new node. """ node = self._get_resource(_node.Node, None) return node.adopt(self, preview=preview, **attrs) def perform_operation_on_node(self, node, operation, **params): """Perform an operation on the specified node. 
:param node: The value can be either the ID of a node or a :class:`~openstack.clustering.v1.node.Node` instance. :param operation: A string specifying the operation to be performed. :param dict params: A dictionary providing the parameters for the operation. :returns: A dictionary containing the action ID. """ obj = self._get_resource(_node.Node, node) return obj.op(self, operation, **params) def create_policy(self, **attrs): """Create a new policy from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.clustering.v1.policy.Policy`, it is comprised of the properties on the ``Policy`` class. :returns: The results of policy creation. :rtype: :class:`~openstack.clustering.v1.policy.Policy`. """ return self._create(_policy.Policy, **attrs) def delete_policy(self, policy, ignore_missing=True): """Delete a policy. :param policy: The value can be either the name or ID of a policy or a :class:`~openstack.clustering.v1.policy.Policy` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the policy could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent policy. :returns: ``None`` """ self._delete(_policy.Policy, policy, ignore_missing=ignore_missing) def find_policy(self, name_or_id, ignore_missing=True): """Find a single policy. :param str name_or_id: The name or ID of a policy. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the specified policy does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent policy. :returns: A policy object or None. :rtype: :class:`~openstack.clustering.v1.policy.Policy` """ return self._find( _policy.Policy, name_or_id, ignore_missing=ignore_missing ) def get_policy(self, policy): """Get a single policy. 
:param policy: The value can be the name or ID of a policy or a :class:`~openstack.clustering.v1.policy.Policy` instance. :returns: A policy object. :rtype: :class:`~openstack.clustering.v1.policy.Policy` :raises: :class:`~openstack.exceptions.NotFoundException` when no policy matching the criteria could be found. """ return self._get(_policy.Policy, policy) def policies(self, **query): """Retrieve a generator of policies. :param kwargs query: Optional query parameters to be sent to restrict the policies to be returned. Available parameters include: * name: The name of a policy. * type: The type name of a policy. * sort: A list of sorting keys separated by commas. Each sorting key can optionally be attached with a sorting direction modifier which can be ``asc`` or ``desc``. * limit: Requests a specified size of returned items from the query. Returns a number of items up to the specified limit value. * marker: Specifies the ID of the last-seen item. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. * global_project: A boolean value indicating whether policies from all projects will be returned. :returns: A generator of policy instances. """ return self._list(_policy.Policy, **query) def update_policy(self, policy, **attrs): """Update a policy. :param policy: Either the name or the ID of a policy, or an instance of :class:`~openstack.clustering.v1.policy.Policy`. :param attrs: The attributes to update on the policy represented by the ``value`` parameter. :returns: The updated policy. :rtype: :class:`~openstack.clustering.v1.policy.Policy` """ return self._update(_policy.Policy, policy, **attrs) def validate_policy(self, **attrs): """Validate a policy spec. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.clustering.v1.policy.PolicyValidate`, it is comprised of the properties on the Policy class. 
:returns: The results of Policy validation. :rtype: :class:`~openstack.clustering.v1.policy.PolicyValidate`. """ return self._create(_policy.PolicyValidate, **attrs) def cluster_policies(self, cluster, **query): """Retrieve a generator of cluster-policy bindings. :param cluster: The value can be the name or ID of a cluster or a :class:`~openstack.clustering.v1.cluster.Cluster` instance. :param kwargs query: Optional query parameters to be sent to restrict the policies to be returned. Available parameters include: * enabled: A boolean value indicating whether the policy is enabled on the cluster. :returns: A generator of cluster-policy binding instances. """ cluster_id = resource.Resource._get_id(cluster) return self._list( _cluster_policy.ClusterPolicy, cluster_id=cluster_id, **query ) def get_cluster_policy(self, cluster_policy, cluster): """Get a cluster-policy binding. :param cluster_policy: The value can be the name or ID of a policy or a :class:`~openstack.clustering.v1.policy.Policy` instance. :param cluster: The value can be the name or ID of a cluster or a :class:`~openstack.clustering.v1.cluster.Cluster` instance. :returns: a cluster-policy binding object. :rtype: :class:`~openstack.clustering.v1.cluster_policy.CLusterPolicy` :raises: :class:`~openstack.exceptions.NotFoundException` when no cluster-policy binding matching the criteria could be found. """ return self._get( _cluster_policy.ClusterPolicy, cluster_policy, cluster_id=cluster ) def create_receiver(self, **attrs): """Create a new receiver from attributes. :param dict attrs: Keyword arguments that will be used to create a :class:`~openstack.clustering.v1.receiver.Receiver`, it is comprised of the properties on the Receiver class. :returns: The results of receiver creation. :rtype: :class:`~openstack.clustering.v1.receiver.Receiver`. """ return self._create(_receiver.Receiver, **attrs) def update_receiver(self, receiver, **attrs): """Update a receiver. 
:param receiver: The value can be either the name or ID of a receiver or a :class:`~openstack.clustering.v1.receiver.Receiver` instance. :param attrs: The attributes to update on the receiver parameter. Valid attribute names include ``name``, ``action`` and ``params``. :returns: The updated receiver. :rtype: :class:`~openstack.clustering.v1.receiver.Receiver` """ return self._update(_receiver.Receiver, receiver, **attrs) def delete_receiver(self, receiver, ignore_missing=True): """Delete a receiver. :param receiver: The value can be either the name or ID of a receiver or a :class:`~openstack.clustering.v1.receiver.Receiver` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.NotFoundException` will be raised when the receiver could not be found. When set to ``True``, no exception will be raised when attempting to delete a non-existent receiver. :returns: ``None`` """ self._delete( _receiver.Receiver, receiver, ignore_missing=ignore_missing ) def find_receiver(self, name_or_id, ignore_missing=True): """Find a single receiver. :param str name_or_id: The name or ID of a receiver. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the specified receiver does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent receiver. :returns: A receiver object or None. :rtype: :class:`~openstack.clustering.v1.receiver.Receiver` """ return self._find( _receiver.Receiver, name_or_id, ignore_missing=ignore_missing ) def get_receiver(self, receiver): """Get a single receiver. :param receiver: The value can be the name or ID of a receiver or a :class:`~openstack.clustering.v1.receiver.Receiver` instance. :returns: A receiver object. :rtype: :class:`~openstack.clustering.v1.receiver.Receiver` :raises: :class:`~openstack.exceptions.NotFoundException` when no receiver matching the criteria could be found. 
""" return self._get(_receiver.Receiver, receiver) def receivers(self, **query): """Retrieve a generator of receivers. :param kwargs query: Optional query parameters for restricting the receivers to be returned. Available parameters include: * name: The name of a receiver object. * type: The type of receiver objects. * cluster_id: The ID of the associated cluster. * action: The name of the associated action. * sort: A list of sorting keys separated by commas. Each sorting key can optionally be attached with a sorting direction modifier which can be ``asc`` or ``desc``. * global_project: A boolean value indicating whether receivers * from all projects will be returned. :returns: A generator of receiver instances. """ return self._list(_receiver.Receiver, **query) def get_action(self, action): """Get a single action. :param action: The value can be the name or ID of an action or a :class:`~openstack.clustering.v1.action.Action` instance. :returns: an action object. :rtype: :class:`~openstack.clustering.v1.action.Action` :raises: :class:`~openstack.exceptions.NotFoundException` when no action matching the criteria could be found. """ return self._get(_action.Action, action) def actions(self, **query): """Retrieve a generator of actions. :param kwargs query: Optional query parameters to be sent to restrict the actions to be returned. Available parameters include: * name: name of action for query. * target: ID of the target object for which the actions should be returned. * action: built-in action types for query. * sort: A list of sorting keys separated by commas. Each sorting key can optionally be attached with a sorting direction modifier which can be ``asc`` or ``desc``. * limit: Requests a specified size of returned items from the query. Returns a number of items up to the specified limit value. * marker: Specifies the ID of the last-seen item. 
Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. :returns: A generator of action instances. """ return self._list(_action.Action, **query) def update_action(self, action, **attrs): """Update a profile. :param action: Either the ID of the action, or an instance of :class:`~openstack.clustering.v1.action.Action`. :param attrs: The attributes to update on the action represented by the ``value`` parameter. :returns: The updated action. :rtype: :class:`~openstack.clustering.v1.action.Action` """ return self._update(_action.Action, action, **attrs) def get_event(self, event): """Get a single event. :param event: The value can be the name or ID of an event or a :class:`~openstack.clustering.v1.event.Event` instance. :returns: an event object. :rtype: :class:`~openstack.clustering.v1.event.Event` :raises: :class:`~openstack.exceptions.NotFoundException` when no event matching the criteria could be found. """ return self._get(_event.Event, event) def events(self, **query): """Retrieve a generator of events. :param kwargs query: Optional query parameters to be sent to restrict the events to be returned. Available parameters include: * obj_name: name string of the object associated with an event. * obj_type: type string of the object related to an event. The value can be ``cluster``, ``node``, ``policy`` etc. * obj_id: ID of the object associated with an event. * cluster_id: ID of the cluster associated with the event, if any. * action: name of the action associated with an event. * sort: A list of sorting keys separated by commas. Each sorting key can optionally be attached with a sorting direction modifier which can be ``asc`` or ``desc``. * limit: Requests a specified size of returned items from the query. Returns a number of items up to the specified limit value. * marker: Specifies the ID of the last-seen item. 
Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. * global_project: A boolean specifying whether events from all projects should be returned. This option is subject to access control checking. :returns: A generator of event instances. """ return self._list(_event.Event, **query) def wait_for_status( self, res, status, failures=None, interval=2, wait=120 ): """Wait for a resource to be in a particular status. :param res: The resource to wait on to reach the specified status. The resource must have a ``status`` attribute. :type resource: A :class:`~openstack.resource.Resource` object. :param status: Desired status. :param failures: Statuses that would be interpreted as failures. :type failures: :py:class:`list` :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to the desired status failed to occur in specified seconds. :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource has transited to one of the failure statuses. :raises: :class:`~AttributeError` if the resource does not have a ``status`` attribute. """ failures = [] if failures is None else failures return resource.wait_for_status( self, res, status, failures, interval, wait ) def wait_for_delete(self, res, interval=2, wait=120): """Wait for a resource to be deleted. :param res: The resource to wait on to be deleted. :type resource: A :class:`~openstack.resource.Resource` object. :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :returns: The resource is returned on success. 
:raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to delete failed to occur in the specified seconds. """ return resource.wait_for_delete(self, res, interval, wait) def services(self, **query): """Get a generator of services. :returns: A generator of objects that are of type :class:`~openstack.clustering.v1.service.Service` """ return self._list(_service.Service, **query) def list_profile_type_operations(self, profile_type): """Get the operation about a profile type. :param profile_type: The name of the profile_type to retrieve or an object of :class:`~openstack.clustering.v1.profile_type.ProfileType`. :returns: A :class:`~openstack.clustering.v1.profile_type.ProfileType` object. :raises: :class:`~openstack.exceptions.NotFoundException` when no profile_type matching the name could be found. """ obj = self._get_resource(_profile_type.ProfileType, profile_type) return obj.type_ops(self) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/action.py0000664000175000017500000000577600000000000022571 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Action(resource.Resource): resource_key = 'action' resources_key = 'actions' base_path = '/actions' # Capabilities allow_list = True allow_fetch = True allow_commit = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'name', 'action', 'status', 'sort', 'global_project', 'cluster_id', target_id='target', ) # Properties #: Name of the action. name = resource.Body('name') #: ID of the target object, which can be a cluster or a node. target_id = resource.Body('target') #: Built-in type name of action. action = resource.Body('action') #: A string representation of the reason why the action was created. cause = resource.Body('cause') #: The owning engine that is currently running the action. owner_id = resource.Body('owner') #: The ID of the user who created this action. user_id = resource.Body('user') #: The ID of the project this profile belongs to. project_id = resource.Body('project') #: The domain ID of the action. domain_id = resource.Body('domain') #: Interval in seconds between two consecutive executions. interval = resource.Body('interval') #: The time the action was started. start_at = resource.Body('start_time') #: The time the action completed execution. end_at = resource.Body('end_time') #: The timeout in seconds. timeout = resource.Body('timeout') #: Current status of the action. status = resource.Body('status') #: A string describing the reason that brought the action to its current # status. status_reason = resource.Body('status_reason') #: A dictionary containing the inputs to the action. inputs = resource.Body('inputs', type=dict) #: A dictionary containing the outputs to the action. outputs = resource.Body('outputs', type=dict) #: A list of actions that must finish before this action starts execution. depends_on = resource.Body('depends_on', type=list) #: A list of actions that can start only after this action has finished. 
depended_by = resource.Body('depended_by', type=list) #: Timestamp when the action is created. created_at = resource.Body('created_at') #: Timestamp when the action was last updated. updated_at = resource.Body('updated_at') #: The ID of cluster which this action runs on. cluster_id = resource.Body('cluster_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/build_info.py0000664000175000017500000000163000000000000023407 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class BuildInfo(resource.Resource): base_path = '/build-info' resource_key = 'build_info' # Capabilities allow_fetch = True # Properties #: String representation of the API build version api = resource.Body('api') #: String representation of the engine build version engine = resource.Body('engine') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/cluster.py0000664000175000017500000001472000000000000022762 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.clustering.v1 import _async_resource from openstack.common import metadata from openstack import resource from openstack import utils class Cluster(_async_resource.AsyncResource, metadata.MetadataMixin): resource_key = 'cluster' resources_key = 'clusters' base_path = '/clusters' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'name', 'status', 'sort', 'global_project' ) # Properties #: The name of the cluster. name = resource.Body('name') #: The ID of the profile used by this cluster. profile_id = resource.Body('profile_id') #: The ID of the user who created this cluster, thus the owner of it. user_id = resource.Body('user') #: The ID of the project this cluster belongs to. project_id = resource.Body('project') #: The domain ID of the cluster owner. domain_id = resource.Body('domain') #: Timestamp of when the cluster was initialized. #: *Type: datetime object parsed from ISO 8601 formatted string* init_at = resource.Body('init_at') #: Timestamp of when the cluster was created. #: *Type: datetime object parsed from ISO 8601 formatted string* created_at = resource.Body('created_at') #: Timestamp of when the cluster was last updated. #: *Type: datetime object parsed from ISO 8601 formatted string* updated_at = resource.Body('updated_at') #: Lower bound (inclusive) for the size of the cluster. min_size = resource.Body('min_size', type=int) #: Upper bound (inclusive) for the size of the cluster. 
A value of #: -1 indicates that there is no upper limit of cluster size. max_size = resource.Body('max_size', type=int) #: Desired capacity for the cluster. A cluster would be created at the #: scale specified by this value. desired_capacity = resource.Body('desired_capacity', type=int) #: Default timeout (in seconds) for cluster operations. timeout = resource.Body('timeout') #: A string representation of the cluster status. status = resource.Body('status') #: A string describing the reason why the cluster in current status. status_reason = resource.Body('status_reason') #: A dictionary configuration for cluster. config = resource.Body('config', type=dict) #: A collection of key-value pairs that are attached to the cluster. metadata = resource.Body('metadata', type=dict) #: A dictionary with some runtime data associated with the cluster. data = resource.Body('data', type=dict) #: A list IDs of nodes that are members of the cluster. node_ids = resource.Body('nodes') #: Name of the profile used by the cluster. profile_name = resource.Body('profile_name') #: Specify whether the cluster update should only pertain to the profile. 
is_profile_only = resource.Body('profile_only', type=bool) #: A dictionary with dependency information of the cluster dependents = resource.Body('dependents', type=dict) def action(self, session, body): url = utils.urljoin(self.base_path, self._get_id(self), 'actions') resp = session.post(url, json=body) return resp.json() def add_nodes(self, session, nodes): body = { 'add_nodes': { 'nodes': nodes, } } return self.action(session, body) def del_nodes(self, session, nodes, **params): data = {'nodes': nodes} data.update(params) body = {'del_nodes': data} return self.action(session, body) def replace_nodes(self, session, nodes): body = { 'replace_nodes': { 'nodes': nodes, } } return self.action(session, body) def scale_out(self, session, count=None): body = { 'scale_out': { 'count': count, } } return self.action(session, body) def scale_in(self, session, count=None): body = { 'scale_in': { 'count': count, } } return self.action(session, body) def resize(self, session, **params): body = {'resize': params} return self.action(session, body) def policy_attach(self, session, policy_id, **params): data = {'policy_id': policy_id} data.update(params) body = {'policy_attach': data} return self.action(session, body) def policy_detach(self, session, policy_id): body = { 'policy_detach': { 'policy_id': policy_id, } } return self.action(session, body) def policy_update(self, session, policy_id, **params): data = {'policy_id': policy_id} data.update(params) body = {'policy_update': data} return self.action(session, body) def check(self, session, **params): body = {'check': params} return self.action(session, body) def recover(self, session, **params): body = {'recover': params} return self.action(session, body) def op(self, session, operation, **params): """Perform an operation on the cluster. :param session: A session object used for sending request. :param operation: A string representing the operation to be performed. 
:param dict params: An optional dict providing the parameters for the operation. :returns: A dictionary containing the action ID. """ url = utils.urljoin(self.base_path, self.id, 'ops') resp = session.post(url, json={operation: params}) return resp.json() def force_delete(self, session): """Force delete a cluster.""" body = {'force': True} url = utils.urljoin(self.base_path, self.id) response = session.delete(url, json=body) return self._delete_response(response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/cluster_attr.py0000664000175000017500000000214100000000000024006 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ClusterAttr(resource.Resource): resources_key = 'cluster_attributes' base_path = '/clusters/%(cluster_id)s/attrs/%(path)s' # capabilities allow_list = True # Properties #: The identity of the cluster cluster_id = resource.URI('cluster_id') #: The json path string for attribute retrieval path = resource.URI('path') #: The id of the node that carries the attribute value. node_id = resource.Body('id') #: The value of the attribute requested. 
attr_value = resource.Body('value') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/cluster_policy.py0000664000175000017500000000306500000000000024341 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ClusterPolicy(resource.Resource): resource_key = 'cluster_policy' resources_key = 'cluster_policies' base_path = '/clusters/%(cluster_id)s/policies' # Capabilities allow_list = True allow_fetch = True _query_mapping = resource.QueryParameters( 'sort', 'policy_name', 'policy_type', is_enabled='enabled' ) # Properties #: ID of the policy object. policy_id = resource.Body('policy_id', alternate_id=True) #: Name of the policy object. policy_name = resource.Body('policy_name') #: ID of the cluster object. cluster_id = resource.URI('cluster_id') #: Name of the cluster object. cluster_name = resource.Body('cluster_name') #: Type string of the policy. policy_type = resource.Body('policy_type') #: Whether the policy is enabled on the cluster. *Type: bool* is_enabled = resource.Body('enabled', type=bool) #: Data associated with the cluster-policy binding. 
data = resource.Body('data', type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/event.py0000664000175000017500000000414200000000000022417 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Event(resource.Resource): resource_key = 'event' resources_key = 'events' base_path = '/events' # Capabilities allow_list = True allow_fetch = True _query_mapping = resource.QueryParameters( 'cluster_id', 'action', 'level', 'sort', 'global_project', obj_id='oid', obj_name='oname', obj_type='otype', ) # Properties #: Timestamp string (in ISO8601 format) when the event was generated. generated_at = resource.Body('timestamp') #: The UUID of the object related to this event. obj_id = resource.Body('oid') #: The name of the object related to this event. obj_name = resource.Body('oname') #: The type name of the object related to this event. obj_type = resource.Body('otype') #: The UUID of the cluster related to this event, if any. cluster_id = resource.Body('cluster_id') #: The event level (priority). level = resource.Body('level') #: The ID of the user. user_id = resource.Body('user') #: The ID of the project (tenant). project_id = resource.Body('project') #: The string representation of the action associated with the event. action = resource.Body('action') #: The status of the associated object. 
status = resource.Body('status') #: A string description of the reason that brought the object into its #: current status. status_reason = resource.Body('status_reason') #: The metadata of an event object. meta_data = resource.Body('meta_data') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/node.py0000664000175000017500000001465100000000000022231 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.clustering.v1 import _async_resource from openstack import resource from openstack import utils class Node(_async_resource.AsyncResource): resource_key = 'node' resources_key = 'nodes' base_path = '/nodes' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'show_details', 'name', 'sort', 'global_project', 'cluster_id', 'status', ) # Properties #: The name of the node. name = resource.Body('name') #: The ID of the physical object that backs the node. physical_id = resource.Body('physical_id') #: The ID of the cluster in which this node is a member. #: A node is an orphan node if this field is empty. cluster_id = resource.Body('cluster_id') #: The ID of the profile used by this node. profile_id = resource.Body('profile_id') #: The domain ID of the node. 
domain_id = resource.Body('domain') #: The ID of the user who created this node. user_id = resource.Body('user') #: The ID of the project this node belongs to. project_id = resource.Body('project') #: The name of the profile used by this node. profile_name = resource.Body('profile_name') #: An integer that is unique inside the owning cluster. #: A value of -1 means this node is an orphan node. index = resource.Body('index', type=int) #: A string indicating the role the node plays in a cluster. role = resource.Body('role') #: The timestamp of the node object's initialization. #: *Type: datetime object parsed from ISO 8601 formatted string* init_at = resource.Body('init_at') #: The timestamp of the node's creation, i.e. the physical object #: represented by this node is also created. #: *Type: datetime object parsed from ISO 8601 formatted string* created_at = resource.Body('created_at') #: The timestamp the node was last updated. #: *Type: datetime object parsed from ISO 8601 formatted string* updated_at = resource.Body('updated_at') #: A string indicating the node's status. status = resource.Body('status') #: A string describing why the node entered its current status. status_reason = resource.Body('status_reason') #: A map containing key-value pairs attached to the node. metadata = resource.Body('metadata', type=dict) #: A map containing some runtime data for this node. data = resource.Body('data', type=dict) #: A map containing the details of the physical object this node #: represents details = resource.Body('details', type=dict) #: A map containing the dependency of nodes dependents = resource.Body('dependents', type=dict) #: Whether the node is tainted. *Type: bool* tainted = resource.Body('tainted', type=bool) def _action(self, session, body): """Procedure the invoke an action API. :param session: A session object used for sending request. :param body: The body of action to be sent. 
""" url = utils.urljoin(self.base_path, self.id, 'actions') resp = session.post(url, json=body) return resp.json() def check(self, session, **params): """An action procedure for the node to check its health status. :param session: A session object used for sending request. :returns: A dictionary containing the action ID. """ body = {'check': params} return self._action(session, body) def recover(self, session, **params): """An action procedure for the node to recover. :param session: A session object used for sending request. :returns: A dictionary containing the action ID. """ body = {'recover': params} return self._action(session, body) def op(self, session, operation, **params): """Perform an operation on the specified node. :param session: A session object used for sending request. :param operation: A string representing the operation to be performed. :param dict params: An optional dict providing the parameters for the operation. :returns: A dictionary containing the action ID. """ url = utils.urljoin(self.base_path, self.id, 'ops') resp = session.post(url, json={operation: params}) return resp.json() def adopt(self, session, preview=False, **params): """Adopt a node for management. :param session: A session object used for sending request. :param preview: A boolean indicating whether the adoption is a preview. A "preview" does not create the node object. :param dict params: A dict providing the details of a node to be adopted. 
""" if preview: path = 'adopt-preview' attrs = { 'identity': params.get('identity'), 'overrides': params.get('overrides'), 'type': params.get('type'), 'snapshot': params.get('snapshot'), } else: path = 'adopt' attrs = params url = utils.urljoin(self.base_path, path) resp = session.post(url, json=attrs) if preview: return resp.json() self._translate_response(resp) return self def force_delete(self, session): """Force delete a node.""" body = {'force': True} url = utils.urljoin(self.base_path, self.id) response = session.delete(url, json=body) return self._delete_response(response) class NodeDetail(Node): base_path = '/nodes/%(node_id)s?show_details=True' allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = False node_id = resource.URI('node_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/policy.py0000664000175000017500000000371500000000000022602 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Policy(resource.Resource): resource_key = 'policy' resources_key = 'policies' base_path = '/policies' # Capabilities allow_list = True allow_fetch = True allow_create = True allow_delete = True allow_commit = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'name', 'type', 'sort', 'global_project' ) # Properties #: The name of the policy. 
name = resource.Body('name') #: The type name of the policy. type = resource.Body('type') #: The ID of the project this policy belongs to. project_id = resource.Body('project') # The domain ID of the policy. domain_id = resource.Body('domain') #: The ID of the user who created this policy. user_id = resource.Body('user') #: The timestamp when the policy is created. created_at = resource.Body('created_at') #: The timestamp when the policy was last updated. updated_at = resource.Body('updated_at') #: The specification of the policy. spec = resource.Body('spec', type=dict) #: A dictionary containing runtime data of the policy. data = resource.Body('data', type=dict) class PolicyValidate(Policy): base_path = '/policies/validate' # Capabilities allow_list = False allow_fetch = False allow_create = True allow_delete = False allow_commit = False commit_method = 'PUT' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/policy_type.py0000664000175000017500000000203700000000000023637 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class PolicyType(resource.Resource): resource_key = 'policy_type' resources_key = 'policy_types' base_path = '/policy-types' # Capabilities allow_list = True allow_fetch = True # Properties #: Name of policy type. name = resource.Body('name', alternate_id=True) #: The schema of the policy type. 
schema = resource.Body('schema') #: The support status of the policy type support_status = resource.Body('support_status') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/profile.py0000664000175000017500000000371400000000000022742 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Profile(resource.Resource): resource_key = 'profile' resources_key = 'profiles' base_path = '/profiles' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'sort', 'global_project', 'type', 'name' ) # Bodyerties #: The name of the profile name = resource.Body('name') #: The type of the profile. type = resource.Body('type') #: The ID of the project this profile belongs to. project_id = resource.Body('project') #: The domain ID of the profile. domain_id = resource.Body('domain') #: The ID of the user who created this profile. user_id = resource.Body('user') #: The spec of the profile. spec = resource.Body('spec', type=dict) #: A collection of key-value pairs that are attached to the profile. metadata = resource.Body('metadata', type=dict) #: Timestamp of when the profile was created. created_at = resource.Body('created_at') #: Timestamp of when the profile was last updated. 
updated_at = resource.Body('updated_at') class ProfileValidate(Profile): base_path = '/profiles/validate' allow_create = True allow_fetch = False allow_commit = False allow_delete = False allow_list = False commit_method = 'PUT' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/profile_type.py0000664000175000017500000000233700000000000024003 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource from openstack import utils class ProfileType(resource.Resource): resource_key = 'profile_type' resources_key = 'profile_types' base_path = '/profile-types' # Capabilities allow_list = True allow_fetch = True # Properties #: Name of the profile type. name = resource.Body('name', alternate_id=True) #: The schema of the profile type. 
schema = resource.Body('schema') #: The support status of the profile type support_status = resource.Body('support_status') def type_ops(self, session): url = utils.urljoin(self.base_path, self.id, 'ops') resp = session.get(url) return resp.json() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/receiver.py0000664000175000017500000000433000000000000023101 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Receiver(resource.Resource): resource_key = 'receiver' resources_key = 'receivers' base_path = '/receivers' # Capabilities allow_list = True allow_fetch = True allow_create = True allow_commit = True allow_delete = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'name', 'type', 'cluster_id', 'action', 'sort', 'global_project', user_id='user', ) # Properties #: The name of the receiver. name = resource.Body('name') #: The type of the receiver. type = resource.Body('type') #: The ID of the user who created the receiver, thus the owner of it. user_id = resource.Body('user') #: The ID of the project this receiver belongs to. project_id = resource.Body('project') #: The domain ID of the receiver. domain_id = resource.Body('domain') #: The ID of the targeted cluster. cluster_id = resource.Body('cluster_id') #: The name of the targeted action. 
action = resource.Body('action') #: Timestamp of when the receiver was created. created_at = resource.Body('created_at') #: Timestamp of when the receiver was last updated. updated_at = resource.Body('updated_at') #: The credential of the impersonated user. actor = resource.Body('actor', type=dict) #: A dictionary containing key-value pairs that are provided to the #: targeted action. params = resource.Body('params', type=dict) #: The information about the channel through which you can trigger the #: receiver hence the associated action. channel = resource.Body('channel', type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/v1/service.py0000664000175000017500000000235700000000000022744 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Service(resource.Resource): resource_key = 'service' resources_key = 'services' base_path = '/services' # Capabilities allow_list = True # Properties #: Status of service status = resource.Body('status') #: State of service state = resource.Body('state') #: Name of service binary = resource.Body('binary') #: Disabled reason of service disabled_reason = resource.Body('disabled_reason') #: Host where service runs host = resource.Body('host') #: The timestamp the service was last updated. 
#: *Type: datetime object parsed from ISO 8601 formatted string* updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/clustering/version.py0000664000175000017500000000147500000000000022443 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # capabilities allow_list = True # Properties links = resource.Body('links') status = resource.Body('status') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2413113 openstacksdk-4.0.0/openstack/common/0000775000175000017500000000000000000000000017506 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/common/__init__.py0000664000175000017500000000000000000000000021605 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/common/metadata.py0000664000175000017500000001173600000000000021650 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class MetadataMixin: id: resource.Body base_path: str _body: resource._ComponentManager #: *Type: list of tag strings* metadata = resource.Body('metadata', type=dict) def fetch_metadata(self, session): """Lists metadata set on the entity. :param session: The session to use for making this request. :return: The dictionary with metadata attached to the entity """ url = utils.urljoin(self.base_path, self.id, 'metadata') response = session.get(url) exceptions.raise_from_response(response) json = response.json() if 'metadata' in json: self._body.attributes.update({'metadata': json['metadata']}) return self def set_metadata(self, session, metadata=None, replace=False): """Sets/Replaces metadata key value pairs on the resource. :param session: The session to use for making this request. :param dict metadata: Dictionary with key-value pairs :param bool replace: Replace all resource metadata with the new object or merge new and existing. """ url = utils.urljoin(self.base_path, self.id, 'metadata') if not metadata: metadata = {} if not replace: response = session.post(url, json={'metadata': metadata}) else: response = session.put(url, json={'metadata': metadata}) exceptions.raise_from_response(response) self._body.attributes.update({'metadata': metadata}) return self def replace_metadata(self, session, metadata=None): """Replaces all metadata key value pairs on the resource. :param session: The session to use for making this request. 
:param dict metadata: Dictionary with key-value pairs :param bool replace: Replace all resource metadata with the new object or merge new and existing. """ return self.set_metadata(session, metadata, replace=True) def delete_metadata(self, session): """Removes all metadata on the entity. :param session: The session to use for making this request. """ self.set_metadata(session, None, replace=True) return self def get_metadata_item(self, session, key): """Get the single metadata item on the entity. If the metadata key does not exist a 404 will be returned :param session: The session to use for making this request. :param str key: The key of a metadata item. """ url = utils.urljoin(self.base_path, self.id, 'metadata', key) response = session.get(url) exceptions.raise_from_response( response, error_message='Metadata item does not exist' ) meta = response.json().get('meta', {}) # Here we need to potentially init metadata metadata = self.metadata or {} metadata[key] = meta.get(key) self._body.attributes.update({'metadata': metadata}) return self def set_metadata_item(self, session, key, value): """Create or replace single metadata item to the resource. :param session: The session to use for making this request. :param str key: The key for the metadata item. :param str value: The value. """ url = utils.urljoin(self.base_path, self.id, 'metadata', key) response = session.put(url, json={'meta': {key: value}}) exceptions.raise_from_response(response) # we do not want to update tags directly metadata = self.metadata metadata[key] = value self._body.attributes.update({'metadata': metadata}) return self def delete_metadata_item(self, session, key): """Removes a single metadata item from the specified resource. :param session: The session to use for making this request. :param str key: The key as a string. 
""" url = utils.urljoin(self.base_path, self.id, 'metadata', key) response = session.delete(url) exceptions.raise_from_response(response) # we do not want to update tags directly metadata = self.metadata try: if metadata: metadata.pop(key) else: metadata = {} except ValueError: pass # do nothing! self._body.attributes.update({'metadata': metadata}) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/common/quota_set.py0000664000175000017500000001165100000000000022070 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import typing as ty from openstack import exceptions from openstack import resource # ATTENTION: Please do not inherit this class for anything else then QuotaSet, # since attribute processing is here very different! class QuotaSet(resource.Resource): resource_key = 'quota_set' # ATTENTION: different services might be using different base_path base_path = '/os-quota-sets/%(project_id)s' # capabilities allow_create = True allow_fetch = True allow_delete = True allow_commit = True _query_mapping = resource.QueryParameters("usage") # NOTE(gtema) Sadly this attribute is useless in all the methods, but keep # it here extra as a reminder requires_id = False # Quota-sets are not very well designed. 
We must keep what is # there and try to process it on best effort _allow_unknown_attrs_in_body = True #: Properties #: Current reservations #: *type:dict* reservation = resource.Body('reservation', type=dict) #: Quota usage #: *type:dict* usage = resource.Body('usage', type=dict) project_id = resource.URI('project_id') def fetch( self, session, requires_id=False, base_path=None, error_message=None, **params ): return super().fetch( session, requires_id=False, base_path=base_path, error_message=error_message, **params ) def _translate_response(self, response, has_body=None, error_message=None): """Given a KSA response, inflate this instance with its data DELETE operations don't return a body, so only try to work with a body when has_body is True. This method updates attributes that correspond to headers and body on this instance and clears the dirty set. """ if has_body is None: has_body = self.has_body exceptions.raise_from_response(response, error_message=error_message) if has_body: try: body = response.json() if self.resource_key and self.resource_key in body: body = body[self.resource_key] # Do not allow keys called "self" through. Glance chose # to name a key "self", so we need to pop it out because # we can't send it through cls.existing and into the # Resource initializer. "self" is already the first # argument and is practically a reserved word. 
body.pop("self", None) # Process body_attrs to strip usage and reservation out normalized_attrs: ty.Dict[str, ty.Any] = dict( reservation={}, usage={}, ) for key, val in body.items(): if isinstance(val, dict): if 'in_use' in val: normalized_attrs['usage'][key] = val['in_use'] if 'reserved' in val: normalized_attrs['reservation'][key] = val[ 'reserved' ] if 'limit' in val: normalized_attrs[key] = val['limit'] else: normalized_attrs[key] = val self._unknown_attrs_in_body.update(normalized_attrs) self._body.attributes.update(normalized_attrs) self._body.clean() if self.commit_jsonpatch or self.allow_patch: # We need the original body to compare against self._original_body = normalized_attrs.copy() except ValueError: # Server returned not parsable response (202, 204, etc) # Do simply nothing pass headers = self._consume_header_attrs(response.headers) self._header.attributes.update(headers) self._header.clean() self._update_location() dict.update(self, self.to_dict()) def _prepare_request_body(self, patch, prepend_key): body = self._body.dirty # Ensure we never try to send meta props reservation and usage body.pop('reservation', None) body.pop('usage', None) if prepend_key and self.resource_key is not None: body = {self.resource_key: body} return body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/common/tag.py0000664000175000017500000001112500000000000020633 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class TagMixin: id: resource.Body base_path: str _body: resource._ComponentManager @classmethod def _get_session(cls, session): ... _tag_query_parameters = { 'tags': 'tags', 'any_tags': 'tags-any', 'not_tags': 'not-tags', 'not_any_tags': 'not-tags-any', } #: A list of associated tags #: *Type: list of tag strings* tags = resource.Body('tags', type=list, default=[]) def fetch_tags(self, session): """Lists tags set on the entity. :param session: The session to use for making this request. :return: The list with tags attached to the entity """ url = utils.urljoin(self.base_path, self.id, 'tags') session = self._get_session(session) response = session.get(url) exceptions.raise_from_response(response) # NOTE(gtema): since this is a common method # we can't rely on the resource_key, because tags are returned # without resource_key. Do parse response here json = response.json() if 'tags' in json: self._body.attributes.update({'tags': json['tags']}) return self def set_tags(self, session, tags=[]): """Sets/Replaces all tags on the resource. :param session: The session to use for making this request. :param list tags: List with tags to be set on the resource """ url = utils.urljoin(self.base_path, self.id, 'tags') session = self._get_session(session) response = session.put(url, json={'tags': tags}) exceptions.raise_from_response(response) self._body.attributes.update({'tags': tags}) return self def remove_all_tags(self, session): """Removes all tags on the entity. :param session: The session to use for making this request. 
""" url = utils.urljoin(self.base_path, self.id, 'tags') session = self._get_session(session) response = session.delete(url) exceptions.raise_from_response(response) self._body.attributes.update({'tags': []}) return self def check_tag(self, session, tag): """Checks if tag exists on the entity. If the tag does not exist a 404 will be returned :param session: The session to use for making this request. :param tag: The tag as a string. """ url = utils.urljoin(self.base_path, self.id, 'tags', tag) session = self._get_session(session) response = session.get(url) exceptions.raise_from_response( response, error_message='Tag does not exist' ) return self def add_tag(self, session, tag): """Adds a single tag to the resource. :param session: The session to use for making this request. :param tag: The tag as a string. """ url = utils.urljoin(self.base_path, self.id, 'tags', tag) session = self._get_session(session) response = session.put(url) exceptions.raise_from_response(response) # we do not want to update tags directly tags = self.tags tags.append(tag) self._body.attributes.update({'tags': tags}) return self def remove_tag(self, session, tag): """Removes a single tag from the specified resource. :param session: The session to use for making this request. :param tag: The tag as a string. """ url = utils.urljoin(self.base_path, self.id, 'tags', tag) session = self._get_session(session) response = session.delete(url) exceptions.raise_from_response(response) # we do not want to update tags directly tags = self.tags try: # NOTE(gtema): if tags were not fetched, but request suceeded # it is ok. Just ensure tag does not exist locally tags.remove(tag) except ValueError: pass # do nothing! 
self._body.attributes.update({'tags': tags}) return self ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2413113 openstacksdk-4.0.0/openstack/compute/0000775000175000017500000000000000000000000017672 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/__init__.py0000664000175000017500000000000000000000000021771 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/compute_service.py0000664000175000017500000000142000000000000023435 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.compute.v2 import _proxy from openstack import service_description class ComputeService(service_description.ServiceDescription): """The compute service.""" supported_versions = { '2': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2453132 openstacksdk-4.0.0/openstack/compute/v2/0000775000175000017500000000000000000000000020221 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/__init__.py0000664000175000017500000000000000000000000022320 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/_proxy.py0000664000175000017500000032007000000000000022115 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import warnings from openstack.block_storage.v3 import volume as _volume from openstack.compute.v2 import aggregate as _aggregate from openstack.compute.v2 import availability_zone from openstack.compute.v2 import extension from openstack.compute.v2 import flavor as _flavor from openstack.compute.v2 import hypervisor as _hypervisor from openstack.compute.v2 import image as _image from openstack.compute.v2 import keypair as _keypair from openstack.compute.v2 import limits from openstack.compute.v2 import migration as _migration from openstack.compute.v2 import quota_class_set as _quota_class_set from openstack.compute.v2 import quota_set as _quota_set from openstack.compute.v2 import server as _server from openstack.compute.v2 import server_action as _server_action from openstack.compute.v2 import server_diagnostics as _server_diagnostics from openstack.compute.v2 import server_group as _server_group from openstack.compute.v2 import server_interface as _server_interface from openstack.compute.v2 import server_ip from openstack.compute.v2 import server_migration as _server_migration from openstack.compute.v2 import server_remote_console as _src from openstack.compute.v2 import service as _service from openstack.compute.v2 import usage as _usage from openstack.compute.v2 import volume_attachment as _volume_attachment from openstack import exceptions from openstack.identity.v3 import project as _project from openstack.identity.v3 import user as _user from openstack.network.v2 import security_group as _sg from openstack import proxy from openstack import resource from openstack import utils from openstack import warnings as os_warnings class Proxy(proxy.Proxy): _resource_registry = { "aggregate": _aggregate.Aggregate, "availability_zone": availability_zone.AvailabilityZone, "extension": extension.Extension, "flavor": _flavor.Flavor, "hypervisor": _hypervisor.Hypervisor, "image": _image.Image, "keypair": _keypair.Keypair, "limits": limits.Limits, "migration": 
_migration.Migration, "quota_class_set": _quota_class_set.QuotaClassSet, "quota_set": _quota_set.QuotaSet, "server": _server.Server, "server_action": _server_action.ServerAction, "server_diagnostics": _server_diagnostics.ServerDiagnostics, "server_group": _server_group.ServerGroup, "server_interface": _server_interface.ServerInterface, "server_ip": server_ip.ServerIP, "server_migration": _server_migration.ServerMigration, "server_remote_console": _src.ServerRemoteConsole, "service": _service.Service, "usage": _usage.Usage, "volume_attachment": _volume_attachment.VolumeAttachment, } # ========== Extensions ========== def find_extension(self, name_or_id, ignore_missing=True): """Find a single extension :param name_or_id: The name or ID of an extension. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.compute.v2.extension.Extension` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ return self._find( extension.Extension, name_or_id, ignore_missing=ignore_missing, ) def extensions(self): """Retrieve a generator of extensions :returns: A generator of extension instances. :rtype: :class:`~openstack.compute.v2.extension.Extension` """ return self._list(extension.Extension) # ========== Flavors ========== # TODO(stephenfin): Drop 'query' parameter or apply it consistently def find_flavor( self, name_or_id, ignore_missing=True, *, get_extra_specs=False, **query, ): """Find a single flavor :param name_or_id: The name or ID of a flavor. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. 
When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param bool get_extra_specs: When set to ``True`` and extra_specs not present in the response will invoke additional API call to fetch extra_specs. :param kwargs query: Optional query parameters to be sent to limit the flavors being returned. :returns: One :class:`~openstack.compute.v2.flavor.Flavor` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ flavor = self._find( _flavor.Flavor, name_or_id, ignore_missing=ignore_missing, **query, ) if flavor and get_extra_specs and not flavor.extra_specs: flavor = flavor.fetch_extra_specs(self) return flavor def create_flavor(self, **attrs): """Create a new flavor from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.compute.v2.flavor.Flavor`, comprised of the properties on the Flavor class. :returns: The results of flavor creation :rtype: :class:`~openstack.compute.v2.flavor.Flavor` """ return self._create(_flavor.Flavor, **attrs) def delete_flavor(self, flavor, ignore_missing=True): """Delete a flavor :param flavor: The value can be either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the flavor does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent flavor. :returns: ``None`` """ self._delete(_flavor.Flavor, flavor, ignore_missing=ignore_missing) def update_flavor(self, flavor, **attrs): """Update a flavor :param flavor: Either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :param attrs: The attributes to update on the flavor represented by ``flavor``. 
:returns: The updated flavor :rtype: :class:`~openstack.compute.v2.flavor.Flavor` """ return self._update(_flavor.Flavor, flavor, **attrs) def get_flavor(self, flavor, get_extra_specs=False): """Get a single flavor :param flavor: The value can be the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :param bool get_extra_specs: When set to ``True`` and extra_specs not present in the response will invoke additional API call to fetch extra_specs. :returns: One :class:`~openstack.compute.v2.flavor.Flavor` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ flavor = self._get(_flavor.Flavor, flavor) if get_extra_specs and not flavor.extra_specs: flavor = flavor.fetch_extra_specs(self) return flavor def flavors(self, details=True, get_extra_specs=False, **query): """Return a generator of flavors :param bool details: When ``True``, returns :class:`~openstack.compute.v2.flavor.Flavor` objects, with additional attributes filled. :param bool get_extra_specs: When set to ``True`` and extra_specs not present in the response will invoke additional API call to fetch extra_specs. :param kwargs query: Optional query parameters to be sent to limit the flavors being returned. :returns: A generator of flavor objects """ base_path = '/flavors/detail' if details else '/flavors' for flv in self._list(_flavor.Flavor, base_path=base_path, **query): if get_extra_specs and not flv.extra_specs: flv = flv.fetch_extra_specs(self) yield flv def flavor_add_tenant_access(self, flavor, tenant): """Adds tenant/project access to flavor. :param flavor: Either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :param str tenant: The UUID of the tenant. :returns: One :class:`~openstack.compute.v2.flavor.Flavor` """ flavor = self._get_resource(_flavor.Flavor, flavor) return flavor.add_tenant_access(self, tenant) def flavor_remove_tenant_access(self, flavor, tenant): """Removes tenant/project access to flavor. 
:param flavor: Either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :param str tenant: The UUID of the tenant. :returns: One :class:`~openstack.compute.v2.flavor.Flavor` """ flavor = self._get_resource(_flavor.Flavor, flavor) return flavor.remove_tenant_access(self, tenant) def get_flavor_access(self, flavor): """Lists tenants who have access to private flavor :param flavor: Either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :returns: List of dicts with flavor_id and tenant_id attributes. """ flavor = self._get_resource(_flavor.Flavor, flavor) return flavor.get_access(self) def fetch_flavor_extra_specs(self, flavor): """Lists Extra Specs of a flavor :param flavor: Either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :returns: One :class:`~openstack.compute.v2.flavor.Flavor` """ flavor = self._get_resource(_flavor.Flavor, flavor) return flavor.fetch_extra_specs(self) def create_flavor_extra_specs(self, flavor, extra_specs): """Lists Extra Specs of a flavor :param flavor: Either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :param dict extra_specs: dict of extra specs :returns: One :class:`~openstack.compute.v2.flavor.Flavor` """ flavor = self._get_resource(_flavor.Flavor, flavor) return flavor.create_extra_specs(self, specs=extra_specs) def get_flavor_extra_specs_property(self, flavor, prop): """Get specific Extra Spec property of a flavor :param flavor: Either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :param str prop: Property name. :returns: String value of the requested property. 
""" flavor = self._get_resource(_flavor.Flavor, flavor) return flavor.get_extra_specs_property(self, prop) def update_flavor_extra_specs_property(self, flavor, prop, val): """Update specific Extra Spec property of a flavor :param flavor: Either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :param str prop: Property name. :param str val: Property value. :returns: String value of the requested property. """ flavor = self._get_resource(_flavor.Flavor, flavor) return flavor.update_extra_specs_property(self, prop, val) def delete_flavor_extra_specs_property(self, flavor, prop): """Delete specific Extra Spec property of a flavor :param flavor: Either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :param str prop: Property name. :returns: None """ flavor = self._get_resource(_flavor.Flavor, flavor) return flavor.delete_extra_specs_property(self, prop) # ========== Aggregates ========== def aggregates(self, **query): """Return a generator of aggregate :param kwargs query: Optional query parameters to be sent to limit the aggregates being returned. :returns: A generator of aggregate :rtype: class: `~openstack.compute.v2.aggregate.Aggregate` """ return self._list(_aggregate.Aggregate, **query) def get_aggregate(self, aggregate): """Get a single host aggregate :param aggregate: The value can be the ID of an aggregate or a :class:`~openstack.compute.v2.aggregate.Aggregate` instance. :returns: One :class:`~openstack.compute.v2.aggregate.Aggregate` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_aggregate.Aggregate, aggregate) def find_aggregate(self, name_or_id, ignore_missing=True): """Find a single aggregate :param name_or_id: The name or ID of an aggregate. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. 
When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.compute.v2.aggregate.Aggregate` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ return self._find( _aggregate.Aggregate, name_or_id, ignore_missing=ignore_missing, ) def create_aggregate(self, **attrs): """Create a new host aggregate from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.compute.v2.aggregate.Aggregate`, comprised of the properties on the Aggregate class. :returns: The results of aggregate creation :rtype: :class:`~openstack.compute.v2.aggregate.Aggregate` """ return self._create(_aggregate.Aggregate, **attrs) def update_aggregate(self, aggregate, **attrs): """Update a host aggregate :param server: Either the ID of a host aggregate or a :class:`~openstack.compute.v2.aggregate.Aggregate` instance. :param attrs: The attributes to update on the aggregate represented by ``aggregate``. :returns: The updated aggregate :rtype: :class:`~openstack.compute.v2.aggregate.Aggregate` """ return self._update(_aggregate.Aggregate, aggregate, **attrs) def delete_aggregate(self, aggregate, ignore_missing=True): """Delete a host aggregate :param keypair: The value can be either the ID of an aggregate or a :class:`~openstack.compute.v2.aggregate.Aggregate` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the aggregate does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent aggregate. 
:returns: ``None`` """ self._delete( _aggregate.Aggregate, aggregate, ignore_missing=ignore_missing, ) def add_host_to_aggregate(self, aggregate, host): """Adds a host to an aggregate :param aggregate: Either the ID of a aggregate or a :class:`~openstack.compute.v2.aggregate.Aggregate` instance. :param str host: The host to add to the aggregate :returns: One :class:`~openstack.compute.v2.aggregate.Aggregate` """ aggregate = self._get_resource(_aggregate.Aggregate, aggregate) return aggregate.add_host(self, host) def remove_host_from_aggregate(self, aggregate, host): """Removes a host from an aggregate :param aggregate: Either the ID of a aggregate or a :class:`~openstack.compute.v2.aggregate.Aggregate` instance. :param str host: The host to remove from the aggregate :returns: One :class:`~openstack.compute.v2.aggregate.Aggregate` """ aggregate = self._get_resource(_aggregate.Aggregate, aggregate) return aggregate.remove_host(self, host) def set_aggregate_metadata(self, aggregate, metadata): """Creates or replaces metadata for an aggregate :param aggregate: Either the ID of a aggregate or a :class:`~openstack.compute.v2.aggregate.Aggregate` instance. :param dict metadata: Metadata key and value pairs. The maximum size for each metadata key and value pair is 255 bytes. :returns: One :class:`~openstack.compute.v2.aggregate.Aggregate` """ aggregate = self._get_resource(_aggregate.Aggregate, aggregate) return aggregate.set_metadata(self, metadata) def aggregate_precache_images(self, aggregate, images): """Requests image precaching on an aggregate :param aggregate: Either the ID of a aggregate or a :class:`~openstack.compute.v2.aggregate.Aggregate` instance. :param images: Single image id or list of image ids. 
:returns: ``None`` """ aggregate = self._get_resource(_aggregate.Aggregate, aggregate) # We need to ensure we pass list of image IDs if isinstance(images, str): images = [images] image_data = [] for img in images: image_data.append({'id': img}) return aggregate.precache_images(self, image_data) # ========== Images ========== def delete_image(self, image, ignore_missing=True): """Delete an image :param image: The value can be either the ID of an image or a :class:`~openstack.compute.v2.image.Image` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the image does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent image. :returns: ``None`` """ warnings.warn( 'This API is a proxy to the image service and has been ' 'deprecated; use the image service proxy API instead', os_warnings.OpenStackDeprecationWarning, ) self._delete(_image.Image, image, ignore_missing=ignore_missing) # NOTE(stephenfin): We haven't added 'details' support here since this # method is deprecated def find_image(self, name_or_id, ignore_missing=True): """Find a single image :param name_or_id: The name or ID of a image. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.compute.v2.image.Image` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. 
""" warnings.warn( 'This API is a proxy to the image service and has been ' 'deprecated; use the image service proxy API instead', os_warnings.OpenStackDeprecationWarning, ) return self._find( _image.Image, name_or_id, ignore_missing=ignore_missing, ) def get_image(self, image): """Get a single image :param image: The value can be the ID of an image or a :class:`~openstack.compute.v2.image.Image` instance. :returns: One :class:`~openstack.compute.v2.image.Image` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ warnings.warn( 'This API is a proxy to the image service and has been ' 'deprecated; use the image service proxy API instead', os_warnings.OpenStackDeprecationWarning, ) return self._get(_image.Image, image) def images(self, details=True, **query): """Return a generator of images :param bool details: When ``True``, returns :class:`~openstack.compute.v2.image.Image` objects with all available properties, otherwise only basic properties are returned. *Default: ``True``* :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of image objects """ warnings.warn( 'This API is a proxy to the image service and has been ' 'deprecated; use the image service proxy API instead', os_warnings.OpenStackDeprecationWarning, ) base_path = '/images/detail' if details else None return self._list(_image.Image, base_path=base_path, **query) def _get_base_resource(self, res, base): # Metadata calls for Image and Server can work for both those # resources but also ImageDetail and ServerDetail. If we get # either class, use it, otherwise create an instance of the base. if isinstance(res, base): return res else: return base(id=res) def get_image_metadata(self, image): """Return a dictionary of metadata for an image :param image: Either the ID of an image or a :class:`~openstack.compute.v2.image.Image` instance. 
:returns: A :class:`~openstack.compute.v2.image.Image` with only the image's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.compute.v2.image.Image` """ res = self._get_base_resource(image, _image.Image) return res.fetch_metadata(self) def set_image_metadata(self, image, **metadata): """Update metadata for an image :param image: Either the ID of an image or a :class:`~openstack.compute.v2.image.Image` instance. :param kwargs metadata: Key/value pairs to be updated in the image's metadata. No other metadata is modified by this call. All keys and values are stored as Unicode. :returns: A :class:`~openstack.compute.v2.image.Image` with only the image's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.compute.v2.image.Image` """ res = self._get_base_resource(image, _image.Image) return res.set_metadata(self, metadata=metadata) def delete_image_metadata(self, image, keys=None): """Delete metadata for an image Note: This method will do a HTTP DELETE request for every key in keys. :param image: Either the ID of an image or a :class:`~openstack.compute.v2.image.Image` instance. :param list keys: The keys to delete. If left empty complete metadata will be removed. :rtype: ``None`` """ res = self._get_base_resource(image, _image.Image) if keys is not None: # Create a set as a snapshot of keys to avoid "changed during # iteration" for key in set(keys): res.delete_metadata_item(self, key) else: res.delete_metadata(self) # ========== Keypairs ========== def create_keypair(self, **attrs): """Create a new keypair from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.compute.v2.keypair.Keypair`, comprised of the properties on the Keypair class. 
:returns: The results of keypair creation :rtype: :class:`~openstack.compute.v2.keypair.Keypair` """ return self._create(_keypair.Keypair, **attrs) def delete_keypair(self, keypair, ignore_missing=True, user_id=None): """Delete a keypair :param keypair: The value can be either the ID of a keypair or a :class:`~openstack.compute.v2.keypair.Keypair` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the keypair does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent keypair. :param str user_id: Optional user_id owning the keypair :returns: ``None`` """ attrs = {'user_id': user_id} if user_id else {} self._delete( _keypair.Keypair, keypair, ignore_missing=ignore_missing, **attrs, ) def get_keypair(self, keypair, user_id=None): """Get a single keypair :param keypair: The value can be the ID of a keypair or a :class:`~openstack.compute.v2.keypair.Keypair` instance. :param str user_id: Optional user_id owning the keypair :returns: One :class:`~openstack.compute.v2.keypair.Keypair` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ attrs = {'user_id': user_id} if user_id else {} return self._get(_keypair.Keypair, keypair, **attrs) def find_keypair(self, name_or_id, ignore_missing=True, *, user_id=None): """Find a single keypair :param name_or_id: The name or ID of a keypair. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param str user_id: Optional user_id owning the keypair :returns: One :class:`~openstack.compute.v2.keypair.Keypair` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
:raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ attrs = {'user_id': user_id} if user_id else {} return self._find( _keypair.Keypair, name_or_id, ignore_missing=ignore_missing, **attrs, ) def keypairs(self, **query): """Return a generator of keypairs :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of keypair objects :rtype: :class:`~openstack.compute.v2.keypair.Keypair` """ return self._list(_keypair.Keypair, **query) # ========== Limits ========== def get_limits(self, **query): """Retrieve limits that are applied to the project's account :returns: A Limits object, including both :class:`~openstack.compute.v2.limits.AbsoluteLimits` and :class:`~openstack.compute.v2.limits.RateLimits` :rtype: :class:`~openstack.compute.v2.limits.Limits` """ res = self._get_resource(limits.Limits, None) return res.fetch(self, **query) # ========== Servers ========== def create_server(self, **attrs): """Create a new server from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.compute.v2.server.Server`, comprised of the properties on the Server class. :returns: The results of server creation :rtype: :class:`~openstack.compute.v2.server.Server` """ return self._create(_server.Server, **attrs) def delete_server(self, server, ignore_missing=True, force=False): """Delete a server :param server: The value can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the server does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent server :param bool force: When set to ``True``, the server deletion will be forced immediately. 
:returns: ``None`` """ if force: server = self._get_resource(_server.Server, server) server.force_delete(self) else: self._delete(_server.Server, server, ignore_missing=ignore_missing) def find_server( self, name_or_id, ignore_missing=True, *, details=True, all_projects=False, ): """Find a single server :param name_or_id: The name or ID of a server. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param bool details: When set to ``False`` instances with only basic data will be returned. The default, ``True``, will cause instances with full data to be returned. :param bool all_projects: When set to ``True``, search for server by name across all projects. Note that this will likely result in a higher chance of duplicates. Admin-only by default. :returns: One :class:`~openstack.compute.v2.server.Server` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ query = {} if all_projects: query['all_projects'] = True list_base_path = '/servers/detail' if details else None return self._find( _server.Server, name_or_id, ignore_missing=ignore_missing, list_base_path=list_base_path, **query, ) def get_server(self, server): """Get a single server :param server: The value can be the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: One :class:`~openstack.compute.v2.server.Server` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_server.Server, server) def servers(self, details=True, all_projects=False, **query): """Retrieve a generator of servers :param bool details: When set to ``False`` instances with only basic data will be returned. 
The default, ``True``, will cause instances with full data to be returned. :param bool all_projects: When set to ``True``, lists servers from all projects. Admin-only by default. :param kwargs query: Optional query parameters to be sent to limit the servers being returned. Available parameters can be seen under https://docs.openstack.org/api-ref/compute/#list-servers :returns: A generator of server instances. """ if all_projects: query['all_projects'] = True base_path = '/servers/detail' if details else None return self._list(_server.Server, base_path=base_path, **query) def update_server(self, server, **attrs): """Update a server :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param attrs: The attributes to update on the server represented by ``server``. :returns: The updated server :rtype: :class:`~openstack.compute.v2.server.Server` """ return self._update(_server.Server, server, **attrs) def change_server_password(self, server, new_password): """Change the administrator password :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param str new_password: The new password to be set. :returns: None """ server = self._get_resource(_server.Server, server) server.change_password(self, new_password) def get_server_password(self, server): """Get the administrator password :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: encrypted password. """ server = self._get_resource(_server.Server, server) return server.get_password(self) def clear_server_password(self, server): """Clear the administrator password :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. 
:returns: None """ server = self._get_resource(_server.Server, server) server.clear_password(self) def reset_server_state(self, server, state): """Reset the state of server :param server: The server can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server`. :param state: The state of the server to be set, `active` or `error` are valid. :returns: None """ res = self._get_base_resource(server, _server.Server) res.reset_state(self, state) def reboot_server(self, server, reboot_type): """Reboot a server :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param str reboot_type: The type of reboot to perform. "HARD" and "SOFT" are the current options. :returns: None """ server = self._get_resource(_server.Server, server) server.reboot(self, reboot_type) def rebuild_server(self, server, image, **attrs): """Rebuild a server :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param str name: The name of the server :param str admin_password: The administrator password :param bool preserve_ephemeral: Indicates whether the server is rebuilt with the preservation of the ephemeral partition. *Default: False* :param str image: The id of an image to rebuild with. *Default: None* :param str access_ipv4: The IPv4 address to rebuild with. *Default: None* :param str access_ipv6: The IPv6 address to rebuild with. *Default: None* :param dict metadata: A dictionary of metadata to rebuild with. *Default: None* :param personality: A list of dictionaries, each including a **path** and **contents** key, to be injected into the rebuilt server at launch. *Default: None* :returns: The rebuilt :class:`~openstack.compute.v2.server.Server` instance. 
""" server = self._get_resource(_server.Server, server) return server.rebuild(self, image=image, **attrs) def resize_server(self, server, flavor): """Resize a server :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param flavor: Either the ID of a flavor or a :class:`~openstack.compute.v2.flavor.Flavor` instance. :returns: None """ server = self._get_resource(_server.Server, server) flavor_id = resource.Resource._get_id(flavor) server.resize(self, flavor_id) def confirm_server_resize(self, server): """Confirm a server resize :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.confirm_resize(self) def revert_server_resize(self, server): """Revert a server resize :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.revert_resize(self) def create_server_image( self, server, name, metadata=None, wait=False, timeout=120, ): """Create an image from a server :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param str name: The name of the image to be created. :param dict metadata: A dictionary of metadata to be set on the image. :returns: :class:`~openstack.image.v2.image.Image` object. """ server = self._get_resource(_server.Server, server) image_id = server.create_image(self, name, metadata) image = self._connection.get_image(image_id) if not wait: return image return self._connection.wait_for_image(image, timeout=timeout) def backup_server(self, server, name, backup_type, rotation): """Backup a server :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param name: The name of the backup image. :param backup_type: The type of the backup, for example, daily. 
:param rotation: The rotation of the back up image, the oldest image will be removed when image count exceed the rotation count. :returns: None """ server = self._get_resource(_server.Server, server) server.backup(self, name, backup_type, rotation) def pause_server(self, server): """Pauses a server and changes its status to ``PAUSED``. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.pause(self) def unpause_server(self, server): """Unpauses a paused server and changes its status to ``ACTIVE``. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.unpause(self) def suspend_server(self, server): """Suspends a server and changes its status to ``SUSPENDED``. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.suspend(self) def resume_server(self, server): """Resumes a suspended server and changes its status to ``ACTIVE``. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.resume(self) def lock_server(self, server, locked_reason=None): """Locks a server. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param locked_reason: The reason behind locking the server. Limited to 255 characters in length. :returns: None """ server = self._get_resource(_server.Server, server) server.lock(self, locked_reason=locked_reason) def unlock_server(self, server): """Unlocks a locked server. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. 
:returns: None """ server = self._get_resource(_server.Server, server) server.unlock(self) def rescue_server(self, server, admin_pass=None, image_ref=None): """Puts a server in rescue mode and changes it status to ``RESCUE``. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param admin_pass: The password for the rescued server. If you omit this parameter, the operation generates a new password. :param image_ref: The image reference to use to rescue your server. This can be the image ID or its full URL. If you omit this parameter, the base image reference will be used. :returns: None """ server = self._get_resource(_server.Server, server) server.rescue(self, admin_pass=admin_pass, image_ref=image_ref) def unrescue_server(self, server): """Unrescues a server and changes its status to ``ACTIVE``. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.unrescue(self) def evacuate_server( self, server, host=None, admin_pass=None, force=None, *, on_shared_storage=None, ): """Evacuates a server from a failed host to a new host. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param host: An optional parameter specifying the name or ID of the host to which the server is evacuated. :param admin_pass: An optional parameter specifying the administrative password to access the evacuated or rebuilt server. :param force: Force an evacuation by not verifying the provided destination host by the scheduler. (New in API version 2.29). :param on_shared_storage: Whether the host is using shared storage. 
(Optional) (Only supported before API version 2.14) :returns: None """ server = self._get_resource(_server.Server, server) server.evacuate( self, host=host, admin_pass=admin_pass, force=force, on_shared_storage=on_shared_storage, ) def start_server(self, server): """Starts a stopped server and changes its state to ``ACTIVE``. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.start(self) def stop_server(self, server): """Stops a running server and changes its state to ``SHUTOFF``. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.stop(self) def restore_server(self, server): """Restore a soft-deleted server. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.restore(self) def shelve_server(self, server): """Shelves a server. All associated data and resources are kept but anything still in memory is not retained. Policy defaults enable only users with administrative role or the owner of the server to perform this operation. Cloud provides could change this permission though. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.shelve(self) def shelve_offload_server(self, server): """Shelve-offloads, or removes, a server Data and resource associations are deleted. Policy defaults enable only users with administrative role or the owner of the server to perform this operation. Cloud provides could change this permission though. Note that in some clouds, shelved servers are automatically offloaded, sometimes after a certain time period. 
:param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.shelve_offload(self) def unshelve_server(self, server, *, host=None): """Unshelves or restores a shelved server. Policy defaults enable only users with administrative role or the owner of the server to perform this operation. Cloud provides could change this permission though. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param host: An optional parameter specifying the name the compute host to unshelve to. (New in API version 2.91). :returns: None """ server = self._get_resource(_server.Server, server) server.unshelve(self, host=host) def trigger_server_crash_dump(self, server): """Trigger a crash dump in a server. When a server starts behaving oddly at a fundamental level, it maybe be useful to get a kernel level crash dump to debug further. The crash dump action forces a crash dump followed by a system reboot of the server. Once the server comes back online, you can find a Kernel Crash Dump file in a certain location of the filesystem. For example, for Ubuntu you can find it in the /var/crash directory. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.trigger_crash_dump(self) def add_tag_to_server(self, server, tag): """Add a tag to a server. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param tag: The tag to add. :returns: None """ server = self._get_resource(_server.Server, server) server.add_tag(self, tag) def remove_tag_from_server(self, server, tag): """Remove a tag from a server. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param tag: The tag to remove. 
:returns: None """ server = self._get_resource(_server.Server, server) server.remove_tag(self, tag) def remove_tags_from_server(self, server): """Remove all tags from a server. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param tag: The tag to remove. :returns: None """ server = self._get_resource(_server.Server, server) server.remove_all_tags(self) # ========== Server security groups ========== def fetch_server_security_groups(self, server): """Fetch security groups with details for a server. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: updated :class:`~openstack.compute.v2.server.Server` instance """ server = self._get_resource(_server.Server, server) return server.fetch_security_groups(self) def add_security_group_to_server(self, server, security_group): """Add a security group to a server :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param security_group: Either the ID, Name of a security group or a :class:`~openstack.network.v2.security_group.SecurityGroup` instance. :returns: None """ server = self._get_resource(_server.Server, server) security_group = self._get_resource(_sg.SecurityGroup, security_group) server.add_security_group(self, security_group.name) def remove_security_group_from_server(self, server, security_group): """Remove a security group from a server :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param security_group: Either the ID of a security group or a :class:`~openstack.network.v2.security_group.SecurityGroup` instance. 
:returns: None """ server = self._get_resource(_server.Server, server) security_group = self._get_resource(_sg.SecurityGroup, security_group) server.remove_security_group(self, security_group.name) # ========== Server IPs ========== def add_fixed_ip_to_server(self, server, network_id): """Adds a fixed IP address to a server instance. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param network_id: The ID of the network from which a fixed IP address is about to be allocated. :returns: None """ server = self._get_resource(_server.Server, server) server.add_fixed_ip(self, network_id) def remove_fixed_ip_from_server(self, server, address): """Removes a fixed IP address from a server instance. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param address: The fixed IP address to be disassociated from the server. :returns: None """ server = self._get_resource(_server.Server, server) server.remove_fixed_ip(self, address) def add_floating_ip_to_server(self, server, address, fixed_address=None): """Adds a floating IP address to a server instance. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param address: The floating IP address to be added to the server. :param fixed_address: The fixed IP address to be associated with the floating IP address. Used when the server is connected to multiple networks. :returns: None """ server = self._get_resource(_server.Server, server) server.add_floating_ip(self, address, fixed_address=fixed_address) def remove_floating_ip_from_server(self, server, address): """Removes a floating IP address from a server instance. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param address: The floating IP address to be disassociated from the server. 
:returns: None """ server = self._get_resource(_server.Server, server) server.remove_floating_ip(self, address) # ========== Server Interfaces ========== def create_server_interface(self, server, **attrs): """Create a new server interface from attributes :param server: The server can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the interface belongs to. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.compute.v2.server_interface.ServerInterface`, comprised of the properties on the ServerInterface class. :returns: The results of server interface creation :rtype: :class:`~openstack.compute.v2.server_interface.ServerInterface` """ server_id = resource.Resource._get_id(server) return self._create( _server_interface.ServerInterface, server_id=server_id, **attrs, ) # TODO(stephenfin): Does this work? There's no 'value' parameter for the # call to '_delete' def delete_server_interface( self, server_interface, server=None, ignore_missing=True, ): """Delete a server interface :param server_interface: The value can be either the ID of a server interface or a :class:`~openstack.compute.v2.server_interface.ServerInterface` instance. :param server: This parameter need to be specified when ServerInterface ID is given as value. It can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the interface belongs to. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the server interface does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent server interface. 
:returns: ``None`` """ server_id = self._get_uri_attribute( server_interface, server, "server_id", ) server_interface = resource.Resource._get_id(server_interface) self._delete( _server_interface.ServerInterface, server_interface, server_id=server_id, ignore_missing=ignore_missing, ) def get_server_interface(self, server_interface, server=None): """Get a single server interface :param server_interface: The value can be the ID of a server interface or a :class:`~openstack.compute.v2.server_interface.ServerInterface` instance. :param server: This parameter need to be specified when ServerInterface ID is given as value. It can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the interface belongs to. :returns: One :class:`~openstack.compute.v2.server_interface.ServerInterface` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ server_id = self._get_uri_attribute( server_interface, server, "server_id", ) server_interface = resource.Resource._get_id(server_interface) return self._get( _server_interface.ServerInterface, server_id=server_id, port_id=server_interface, ) def server_interfaces(self, server, **query): """Return a generator of server interfaces :param server: The server can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server`. :param query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of ServerInterface objects :rtype: :class:`~openstack.compute.v2.server_interface.ServerInterface` """ server_id = resource.Resource._get_id(server) return self._list( _server_interface.ServerInterface, server_id=server_id, **query, ) def server_ips(self, server, network_label=None): """Return a generator of server IPs :param server: The server can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server`. :param network_label: The name of a particular network to list IP addresses from. 
:returns: A generator of ServerIP objects :rtype: :class:`~openstack.compute.v2.server_ip.ServerIP` """ server_id = resource.Resource._get_id(server) return self._list( server_ip.ServerIP, server_id=server_id, network_label=network_label, ) def availability_zones(self, details=False): """Return a generator of availability zones :param bool details: Return extra details about the availability zones. This defaults to `False` as it generally requires extra permission. :returns: A generator of availability zone :rtype: :class:`~openstack.compute.v2.availability_zone.AvailabilityZone` """ base_path = '/os-availability-zone/detail' if details else None return self._list( availability_zone.AvailabilityZone, base_path=base_path, ) # ========== Server Metadata ========== def get_server_metadata(self, server): """Return a dictionary of metadata for a server :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` or :class:`~openstack.compute.v2.server.ServerDetail` instance. :returns: A :class:`~openstack.compute.v2.server.Server` with the server's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.compute.v2.server.Server` """ res = self._get_base_resource(server, _server.Server) return res.fetch_metadata(self) def set_server_metadata(self, server, **metadata): """Update metadata for a server :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param kwargs metadata: Key/value pairs to be updated in the server's metadata. No other metadata is modified by this call. All keys and values are stored as Unicode. :returns: A :class:`~openstack.compute.v2.server.Server` with only the server's metadata. All keys and values are Unicode text. 
:rtype: :class:`~openstack.compute.v2.server.Server` """ res = self._get_base_resource(server, _server.Server) return res.set_metadata(self, metadata=metadata) def delete_server_metadata(self, server, keys=None): """Delete metadata for a server Note: This method will do a HTTP DELETE request for every key in keys. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param list keys: The keys to delete. If left empty complete metadata will be removed. :rtype: ``None`` """ res = self._get_base_resource(server, _server.Server) if keys is not None: # Create a set as a snapshot of keys to avoid "changed during # iteration" for key in set(keys): res.delete_metadata_item(self, key) else: res.delete_metadata(self) # ========== Server Groups ========== def create_server_group(self, **attrs): """Create a new server group from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.compute.v2.server_group.ServerGroup`, comprised of the properties on the ServerGroup class. :returns: The results of server group creation :rtype: :class:`~openstack.compute.v2.server_group.ServerGroup` """ return self._create(_server_group.ServerGroup, **attrs) def delete_server_group(self, server_group, ignore_missing=True): """Delete a server group :param server_group: The value can be either the ID of a server group or a :class:`~openstack.compute.v2.server_group.ServerGroup` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the server group does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent server group. 
:returns: ``None`` """ self._delete( _server_group.ServerGroup, server_group, ignore_missing=ignore_missing, ) def find_server_group( self, name_or_id, ignore_missing=True, *, all_projects=False, ): """Find a single server group :param name_or_id: The name or ID of a server group. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param bool all_projects: When set to ``True``, search for server groups by name across all projects. Note that this will likely result in a higher chance of duplicates. Admin-only by default. :returns: One :class:`~openstack.compute.v2.server_group.ServerGroup` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ query = {} if all_projects: query['all_projects'] = True return self._find( _server_group.ServerGroup, name_or_id, ignore_missing=ignore_missing, **query, ) def get_server_group(self, server_group): """Get a single server group :param server_group: The value can be the ID of a server group or a :class:`~openstack.compute.v2.server_group.ServerGroup` instance. :returns: A :class:`~openstack.compute.v2.server_group.ServerGroup` object. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_server_group.ServerGroup, server_group) def server_groups(self, *, all_projects=False, **query): """Return a generator of server groups :param bool all_projects: When set to ``True``, lists servers groups from all projects. Admin-only by default. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. 
:returns: A generator of ServerGroup objects :rtype: :class:`~openstack.compute.v2.server_group.ServerGroup` """ if all_projects: query['all_projects'] = True return self._list(_server_group.ServerGroup, **query) # ========== Hypervisors ========== def hypervisors(self, details=False, **query): """Return a generator of hypervisors :param bool details: When set to the default, ``False``, :class:`~openstack.compute.v2.hypervisor.Hypervisor` instances will be returned with only basic information populated. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of hypervisor :rtype: class: `~openstack.compute.v2.hypervisor.Hypervisor` """ base_path = '/os-hypervisors/detail' if details else None if ( 'hypervisor_hostname_pattern' in query and not utils.supports_microversion(self, '2.53') ): # Until 2.53 we need to use other API base_path = '/os-hypervisors/{pattern}/search'.format( pattern=query.pop('hypervisor_hostname_pattern') ) return self._list(_hypervisor.Hypervisor, base_path=base_path, **query) def find_hypervisor( self, name_or_id, ignore_missing=True, *, details=True, ): """Find a single hypervisor :param name_or_id: The name or ID of a hypervisor :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param bool details: When set to ``False`` instances with only basic data will be returned. The default, ``True``, will cause instances with full data to be returned. :returns: One: class:`~openstack.compute.v2.hypervisor.Hypervisor` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. 
""" list_base_path = '/os-hypervisors/detail' if details else None return self._find( _hypervisor.Hypervisor, name_or_id, list_base_path=list_base_path, ignore_missing=ignore_missing, ) def get_hypervisor(self, hypervisor): """Get a single hypervisor :param hypervisor: The value can be the ID of a hypervisor or a :class:`~openstack.compute.v2.hypervisor.Hypervisor` instance. :returns: A :class:`~openstack.compute.v2.hypervisor.Hypervisor` object. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_hypervisor.Hypervisor, hypervisor) def get_hypervisor_uptime(self, hypervisor): """Get uptime information for hypervisor :param hypervisor: The value can be the ID of a hypervisor or a :class:`~openstack.compute.v2.hypervisor.Hypervisor` instance. :returns: A :class:`~openstack.compute.v2.hypervisor.Hypervisor` object. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ hypervisor = self._get_resource(_hypervisor.Hypervisor, hypervisor) return hypervisor.get_uptime(self) # ========== Services ========== def update_service_forced_down( self, service, host=None, binary=None, forced=True, ): """Update service forced_down information :param service: Either the ID of a service or a :class:`~openstack.compute.v2.service.Service` instance. :param str host: The host where service runs. :param str binary: The name of service. :param bool forced: Whether or not this service was forced down manually by an administrator after the service was fenced. 
:returns: Updated service instance :rtype: class: `~openstack.compute.v2.service.Service` """ if utils.supports_microversion(self, '2.53'): return self.update_service(service, forced_down=forced) service = self._get_resource(_service.Service, service) if (not host or not binary) and ( not service.host or not service.binary ): raise ValueError( 'Either service instance should have host and binary ' 'or they should be passed' ) service.set_forced_down(self, host, binary, forced) force_service_down = update_service_forced_down def disable_service( self, service, host=None, binary=None, disabled_reason=None, ): """Disable a service :param service: Either the ID of a service or a :class:`~openstack.compute.v2.service.Service` instance. :param str host: The host where service runs. :param str binary: The name of service. :param str disabled_reason: The reason of force down a service. :returns: Updated service instance :rtype: class: `~openstack.compute.v2.service.Service` """ if utils.supports_microversion(self, '2.53'): attrs = {'status': 'disabled'} if disabled_reason: attrs['disabled_reason'] = disabled_reason return self.update_service(service, **attrs) service = self._get_resource(_service.Service, service) return service.disable(self, host, binary, disabled_reason) def enable_service(self, service, host=None, binary=None): """Enable a service :param service: Either the ID of a service or a :class:`~openstack.compute.v2.service.Service` instance. :param str host: The host where service runs. :param str binary: The name of service. 
:returns: Updated service instance :rtype: class: `~openstack.compute.v2.service.Service` """ if utils.supports_microversion(self, '2.53'): return self.update_service(service, status='enabled') service = self._get_resource(_service.Service, service) return service.enable(self, host, binary) def services(self, **query): """Return a generator of service :params dict query: Query parameters :returns: A generator of service :rtype: class: `~openstack.compute.v2.service.Service` """ return self._list(_service.Service, **query) def find_service(self, name_or_id, ignore_missing=True, **query): """Find a service from name or id to get the corresponding info :param name_or_id: The name or id of a service :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Additional attributes like 'host' :returns: One: class:`~openstack.compute.v2.service.Service` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ return self._find( _service.Service, name_or_id, ignore_missing=ignore_missing, **query, ) def delete_service(self, service, ignore_missing=True): """Delete a service :param service: The value can be either the ID of a service or a :class:`~openstack.compute.v2.service.Service` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the service does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent service. 
:returns: ``None`` """ self._delete(_service.Service, service, ignore_missing=ignore_missing) def update_service(self, service, **attrs): """Update a service :param service: Either the ID of a service or a :class:`~openstack.compute.v2.service.Service` instance. :param attrs: The attributes to update on the service represented by ``service``. :returns: The updated service :rtype: :class:`~openstack.compute.v2.service.Service` """ if utils.supports_microversion(self, '2.53'): return self._update(_service.Service, service, **attrs) raise exceptions.SDKException( 'Method require at least microversion 2.53' ) # ========== Volume Attachments ========== # TODO(stephenfin): Make the volume argument required in 2.0 def create_volume_attachment(self, server, volume=None, **attrs): """Create a new volume attachment from attributes :param server: The value can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the volume is attached to. :param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.compute.v2.volume_attachment.VolumeAttachment`, comprised of the properties on the VolumeAttachment class. 
:returns: The results of volume attachment creation :rtype: :class:`~openstack.compute.v2.volume_attachment.VolumeAttachment` """ # if the user didn't pass the new 'volume' argument, they're probably # calling things using a legacy parameter if volume is None: # there are two ways to pass this legacy parameter: either using # the openstacksdk alias, 'volume_id', or using the real nova field # name, 'volumeId' if 'volume_id' in attrs: volume_id = attrs.pop('volume_id') elif 'volumeId' in attrs: volume_id = attrs.pop('volumeId') else: # the user has used neither the new way nor the old way so they # should start using the new way # NOTE(stephenfin): we intentionally mimic the behavior of a # missing positional parameter in stdlib # https://github.com/python/cpython/blob/v3.10.0/Lib/inspect.py#L1464-L1467 raise TypeError( 'create_volume_attachment() missing 1 required positional ' 'argument: volume' ) # encourage users to the new way so we can eventually remove this # mess of logic deprecation_msg = ( 'This method was called with a volume_id or volumeId ' 'argument. This is legacy behavior that will be removed in ' 'a future version. Update callers to use a volume argument.' ) warnings.warn( deprecation_msg, os_warnings.OpenStackDeprecationWarning, ) else: volume_id = resource.Resource._get_id(volume) server_id = resource.Resource._get_id(server) return self._create( _volume_attachment.VolumeAttachment, server_id=server_id, volume_id=volume_id, **attrs, ) def update_volume_attachment( self, server, volume, volume_id=None, **attrs, ): """Update a volume attachment Note that the underlying API expects a volume ID, not a volume attachment ID. There is currently no way to update volume attachments by their own ID. :param server: The value can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the volume is attached to. 
:param volume: The value can be either the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param volume_id: The ID of a volume to swap to. If this is not specified, we will default to not swapping the volume. :param attrs: The attributes to update on the volume attachment represented by ``volume_attachment``. :returns: ``None`` """ new_volume_id = volume_id server_id = resource.Resource._get_id(server) volume_id = resource.Resource._get_id(volume) if new_volume_id is None: new_volume_id = volume_id return self._update( _volume_attachment.VolumeAttachment, id=volume_id, server_id=server_id, volume_id=new_volume_id, **attrs, ) # TODO(stephenfin): Remove this hack in openstacksdk 2.0 def _verify_server_volume_args(self, server, volume): deprecation_msg = ( 'The server and volume arguments to this function appear to ' 'be backwards and have been reversed. This is a breaking ' 'change introduced in openstacksdk 1.0. This shim will be ' 'removed in a future version' ) # if we have even partial type information and things look as they # should, we can assume the user did the right thing if isinstance(server, _server.Server) or isinstance( volume, _volume.Volume ): return server, volume # conversely, if there's type info and things appear off, tell the user if isinstance(server, _volume.Volume) or isinstance( volume, _server.Server ): warnings.warn( deprecation_msg, os_warnings.OpenStackDeprecationWarning, ) return volume, server # without type info we have to try a find the server corresponding to # the provided ID and validate it if self.find_server(server, ignore_missing=True) is not None: return server, volume else: warnings.warn( deprecation_msg, os_warnings.OpenStackDeprecationWarning, ) return volume, server def delete_volume_attachment(self, server, volume, ignore_missing=True): """Delete a volume attachment Note that the underlying API expects a volume ID, not a volume attachment ID. 
There is currently no way to delete volume attachments by their own ID. :param server: The value can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the volume is attached to. :param volume: The value can be the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the volume attachment does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent volume attachment. :returns: ``None`` """ server, volume = self._verify_server_volume_args(server, volume) server_id = resource.Resource._get_id(server) volume_id = resource.Resource._get_id(volume) self._delete( _volume_attachment.VolumeAttachment, id=volume_id, server_id=server_id, ignore_missing=ignore_missing, ) def get_volume_attachment(self, server, volume): """Get a single volume attachment Note that the underlying API expects a volume ID, not a volume attachment ID. There is currently no way to retrieve volume attachments by their own ID. :param server: The value can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the volume is attached to. :param volume: The value can be the ID of a volume or a :class:`~openstack.block_storage.v3.volume.Volume` instance. :returns: One :class:`~openstack.compute.v2.volume_attachment.VolumeAttachment` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ server_id = resource.Resource._get_id(server) volume_id = resource.Resource._get_id(volume) return self._get( _volume_attachment.VolumeAttachment, id=volume_id, server_id=server_id, ) def volume_attachments(self, server, **query): """Return a generator of volume attachments :param server: The server can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server`. 
:params dict query: Query parameters :returns: A generator of VolumeAttachment objects :rtype: :class:`~openstack.compute.v2.volume_attachment.VolumeAttachment` """ server_id = resource.Resource._get_id(server) return self._list( _volume_attachment.VolumeAttachment, server_id=server_id, **query, ) # ========== Server Migrations ========== def migrate_server(self, server): """Migrate a server from one host to another :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) server.migrate(self) def live_migrate_server( self, server, host=None, force=False, block_migration=None, ): """Live migrate a server from one host to target host :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param str host: The host to which to migrate the server. If the Nova service is too old, the host parameter implies force=True which causes the Nova scheduler to be bypassed. On such clouds, a ``ValueError`` will be thrown if ``host`` is given without ``force``. :param bool force: Force a live-migration by not verifying the provided destination host by the scheduler. This is unsafe and not recommended. :param block_migration: Perform a block live migration to the destination host by the scheduler. Can be 'auto', True or False. Some clouds are too old to support 'auto', in which case a ValueError will be thrown. If omitted, the value will be 'auto' on clouds that support it, and False on clouds that do not. 
:returns: None """ server = self._get_resource(_server.Server, server) server.live_migrate( self, host, force=force, block_migration=block_migration, ) def abort_server_migration( self, server_migration, server, ignore_missing=True, ): """Abort an in-progress server migration :param server_migration: The value can be either the ID of a server migration or a :class:`~openstack.compute.v2.server_migration.ServerMigration` instance. :param server: This parameter needs to be specified when ServerMigration ID is given as value. It can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the migration belongs to. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the server migration does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent server migration. :returns: ``None`` """ server_id = self._get_uri_attribute( server_migration, server, 'server_id', ) server_migration = resource.Resource._get_id(server_migration) self._delete( _server_migration.ServerMigration, server_migration, server_id=server_id, ignore_missing=ignore_missing, ) def force_complete_server_migration(self, server_migration, server=None): """Force complete an in-progress server migration :param server_migration: The value can be either the ID of a server migration or a :class:`~openstack.compute.v2.server_migration.ServerMigration` instance. :param server: This parameter needs to be specified when ServerMigration ID is given as value. It can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the migration belongs to. 
:returns: ``None`` """ server_id = self._get_uri_attribute( server_migration, server, 'server_id', ) server_migration = self._get_resource( _server_migration.ServerMigration, server_migration, server_id=server_id, ) server_migration.force_complete(self) def get_server_migration( self, server_migration, server, ignore_missing=True, ): """Get a single server migration :param server_migration: The value can be the ID of a server migration or a :class:`~openstack.compute.v2.server_migration.ServerMigration` instance. :param server: This parameter need to be specified when ServerMigration ID is given as value. It can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the migration belongs to. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the server migration does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent server migration. :returns: One :class:`~openstack.compute.v2.server_migration.ServerMigration` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ server_id = self._get_uri_attribute( server_migration, server, 'server_id', ) server_migration = resource.Resource._get_id(server_migration) return self._get( _server_migration.ServerMigration, server_migration, server_id=server_id, ignore_missing=ignore_missing, ) def server_migrations(self, server): """Return a generator of migrations for a server. :param server: The server can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server`. 
:returns: A generator of ServerMigration objects :rtype: :class:`~openstack.compute.v2.server_migration.ServerMigration` """ server_id = resource.Resource._get_id(server) return self._list( _server_migration.ServerMigration, server_id=server_id, ) # ========== Migrations ========== def migrations(self, **query): """Return a generator of migrations for all servers. :param kwargs query: Optional query parameters to be sent to limit the migrations being returned. :returns: A generator of Migration objects :rtype: :class:`~openstack.compute.v2.migration.Migration` """ return self._list(_migration.Migration, **query) # ========== Server diagnostics ========== def get_server_diagnostics(self, server): """Get a single server diagnostics :param server: This parameter need to be specified when ServerInterface ID is given as value. It can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the interface belongs to. :returns: One :class:`~openstack.compute.v2.server_diagnostics.ServerDiagnostics` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ server_id = self._get_resource(_server.Server, server).id return self._get( _server_diagnostics.ServerDiagnostics, server_id=server_id, requires_id=False, ) # ========== Project usage ============ def usages(self, start=None, end=None, **query): """Get project usages. :param datetime.datetime start: Usage range start date. :param datetime.datetime end: Usage range end date. :param dict query: Additional query parameters to use. :returns: A list of compute ``Usage`` objects. """ if start is not None: query['start'] = start.isoformat() if end is not None: query['end'] = end.isoformat() return self._list(_usage.Usage, **query) def get_usage(self, project, start=None, end=None, **query): """Get usage for a single project. :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the usage should be retrieved. 
:param datetime.datetime start: Usage range start date. :param datetime.datetime end: Usage range end date. :param dict query: Additional query parameters to use. :returns: A compute ``Usage`` object. """ project = self._get_resource(_project.Project, project) if start is not None: query['start'] = start.isoformat() if end is not None: query['end'] = end.isoformat() res = self._get_resource(_usage.Usage, project.id) return res.fetch(self, **query) # ========== Server consoles ========== def create_server_remote_console(self, server, **attrs): """Create a remote console on the server. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :returns: One :class:`~openstack.compute.v2.server_remote_console.ServerRemoteConsole` """ server_id = resource.Resource._get_id(server) return self._create( _src.ServerRemoteConsole, server_id=server_id, **attrs, ) def get_server_console_url(self, server, console_type): """Create a remote console on the server. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param console_type: Type of the console connection. :returns: Dictionary with console type and url """ server = self._get_resource(_server.Server, server) return server.get_console_url(self, console_type) def get_server_console_output(self, server, length=None): """Return the console output for a server. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param length: Optional number of line to fetch from the end of console log. All lines will be returned if this is not specified. :returns: The console output as a dict. Control characters will be escaped to create a valid JSON string. """ server = self._get_resource(_server.Server, server) return server.get_console_output(self, length=length) def create_console(self, server, console_type, console_protocol=None): """Create a remote console on the server. 
When microversion supported is higher then 2.6 remote console is created, otherwise deprecated call to get server console is issued. :param server: Either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance. :param console_type: Type of the remote console. Supported values as: * novnc * spice-html5 * rdp-html5 * serial * webmks (supported after 2.8) :param console_protocol: Optional console protocol (is respected only after microversion 2.6). :returns: Dictionary with console type, url and optionally protocol. """ server = self._get_resource(_server.Server, server) # NOTE: novaclient supports undocumented type xcpvnc also supported # historically by OSC. We support it, but do not document either. if utils.supports_microversion(self, '2.6'): console = self._create( _src.ServerRemoteConsole, server_id=server.id, type=console_type, protocol=console_protocol, ) return console.to_dict() else: return server.get_console_url(self, console_type) # ========== Quota class sets ========== def get_quota_class_set(self, quota_class_set='default'): """Get a single quota class set Only one quota class is permitted, ``default``. :param quota_class_set: The value can be the ID of a quota class set (only ``default`` is supported) or a :class:`~openstack.compute.v2.quota_class_set.QuotaClassSet` instance. :returns: One :class:`~openstack.compute.v2.quota_class_set.QuotaClassSet` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_quota_class_set.QuotaClassSet, quota_class_set) def update_quota_class_set(self, quota_class_set, **attrs): """Update a QuotaClassSet. Only one quota class is permitted, ``default``. :param quota_class_set: Either the ID of a quota class set (only ``default`` is supported) or a :class:`~openstack.compute.v2.quota_class_set.QuotaClassSet` instance. :param attrs: The attributes to update on the QuotaClassSet represented by ``quota_class_set``. 
:returns: The updated QuotaSet :rtype: :class:`~openstack.compute.v2.quota_set.QuotaSet` """ return self._update( _quota_class_set.QuotaClassSet, quota_class_set, **attrs ) # ========== Quota sets ========== def get_quota_set(self, project, usage=False, **query): """Show QuotaSet information for the project :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be retrieved :param bool usage: When set to ``True`` quota usage and reservations would be filled. :param dict query: Additional query parameters to use. :returns: One :class:`~openstack.compute.v2.quota_set.QuotaSet` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ project = self._get_resource(_project.Project, project) res = self._get_resource( _quota_set.QuotaSet, None, project_id=project.id, ) base_path = '/os-quota-sets/%(project_id)s/detail' if usage else None return res.fetch(self, base_path=base_path, **query) def get_quota_set_defaults(self, project): """Show QuotaSet defaults for the project :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be retrieved :returns: One :class:`~openstack.compute.v2.quota_set.QuotaSet` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ project = self._get_resource(_project.Project, project) res = self._get_resource( _quota_set.QuotaSet, None, project_id=project.id, ) return res.fetch( self, base_path='/os-quota-sets/%(project_id)s/defaults' ) def revert_quota_set(self, project, **query): """Reset Quota for the project/user. :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be reset. :param dict query: Additional parameters to be used. 
:returns: ``None`` """ project = self._get_resource(_project.Project, project) res = self._get_resource( _quota_set.QuotaSet, None, project_id=project.id ) if not query: query = {} return res.delete(self, **query) def update_quota_set(self, project, *, user=None, **attrs): """Update a QuotaSet. :param project: ID or instance of :class:`~openstack.identity.project.Project` of the project for which the quota should be reset. :param user_id: Optional ID of the user to set quotas as. :param attrs: The attributes to update on the QuotaSet represented by ``quota_set``. :returns: The updated QuotaSet :rtype: :class:`~openstack.compute.v2.quota_set.QuotaSet` """ query = {} if 'project_id' in attrs or isinstance(project, _quota_set.QuotaSet): warnings.warn( "The signature of 'update_quota_set' has changed and it " "now expects a Project as the first argument, in line " "with the other quota set methods.", os_warnings.RemovedInSDK50Warning, ) if isinstance(project, _quota_set.QuotaSet): attrs['project_id'] = project.project_id if 'query' in attrs: query = attrs.pop('query') else: project = self._get_resource(_project.Project, project) attrs['project_id'] = project.id if user: user = self._get_resource(_user.User, user) query['user_id'] = user.id # we don't use Proxy._update since that doesn't allow passing arbitrary # query string parameters quota_set = self._get_resource(_quota_set.QuotaSet, None, **attrs) return quota_set.commit(self, **query) # ========== Server actions ========== def get_server_action(self, server_action, server, ignore_missing=True): """Get a single server action :param server_action: The value can be the ID of a server action or a :class:`~openstack.compute.v2.server_action.ServerAction` instance. :param server: This parameter need to be specified when ServerAction ID is given as value. It can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server` instance that the action is associated with. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the server action does not exist. When set to ``True``, no exception will be set when attempting to retrieve a non-existent server action. :returns: One :class:`~openstack.compute.v2.server_action.ServerAction` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ server_id = self._get_uri_attribute(server_action, server, 'server_id') server_action = resource.Resource._get_id(server_action) return self._get( _server_action.ServerAction, server_id=server_id, request_id=server_action, ignore_missing=ignore_missing, ) def server_actions(self, server): """Return a generator of server actions :param server: The server can be either the ID of a server or a :class:`~openstack.compute.v2.server.Server`. :returns: A generator of ServerAction objects :rtype: :class:`~openstack.compute.v2.server_action.ServerAction` """ server_id = resource.Resource._get_id(server) return self._list(_server_action.ServerAction, server_id=server_id) # ========== Utilities ========== def wait_for_server( self, server, status='ACTIVE', failures=None, interval=2, wait=120, callback=None, ): """Wait for a server to be in a particular status. :param server: The :class:`~openstack.compute.v2.server.Server` to wait on to reach the specified status. :type server: :class:`~openstack.compute.v2.server.Server`: :param status: Desired status. :type status: str :param failures: Statuses that would be interpreted as failures. :type failures: :py:class:`list` :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :type interval: int :param wait: Maximum number of seconds to wait before the change. Default to 120. :type wait: int :param callback: A callback function. This will be called with a single value, progress, which is a percentage value from 0-100. 
:type callback: callable :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to the desired status failed to occur in specified seconds. :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource has transited to one of the failure statuses. :raises: :class:`~AttributeError` if the resource does not have a ``status`` attribute. """ failures = ['ERROR'] if failures is None else failures return resource.wait_for_status( self, server, status, failures, interval, wait, callback=callback, ) def wait_for_delete(self, res, interval=2, wait=120, callback=None): """Wait for a resource to be deleted. :param res: The resource to wait on to be deleted. :type resource: A :class:`~openstack.resource.Resource` object. :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :param callback: A callback function. This will be called with a single value, progress, which is a percentage value from 0-100. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to delete failed to occur in the specified seconds. 
""" return resource.wait_for_delete(self, res, interval, wait, callback) def _get_cleanup_dependencies(self): return { 'compute': { 'before': ['block_storage', 'network', 'identity', 'image'] } } def _service_cleanup( self, dry_run=True, client_status_queue=None, identified_resources=None, filters=None, resource_evaluation_fn=None, skip_resources=None, ): if self.should_skip_resource_cleanup("server", skip_resources): return servers = [] for obj in self.servers(): need_delete = self._service_cleanup_del_res( self.delete_server, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not dry_run and need_delete: # In the dry run we identified, that server will go. To propely # identify consequences we need to tell others, that the port # will disappear as well for port in self._connection.network.ports(device_id=obj.id): identified_resources[port.id] = port servers.append(obj) # We actually need to wait for servers to really disappear, since they # might be still holding ports on the subnet for server in servers: self.wait_for_delete(server) for obj in self.server_groups(): # Do not delete server groups that still have members if obj.member_ids: continue self._service_cleanup_del_res( self.delete_server_group, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/aggregate.py0000664000175000017500000000576400000000000022535 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class Aggregate(resource.Resource): resource_key = 'aggregate' resources_key = 'aggregates' base_path = '/os-aggregates' # capabilities allow_create = True allow_fetch = True allow_delete = True allow_list = True allow_commit = True # Properties #: Availability zone of aggregate availability_zone = resource.Body('availability_zone') #: The date and time when the resource was created. created_at = resource.Body('created_at') #: The date and time when the resource was deleted. deleted_at = resource.Body('deleted_at') #: Deleted? 
is_deleted = resource.Body('deleted', type=bool) #: Name of aggregate name = resource.Body('name') #: Hosts hosts = resource.Body('hosts', type=list) #: Metadata metadata = resource.Body('metadata', type=dict) #: The date and time when the resource was updated updated_at = resource.Body('updated_at') #: UUID uuid = resource.Body('uuid') # Image pre-caching introduced in 2.81 _max_microversion = '2.81' def _action(self, session, body, microversion=None): """Preform aggregate actions given the message body.""" url = utils.urljoin(self.base_path, self.id, 'action') response = session.post(url, json=body, microversion=microversion) exceptions.raise_from_response(response) aggregate = Aggregate() aggregate._translate_response(response=response) return aggregate def add_host(self, session, host): """Adds a host to an aggregate.""" body = {'add_host': {'host': host}} return self._action(session, body) def remove_host(self, session, host): """Removes a host from an aggregate.""" body = {'remove_host': {'host': host}} return self._action(session, body) def set_metadata(self, session, metadata): """Creates or replaces metadata for an aggregate.""" body = {'set_metadata': {'metadata': metadata}} return self._action(session, body) def precache_images(self, session, images): """Requests image pre-caching""" body = {'cache': images} url = utils.urljoin(self.base_path, self.id, 'images') response = session.post( url, json=body, microversion=self._max_microversion ) exceptions.raise_from_response(response) # This API has no result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/availability_zone.py0000664000175000017500000000177500000000000024312 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class AvailabilityZone(resource.Resource): resources_key = 'availabilityZoneInfo' base_path = '/os-availability-zone' # capabilities allow_list = True # Properties #: name of availability zone name = resource.Body('zoneName') #: state of availability zone state = resource.Body('zoneState') #: hosts of availability zone hosts = resource.Body('hosts') AvailabilityZoneDetail = AvailabilityZone ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/extension.py0000664000175000017500000000265500000000000022617 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Extension(resource.Resource): resource_key = 'extension' resources_key = 'extensions' base_path = '/extensions' # capabilities allow_fetch = True allow_list = True # Properties #: A short name by which this extension is also known. alias = resource.Body('alias', alternate_id=True) #: Text describing this extension's purpose. 
description = resource.Body('description') #: Links pertaining to this extension. This is a list of dictionaries, #: each including keys ``href`` and ``rel``. links = resource.Body('links', type=list, list_type=dict) #: The name of the extension. name = resource.Body('name') #: A URL pointing to the namespace for this extension. namespace = resource.Body('namespace') #: Timestamp when this extension was last updated. updated_at = resource.Body('updated') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/flavor.py0000664000175000017500000002250500000000000022070 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class Flavor(resource.Resource): resource_key = 'flavor' resources_key = 'flavors' base_path = '/flavors' # capabilities allow_create = True allow_fetch = True allow_delete = True allow_list = True allow_commit = True _query_mapping = resource.QueryParameters( "sort_key", "sort_dir", "is_public", min_disk="minDisk", min_ram="minRam", ) # extra_specs introduced in 2.61 _max_microversion = '2.61' # Properties #: The name of this flavor. name = resource.Body('name', alias='original_name') #: The name of this flavor when returned by server list/show original_name = resource.Body('original_name') #: The description of the flavor. 
description = resource.Body('description') #: Size of the disk this flavor offers. *Type: int* disk = resource.Body('disk', type=int, default=0) #: ``True`` if this is a publicly visible flavor. ``False`` if this is #: a private image. *Type: bool* is_public = resource.Body( 'os-flavor-access:is_public', type=bool, default=True ) #: The amount of RAM (in MB) this flavor offers. *Type: int* ram = resource.Body('ram', type=int, default=0) #: The number of virtual CPUs this flavor offers. *Type: int* vcpus = resource.Body('vcpus', type=int, default=0) #: Size of the swap partitions. swap = resource.Body('swap', type=int, default=0) #: Size of the ephemeral data disk attached to this server. *Type: int* ephemeral = resource.Body('OS-FLV-EXT-DATA:ephemeral', type=int, default=0) #: ``True`` if this flavor is disabled, ``False`` if not. *Type: bool* is_disabled = resource.Body('OS-FLV-DISABLED:disabled', type=bool) #: The bandwidth scaling factor this flavor receives on the network. rxtx_factor = resource.Body('rxtx_factor', type=float) # TODO(mordred) extra_specs can historically also come from # OS-FLV-WITH-EXT-SPECS:extra_specs. Do we care? #: A dictionary of the flavor's extra-specs key-and-value pairs. extra_specs = resource.Body('extra_specs', type=dict, default={}) def __getattribute__(self, name): """Return an attribute on this instance This is mostly a pass-through except for a specialization on the 'id' name, as this can exist under a different name via the `alternate_id` argument to resource.Body. """ if name == "id": # ID handling in flavor is very tricky. Sometimes we get ID back, # sometimes we get only name (but it is same as id), sometimes we # get original_name back, but it is still id. # To get this handled try sequentially to access it from various # places until we find first non-empty value. 
for xname in ["id", "name", "original_name"]: if xname in self._body and self._body[xname]: return self._body[xname] else: return super().__getattribute__(name) @classmethod def list( cls, session, paginated=True, base_path='/flavors/detail', **params, ): # Find will invoke list when name was passed. Since we want to return # flavor with details (same as direct get) we need to swap default here # and list with "/flavors" if no details explicitely requested if 'is_public' not in params or params['is_public'] is None: # is_public is ternary - None means give all flavors. # Force it to string to avoid requests skipping it. params['is_public'] = 'None' return super().list( session, paginated=paginated, base_path=base_path, **params ) def _action(self, session, body, microversion=None): """Preform flavor actions given the message body.""" url = utils.urljoin(Flavor.base_path, self.id, 'action') headers = {'Accept': ''} attrs = {} if microversion: # Do not reset microversion if it is set on a session level attrs['microversion'] = microversion response = session.post(url, json=body, headers=headers, **attrs) exceptions.raise_from_response(response) return response def add_tenant_access(self, session, tenant): """Adds flavor access to a tenant and flavor. :param session: The session to use for making this request. :param tenant: :returns: None """ body = {'addTenantAccess': {'tenant': tenant}} self._action(session, body) def remove_tenant_access(self, session, tenant): """Removes flavor access to a tenant and flavor. :param session: The session to use for making this request. :param tenant: :returns: None """ body = {'removeTenantAccess': {'tenant': tenant}} self._action(session, body) def get_access(self, session): """Lists tenants who have access to a private flavor By default, only administrators can manage private flavor access. A private flavor has ``is_public`` set to false while a public flavor has ``is_public`` set to true. 
:param session: The session to use for making this request. :return: List of dicts with flavor_id and tenant_id attributes """ url = utils.urljoin(Flavor.base_path, self.id, 'os-flavor-access') response = session.get(url) exceptions.raise_from_response(response) return response.json().get('flavor_access', []) def fetch_extra_specs(self, session): """Fetch extra specs of the flavor Starting with 2.61 extra specs are returned with the flavor details, before that a separate call is required. :param session: The session to use for making this request. :returns: The updated flavor. """ url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs') microversion = self._get_microversion(session, action='fetch') response = session.get(url, microversion=microversion) exceptions.raise_from_response(response) specs = response.json().get('extra_specs', {}) self._update(extra_specs=specs) return self def create_extra_specs(self, session, specs): """Creates extra specs for a flavor. :param session: The session to use for making this request. :param specs: :returns: The updated flavor. """ url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs') microversion = self._get_microversion(session, action='create') response = session.post( url, json={'extra_specs': specs}, microversion=microversion ) exceptions.raise_from_response(response) specs = response.json().get('extra_specs', {}) self._update(extra_specs=specs) return self def get_extra_specs_property(self, session, prop): """Get an individual extra spec property. :param session: The session to use for making this request. :param prop: The property to fetch. :returns: The value of the property if it exists, else ``None``. 
""" url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs', prop) microversion = self._get_microversion(session, action='fetch') response = session.get(url, microversion=microversion) exceptions.raise_from_response(response) val = response.json().get(prop) return val def update_extra_specs_property(self, session, prop, val): """Update an extra spec for a flavor. :param session: The session to use for making this request. :param prop: The property to update. :param val: The value to update with. :returns: The updated value of the property. """ url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs', prop) microversion = self._get_microversion(session, action='commit') response = session.put( url, json={prop: val}, microversion=microversion ) exceptions.raise_from_response(response) val = response.json().get(prop) return val def delete_extra_specs_property(self, session, prop): """Delete an extra spec for a flavor. :param session: The session to use for making this request. :param prop: The property to delete. :returns: None """ url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs', prop) microversion = self._get_microversion(session, action='delete') response = session.delete(url, microversion=microversion) exceptions.raise_from_response(response) # TODO(stephenfin): Deprecate this for removal in 2.0 FlavorDetail = Flavor ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/hypervisor.py0000664000175000017500000001002100000000000022777 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import warnings from openstack import exceptions from openstack import resource from openstack import utils from openstack import warnings as os_warnings class Hypervisor(resource.Resource): resource_key = 'hypervisor' resources_key = 'hypervisors' base_path = '/os-hypervisors' # capabilities allow_fetch = True allow_list = True _query_mapping = resource.QueryParameters( 'hypervisor_hostname_pattern', 'with_servers' ) # Lot of attributes are dropped in 2.88 _max_microversion = '2.88' # Properties #: Information about the hypervisor's CPU. Up to 2.28 it was string. cpu_info = resource.Body('cpu_info') #: IP address of the host host_ip = resource.Body('host_ip') #: The type of hypervisor hypervisor_type = resource.Body('hypervisor_type') #: Version of the hypervisor hypervisor_version = resource.Body('hypervisor_version') #: Name of hypervisor name = resource.Body('hypervisor_hostname') #: Service details service_details = resource.Body('service', type=dict) #: List of Servers servers = resource.Body('servers', type=list, list_type=dict) #: State of hypervisor state = resource.Body('state') #: Status of hypervisor status = resource.Body('status') #: The total uptime of the hypervisor and information about average load. #: This attribute is set only when querying uptime explicitly. 
uptime = resource.Body('uptime') # Attributes deprecated with 2.88 #: Measurement of the hypervisor's current workload current_workload = resource.Body('current_workload', deprecated=True) #: Disk space available to the scheduler disk_available = resource.Body("disk_available_least", deprecated=True) #: The amount, in gigabytes, of local storage used local_disk_used = resource.Body('local_gb_used', deprecated=True) #: The amount, in gigabytes, of the local storage device local_disk_size = resource.Body('local_gb', deprecated=True) #: The amount, in gigabytes, of free space on the local storage device local_disk_free = resource.Body('free_disk_gb', deprecated=True) #: The amount, in megabytes, of memory memory_used = resource.Body('memory_mb_used', deprecated=True) #: The amount, in megabytes, of total memory memory_size = resource.Body('memory_mb', deprecated=True) #: The amount, in megabytes, of available memory memory_free = resource.Body('free_ram_mb', deprecated=True) #: Count of the running virtual machines running_vms = resource.Body('running_vms', deprecated=True) #: Count of the VCPUs in use vcpus_used = resource.Body('vcpus_used', deprecated=True) #: Count of all VCPUs vcpus = resource.Body('vcpus', deprecated=True) def get_uptime(self, session): """Get uptime information for the hypervisor Updates uptime attribute of the hypervisor object """ warnings.warn( "This call is deprecated and is only available until Nova 2.88", os_warnings.LegacyAPIWarning, ) if utils.supports_microversion(session, '2.88'): raise exceptions.SDKException( 'Hypervisor.get_uptime is not supported anymore' ) url = utils.urljoin(self.base_path, self.id, 'uptime') microversion = self._get_microversion(session, action='fetch') response = session.get(url, microversion=microversion) self._translate_response(response) return self HypervisorDetail = Hypervisor ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/compute/v2/image.py0000664000175000017500000000402000000000000021651 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import metadata from openstack import resource class Image(resource.Resource, metadata.MetadataMixin): resource_key = 'image' resources_key = 'images' base_path = '/images' # capabilities allow_fetch = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( "server", "name", "status", "type", min_disk="minDisk", min_ram="minRam", changes_since="changes-since", ) # Properties #: Links pertaining to this image. This is a list of dictionaries, #: each including keys ``href`` and ``rel``, and optionally ``type``. links = resource.Body('links') #: The name of this image. name = resource.Body('name') #: Timestamp when the image was created. created_at = resource.Body('created') #: The mimimum disk size. *Type: int* min_disk = resource.Body('minDisk', type=int) #: The minimum RAM size. *Type: int* min_ram = resource.Body('minRam', type=int) #: If this image is still building, its progress is represented here. #: Once an image is created, progres will be 100. *Type: int* progress = resource.Body('progress', type=int) #: The status of this image. status = resource.Body('status') #: Timestamp when the image was updated. updated_at = resource.Body('updated') #: Size of the image in bytes. 
*Type: int* size = resource.Body('OS-EXT-IMG-SIZE:size', type=int) ImageDetail = Image ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/keypair.py0000664000175000017500000000672700000000000022253 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Keypair(resource.Resource): resource_key = 'keypair' resources_key = 'keypairs' base_path = '/os-keypairs' _query_mapping = resource.QueryParameters('user_id') # capabilities allow_create = True allow_fetch = True allow_delete = True allow_list = True _max_microversion = '2.10' # Properties #: The date and time when the resource was created. created_at = resource.Body('created_at') #: A boolean indicates whether this keypair is deleted or not. is_deleted = resource.Body('deleted', type=bool) #: The short fingerprint associated with the ``public_key`` for #: this keypair. fingerprint = resource.Body('fingerprint') # NOTE: There is in fact an 'id' field. However, it's not useful # because all operations use the 'name' as an identifier. # Additionally, the 'id' field only appears *after* creation, # so suddenly you have an 'id' field filled in after the fact, # and it just gets in the way. We need to cover this up by listing # name as alternate_id and listing id as coming from name. 
#: The id identifying the keypair id = resource.Body('name') #: A name identifying the keypair name = resource.Body('name', alternate_id=True) #: The private key for the keypair private_key = resource.Body('private_key') #: The SSH public key that is paired with the server. public_key = resource.Body('public_key') #: The type of the keypair. type = resource.Body('type', default='ssh') #: The user_id for a keypair. user_id = resource.Body('user_id') def _consume_attrs(self, mapping, attrs): # TODO(mordred) This should not be required. However, without doing # it **SOMETIMES** keypair picks up id and not name. This is a hammer. if 'id' in attrs: attrs.setdefault('name', attrs.pop('id')) return super()._consume_attrs(mapping, attrs) @classmethod def existing(cls, connection=None, **kwargs): """Create an instance of an existing remote resource. When creating the instance set the ``_synchronized`` parameter of :class:`Resource` to ``True`` to indicate that it represents the state of an existing server-side resource. As such, all attributes passed in ``**kwargs`` are considered "clean", such that an immediate :meth:`update` call would not generate a body of attributes to be modified on the server. :param dict kwargs: Each of the named arguments will be set as attributes on the resulting Resource object. """ # Listing KPs return list with resource_key structure. Instead of # overriding whole list just try to create object smart. if cls.resource_key in kwargs: args = kwargs.pop(cls.resource_key) kwargs.update(**args) return cls(_synchronized=True, connection=connection, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/limits.py0000664000175000017500000001277000000000000022103 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class AbsoluteLimits(resource.Resource): _max_microversion = '2.57' # Properties #: The number of key-value pairs that can be set as image metadata. image_meta = resource.Body("maxImageMeta", aka="max_image_meta") #: The maximum number of personality contents that can be supplied. personality = resource.Body("maxPersonality", deprecated=True) #: The maximum size, in bytes, of a personality. personality_size = resource.Body("maxPersonalitySize", deprecated=True) #: The maximum amount of security group rules allowed. security_group_rules = resource.Body( "maxSecurityGroupRules", aka="max_security_group_rules" ) #: The maximum amount of security groups allowed. security_groups = resource.Body( "maxSecurityGroups", aka="max_security_groups" ) #: The amount of security groups currently in use. security_groups_used = resource.Body( "totalSecurityGroupsUsed", aka="total_security_groups_used" ) #: The number of key-value pairs that can be set as server metadata. server_meta = resource.Body("maxServerMeta", aka="max_server_meta") #: The maximum amount of cores. total_cores = resource.Body("maxTotalCores", aka="max_total_cores") #: The amount of cores currently in use. total_cores_used = resource.Body("totalCoresUsed", aka="total_cores_used") #: The maximum amount of floating IPs. floating_ips = resource.Body( "maxTotalFloatingIps", aka="max_total_floating_ips" ) #: The amount of floating IPs currently in use. floating_ips_used = resource.Body( "totalFloatingIpsUsed", aka="total_floating_ips_used" ) #: The maximum amount of instances. 
instances = resource.Body("maxTotalInstances", aka="max_total_instances") #: The amount of instances currently in use. instances_used = resource.Body( "totalInstancesUsed", aka="total_instances_used" ) #: The maximum amount of keypairs. keypairs = resource.Body("maxTotalKeypairs", aka="max_total_keypairs") #: The maximum RAM size in megabytes. total_ram = resource.Body("maxTotalRAMSize", aka="max_total_ram_size") #: The RAM size in megabytes currently in use. total_ram_used = resource.Body("totalRAMUsed", aka="total_ram_used") #: The maximum amount of server groups. server_groups = resource.Body("maxServerGroups", aka="max_server_groups") #: The amount of server groups currently in use. server_groups_used = resource.Body( "totalServerGroupsUsed", aka="total_server_groups_used" ) #: The maximum number of members in a server group. server_group_members = resource.Body( "maxServerGroupMembers", aka="max_server_group_members" ) class RateLimit(resource.Resource): # Properties #: Rate limits next availabe time. next_available = resource.Body("next-available") #: Integer for rate limits remaining. remaining = resource.Body("remaining", type=int) #: Unit of measurement for the value parameter. unit = resource.Body("unit") #: Integer number of requests which can be made. value = resource.Body("value", type=int) #: An HTTP verb (POST, PUT, etc.). verb = resource.Body("verb") class RateLimits(resource.Resource): # Properties #: A list of the specific limits that apply to the ``regex`` and ``uri``. limits = resource.Body("limit", type=list, list_type=RateLimit) #: A regex representing which routes this rate limit applies to. regex = resource.Body("regex") #: A URI representing which routes this rate limit applies to. uri = resource.Body("uri") class Limits(resource.Resource): base_path = "/limits" resource_key = "limits" allow_fetch = True _query_mapping = resource.QueryParameters( 'tenant_id', 'reserved', project_id='tenant_id', ) # Properties #: An absolute limits object. 
absolute = resource.Body("absolute", type=AbsoluteLimits) #: Rate-limit compute resources. This is only populated when using the #: legacy v2 API which was removed in Nova 14.0.0 (Newton). In v2.1 it will #: always be an empty list. rate = resource.Body("rate", type=list, list_type=RateLimits) def fetch( self, session, requires_id=False, error_message=None, base_path=None, skip_cache=False, **params ): """Get the Limits resource. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :returns: A Limits instance :rtype: :class:`~openstack.compute.v2.limits.Limits` """ # TODO(mordred) We shouldn't have to subclass just to declare # requires_id = False. return super().fetch( session=session, requires_id=requires_id, error_message=error_message, base_path=base_path, skip_cache=skip_cache, **params ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/migration.py0000664000175000017500000000537000000000000022571 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Migration(resource.Resource): resources_key = 'migrations' base_path = '/os-migrations' # capabilities allow_list = True _query_mapping = resource.QueryParameters( 'host', 'status', 'migration_type', 'source_compute', 'user_id', 'project_id', changes_since='changes-since', changes_before='changes-before', server_id='instance_uuid', ) #: The date and time when the resource was created. created_at = resource.Body('created_at') #: The target compute of the migration. dest_compute = resource.Body('dest_compute') #: The target host of the migration. dest_host = resource.Body('dest_host') #: The target node of the migration. dest_node = resource.Body('dest_node') #: The type of the migration. One of 'migration', 'resize', #: 'live-migration' or 'evacuation' migration_type = resource.Body('migration_type') #: The ID of the old flavor. This value corresponds to the ID of the flavor #: in the database. This will be the same as new_flavor_id except for #: resize operations. new_flavor_id = resource.Body('new_instance_type_id') #: The ID of the old flavor. This value corresponds to the ID of the flavor #: in the database. old_flavor_id = resource.Body('old_instance_type_id') #: The ID of the project that initiated the server migration (since #: microversion 2.80) project_id = resource.Body('project_id') #: The UUID of the server server_id = resource.Body('instance_uuid') #: The source compute of the migration. source_compute = resource.Body('source_compute') #: The source node of the migration. source_node = resource.Body('source_node') #: The current status of the migration. status = resource.Body('status') #: The date and time when the resource was last updated. 
updated_at = resource.Body('updated_at') #: The ID of the user that initiated the server migration (since #: microversion 2.80) user_id = resource.Body('user_id') #: The UUID of the migration (since microversion 2.59) uuid = resource.Body('uuid', alternate_id=True) _max_microversion = '2.80' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/quota_class_set.py0000664000175000017500000000533000000000000023765 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class QuotaClassSet(resource.Resource): resource_key = 'quota_class_set' base_path = '/os-quota-class-sets' _max_microversion = '2.56' # capabilities allow_fetch = True allow_commit = True #: Properties #: The number of allowed server cores for each tenant. cores = resource.Body('cores', type=int) #: The number of allowed fixed IP addresses for each tenant. Must be #: equal to or greater than the number of allowed servers. fixed_ips = resource.Body('fixed_ips', type=int) #: The number of allowed floating IP addresses for each tenant. floating_ips = resource.Body('floating_ips', type=int) #: The number of allowed bytes of content for each injected file. injected_file_content_bytes = resource.Body( 'injected_file_content_bytes', type=int ) #: The number of allowed bytes for each injected file path. 
injected_file_path_bytes = resource.Body( 'injected_file_path_bytes', type=int ) #: The number of allowed injected files for each tenant. injected_files = resource.Body('injected_files', type=int) #: The number of allowed servers for each tenant. instances = resource.Body('instances', type=int) #: The number of allowed key pairs for each user. key_pairs = resource.Body('key_pairs', type=int) #: The number of allowed metadata items for each server. metadata_items = resource.Body('metadata_items', type=int) #: The number of private networks that can be created per project. networks = resource.Body('networks', type=int) #: The amount of allowed server RAM, in MiB, for each tenant. ram = resource.Body('ram', type=int) #: The number of allowed rules for each security group. security_group_rules = resource.Body('security_group_rules', type=int) #: The number of allowed security groups for each tenant. security_groups = resource.Body('security_groups', type=int) #: The number of allowed server groups for each tenant. server_groups = resource.Body('server_groups', type=int) #: The number of allowed members for each server group. server_group_members = resource.Body('server_group_members', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/quota_set.py0000664000175000017500000000562000000000000022602 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.common import quota_set from openstack import resource class QuotaSet(quota_set.QuotaSet): # We generally only want compute QS support max_microversion. Otherwise be # explicit and list all the attributes _max_microversion = '2.56' #: Properties #: The number of allowed server cores for each tenant. cores = resource.Body('cores', type=int) #: The number of allowed fixed IP addresses for each tenant. Must be #: equal to or greater than the number of allowed servers. fixed_ips = resource.Body('fixed_ips', type=int) #: The number of allowed floating IP addresses for each tenant. floating_ips = resource.Body('floating_ips', type=int) #: You can force the update even if the quota has already been used and #: the reserved quota exceeds the new quota. force = resource.Body('force', type=bool) #: The number of allowed bytes of content for each injected file. injected_file_content_bytes = resource.Body( 'injected_file_content_bytes', type=int ) #: The number of allowed bytes for each injected file path. injected_file_path_bytes = resource.Body( 'injected_file_path_bytes', type=int ) #: The number of allowed injected files for each tenant. injected_files = resource.Body('injected_files', type=int) #: The number of allowed servers for each tenant. instances = resource.Body('instances', type=int) #: The number of allowed key pairs for each user. key_pairs = resource.Body('key_pairs', type=int) #: The number of allowed metadata items for each server. metadata_items = resource.Body('metadata_items', type=int) #: The number of private networks that can be created per project. networks = resource.Body('networks', type=int) #: The amount of allowed server RAM, in MiB, for each tenant. ram = resource.Body('ram', type=int) #: The number of allowed rules for each security group. security_group_rules = resource.Body('security_group_rules', type=int) #: The number of allowed security groups for each tenant. 
security_groups = resource.Body('security_groups', type=int) #: The number of allowed server groups for each tenant. server_groups = resource.Body('server_groups', type=int) #: The number of allowed members for each server group. server_group_members = resource.Body('server_group_members', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/server.py0000664000175000017500000011553500000000000022113 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty from openstack.common import metadata from openstack.common import tag from openstack.compute.v2 import flavor from openstack.compute.v2 import volume_attachment from openstack import exceptions from openstack.image.v2 import image from openstack import resource from openstack import utils # Workaround Python's lack of an undefined sentinel # https://python-patterns.guide/python/sentinel-object/ class Unset: def __bool__(self) -> ty.Literal[False]: return False UNSET: Unset = Unset() CONSOLE_TYPE_ACTION_MAPPING = { 'novnc': 'os-getVNCConsole', 'xvpvnc': 'os-getVNCConsole', 'spice-html5': 'os-getSPICEConsole', 'rdp-html5': 'os-getRDPConsole', 'serial': 'os-getSerialConsole', } class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin): resource_key = 'server' resources_key = 'servers' base_path = '/servers' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Sentinel used to differentiate API called without parameter or None # Ex unshelve API can be called without an availability_zone or with # availability_zone = None to unpin the az. 
_sentinel = object() _query_mapping = resource.QueryParameters( "auto_disk_config", "availability_zone", "created_at", "description", "flavor", "hostname", "image", "kernel_id", "key_name", "launch_index", "launched_at", "locked_by", "name", "node", "power_state", "progress", "project_id", "ramdisk_id", "reservation_id", "root_device_name", "status", "task_state", "terminated_at", "user_id", "vm_state", "sort_key", "sort_dir", "pinned_availability_zone", access_ipv4="access_ip_v4", access_ipv6="access_ip_v6", has_config_drive="config_drive", deleted_only="deleted", compute_host="host", is_soft_deleted="soft_deleted", ipv4_address="ip", ipv6_address="ip6", changes_since="changes-since", changes_before="changes-before", id="uuid", all_projects="all_tenants", **tag.TagMixin._tag_query_parameters, ) _max_microversion = '2.96' #: A list of dictionaries holding links relevant to this server. links = resource.Body('links') access_ipv4 = resource.Body('accessIPv4') access_ipv6 = resource.Body('accessIPv6') #: A dictionary of addresses this server can be accessed through. #: The dictionary contains keys such as ``private`` and ``public``, #: each containing a list of dictionaries for addresses of that type. #: The addresses are contained in a dictionary with keys ``addr`` #: and ``version``, which is either 4 or 6 depending on the protocol #: of the IP address. *Type: dict* addresses = resource.Body('addresses', type=dict) #: When a server is first created, it provides the administrator password. admin_password = resource.Body('adminPass') #: A list of an attached volumes. Each item in the list contains at least #: an "id" key to identify the specific volumes. attached_volumes = resource.Body( 'os-extended-volumes:volumes_attached', aka='volumes', type=list, list_type=volume_attachment.VolumeAttachment, default=[], ) #: The name of the availability zone this server is a part of. 
availability_zone = resource.Body('OS-EXT-AZ:availability_zone') #: Enables fine grained control of the block device mapping for an #: instance. This is typically used for booting servers from volumes. block_device_mapping = resource.Body('block_device_mapping_v2') #: Indicates whether or not a config drive was used for this server. config_drive = resource.Body('config_drive') #: The name of the compute host on which this instance is running. #: Appears in the response for administrative users only. compute_host = resource.Body('OS-EXT-SRV-ATTR:host') #: Timestamp of when the server was created. created_at = resource.Body('created') #: The description of the server. Before microversion #: 2.19 this was set to the server name. description = resource.Body('description') #: The disk configuration. Either AUTO or MANUAL. disk_config = resource.Body('OS-DCF:diskConfig') #: The flavor reference, as a ID or full URL, for the flavor to use for #: this server. flavor_id = resource.Body('flavorRef') #: The flavor property as returned from server. flavor = resource.Body('flavor', type=flavor.Flavor) #: Indicates whether a configuration drive enables metadata injection. #: Not all cloud providers enable this feature. has_config_drive = resource.Body('config_drive') #: An ID representing the host of this server. host_id = resource.Body('hostId') #: A fault object. Only available when the server status #: is ERROR or DELETED and a fault occurred. fault = resource.Body('fault') #: The host to boot the server on. host = resource.Body('host') #: The host status. host_status = resource.Body('host_status') #: The hostname set on the instance when it is booted. #: By default, it appears in the response for administrative users only. hostname = resource.Body('OS-EXT-SRV-ATTR:hostname') #: The hypervisor host name. Appears in the response for administrative #: users only. 
hypervisor_hostname = resource.Body('OS-EXT-SRV-ATTR:hypervisor_hostname') #: The image reference, as a ID or full URL, for the image to use for #: this server. image_id = resource.Body('imageRef') #: The image property as returned from server. image = resource.Body('image', type=image.Image) #: The instance name. The Compute API generates the instance name from the #: instance name template. Appears in the response for administrative users #: only. instance_name = resource.Body('OS-EXT-SRV-ATTR:instance_name') #: The address to use to connect to this server from the current calling #: context. This will be set to public_ipv6 if the calling host has #: routable ipv6 addresses, and to private_ipv4 if the Connection was #: created with private=True. Otherwise it will be set to public_ipv4. interface_ip = resource.Computed('interface_ip', default='') # The locked status of the server is_locked = resource.Body('locked', type=bool) #: The UUID of the kernel image when using an AMI. Will be null if not. #: By default, it appears in the response for administrative users only. kernel_id = resource.Body('OS-EXT-SRV-ATTR:kernel_id') #: The name of an associated keypair key_name = resource.Body('key_name') #: When servers are launched via multiple create, this is the #: sequence in which the servers were launched. By default, it #: appears in the response for administrative users only. launch_index = resource.Body('OS-EXT-SRV-ATTR:launch_index', type=int) #: The timestamp when the server was launched. launched_at = resource.Body('OS-SRV-USG:launched_at') #: The reason the server was locked, if any. locked_reason = resource.Body('locked_reason') #: The maximum number of servers to create. max_count = resource.Body('max_count') #: The minimum number of servers to create. min_count = resource.Body('min_count') #: A networks object. Required parameter when there are multiple #: networks defined for the tenant. 
When you do not specify the #: networks parameter, the server attaches to the only network #: created for the current tenant. networks = resource.Body('networks') #: Personality files. This should be a list of dicts with each dict #: containing a file name ('name') and a base64-encoded file contents #: ('contents') personality = resource.Body('personality', type=list) #: The availability zone requested during server creation OR pinned #: availability zone, which is configured using default_schedule_zone #: config option. pinned_availability_zone = resource.Body('pinned_availability_zone') #: The power state of this server. power_state = resource.Body('OS-EXT-STS:power_state') #: While the server is building, this value represents the percentage #: of completion. Once it is completed, it will be 100. *Type: int* progress = resource.Body('progress', type=int) #: The ID of the project this server is associated with. project_id = resource.Body('tenant_id') #: The private IPv4 address of this server private_v4 = resource.Computed('private_v4', default='') #: The private IPv6 address of this server private_v6 = resource.Computed('private_v6', default='') #: The public IPv4 address of this server public_v4 = resource.Computed('public_v4', default='') #: The public IPv6 address of this server public_v6 = resource.Computed('public_v6', default='') #: The UUID of the ramdisk image when using an AMI. Will be null if not. #: By default, it appears in the response for administrative users only. ramdisk_id = resource.Body('OS-EXT-SRV-ATTR:ramdisk_id') #: The reservation id for the server. This is an id that can be #: useful in tracking groups of servers created with multiple create, #: that will all have the same reservation_id. By default, it appears #: in the response for administrative users only. reservation_id = resource.Body('OS-EXT-SRV-ATTR:reservation_id') #: The root device name for the instance By default, it appears in the #: response for administrative users only. 
root_device_name = resource.Body('OS-EXT-SRV-ATTR:root_device_name') #: The dictionary of data to send to the scheduler. scheduler_hints = resource.Body('OS-SCH-HNT:scheduler_hints', type=dict) #: A list of applicable security groups. Each group contains keys for #: description, name, id, and rules. security_groups = resource.Body( 'security_groups', type=list, list_type=dict ) #: The UUIDs of the server groups to which the server belongs. #: Currently this can contain at most one entry. server_groups = resource.Body('server_groups', type=list) #: The state this server is in. Valid values include ``ACTIVE``, #: ``BUILDING``, ``DELETED``, ``ERROR``, ``HARD_REBOOT``, ``PASSWORD``, #: ``PAUSED``, ``REBOOT``, ``REBUILD``, ``RESCUED``, ``RESIZED``, #: ``REVERT_RESIZE``, ``SHUTOFF``, ``SOFT_DELETED``, ``STOPPED``, #: ``SUSPENDED``, ``UNKNOWN``, or ``VERIFY_RESIZE``. status = resource.Body('status') #: The task state of this server. task_state = resource.Body('OS-EXT-STS:task_state') #: The timestamp when the server was terminated (if it has been). terminated_at = resource.Body('OS-SRV-USG:terminated_at') #: A list of trusted certificate IDs, that were used during image #: signature verification to verify the signing certificate. trusted_image_certificates = resource.Body( 'trusted_image_certificates', type=list ) #: Timestamp of when this server was last updated. updated_at = resource.Body('updated') #: Configuration information or scripts to use upon launch. #: Must be Base64 encoded. user_data = resource.Body('OS-EXT-SRV-ATTR:user_data') #: The ID of the owners of this server. user_id = resource.Body('user_id') #: The VM state of this server. 
vm_state = resource.Body('OS-EXT-STS:vm_state') def _prepare_request( self, requires_id=True, prepend_key=True, base_path=None, **kwargs, ): request = super()._prepare_request( requires_id=requires_id, prepend_key=prepend_key, base_path=base_path, ) server_body = request.body[self.resource_key] # Some names exist without prefix on requests but with a prefix # on responses. If we find that we've populated one of these # attributes with something and then go to make a request, swap out # the name to the bare version. # Availability Zones exist with a prefix on response, but not request az_key = "OS-EXT-AZ:availability_zone" if az_key in server_body: server_body["availability_zone"] = server_body.pop(az_key) # User Data exists with a prefix on response, but not request ud_key = "OS-EXT-SRV-ATTR:user_data" if ud_key in server_body: server_body["user_data"] = server_body.pop(ud_key) # Scheduler hints are sent in a top-level scope, not within the # resource_key scope like everything else. If we try to send # scheduler_hints, pop them out of the resource_key scope and into # their own top-level scope. hint_key = "OS-SCH-HNT:scheduler_hints" if hint_key in server_body: request.body[hint_key] = server_body.pop(hint_key) return request def _action(self, session, body, microversion=None): """Preform server actions given the message body.""" # NOTE: This is using Server.base_path instead of self.base_path # as both Server and ServerDetail instances can be acted on, but # the URL used is sans any additional /detail/ part. url = utils.urljoin(Server.base_path, self.id, 'action') headers = {'Accept': ''} # these aren't all necessary "commit" actions (i.e. updates) but it's # good enough... 
if microversion is None: microversion = self._get_microversion(session, action='commit') response = session.post( url, json=body, headers=headers, microversion=microversion, ) exceptions.raise_from_response(response) return response def change_password(self, session, password, *, microversion=None): """Change the administrator password to the given password. :param session: The session to use for making this request. :param password: The new password. :returns: None """ body = {'changePassword': {'adminPass': password}} self._action(session, body, microversion=microversion) def get_password(self, session, *, microversion=None): """Get the encrypted administrator password. :param session: The session to use for making this request. :returns: The encrypted administrator password. """ url = utils.urljoin(Server.base_path, self.id, 'os-server-password') if microversion is None: microversion = self._get_microversion(session, action='commit') response = session.get(url, microversion=microversion) exceptions.raise_from_response(response) data = response.json() return data.get('password') def clear_password(self, session, *, microversion=None): """Clear the administrator password. This removes the password from the database. It does not actually change the server password. :param session: The session to use for making this request. :returns: None """ url = utils.urljoin(Server.base_path, self.id, 'os-server-password') if microversion is None: microversion = self._get_microversion(session, action='commit') response = session.delete(url, microversion=microversion) exceptions.raise_from_response(response) def reboot(self, session, reboot_type): """Reboot server where reboot_type might be 'SOFT' or 'HARD'. :param session: The session to use for making this request. :param reboot_type: The type of reboot. One of: ``SOFT``, ``HARD``. 
:returns: None """ body = {'reboot': {'type': reboot_type}} self._action(session, body) def force_delete(self, session): """Force delete the server. :param session: The session to use for making this request. :returns: None """ body = {'forceDelete': None} self._action(session, body) def rebuild( self, session, image, name=UNSET, admin_password=UNSET, preserve_ephemeral=UNSET, access_ipv4=UNSET, access_ipv6=UNSET, metadata=UNSET, user_data=UNSET, key_name=UNSET, description=UNSET, trusted_image_certificates=UNSET, hostname=UNSET, ): """Rebuild the server with the given arguments. :param session: The session to use for making this request. :param image: The image to rebuild to. Either an ID or a :class:`~openstack.image.v1.image.Image` instance. :param name: A name to set on the rebuilt server. (Optional) :param admin_password: An admin password to set on the rebuilt server. (Optional) :param preserve_ephemeral: Whether to preserve the ephemeral drive during the rebuild. (Optional) :param access_ipv4: An IPv4 address that will be used to access the rebuilt server. (Optional) :param access_ipv6: An IPv6 address that will be used to access the rebuilt server. (Optional) :param metadata: Metadata to set on the updated server. (Optional) :param user_data: User data to set on the updated server. (Optional) :param key_name: A key name to set on the updated server. (Optional) :param description: The description to set on the updated server. (Optional) (Requires API microversion 2.19) :param trusted_image_certificates: The trusted image certificates to set on the updated server. (Optional) (Requires API microversion 2.78) :param hostname: The hostname to set on the updated server. (Optional) (Requires API microversion 2.90) :returns: The updated server. 
""" action = {'imageRef': resource.Resource._get_id(image)} if preserve_ephemeral is not UNSET: action['preserve_ephemeral'] = preserve_ephemeral if name is not UNSET: action['name'] = name if admin_password is not UNSET: action['adminPass'] = admin_password if access_ipv4 is not UNSET: action['accessIPv4'] = access_ipv4 if access_ipv6 is not UNSET: action['accessIPv6'] = access_ipv6 if metadata is not UNSET: action['metadata'] = metadata if user_data is not UNSET: action['user_data'] = user_data if key_name is not UNSET: action['key_name'] = key_name if description is not UNSET: action['description'] = description if trusted_image_certificates is not UNSET: action['trusted_image_certificates'] = trusted_image_certificates if hostname is not UNSET: action['hostname'] = hostname body = {'rebuild': action} response = self._action(session, body) self._translate_response(response) return self def resize(self, session, flavor): """Resize server to flavor reference. :param session: The session to use for making this request. :param flavor: The server to resize to. :returns: None """ body = {'resize': {'flavorRef': flavor}} self._action(session, body) def confirm_resize(self, session): """Confirm the resize of the server. :param session: The session to use for making this request. :returns: None """ body = {'confirmResize': None} self._action(session, body) def revert_resize(self, session): """Revert the resize of the server. :param session: The session to use for making this request. :returns: None """ body = {'revertResize': None} self._action(session, body) def create_image(self, session, name, metadata=None): """Create image from server. :param session: The session to use for making this request. :param name: The name to use for the created image. :param metadata: Metadata to set on the created image. 
(Optional) :returns: None """ action = {'name': name} if metadata is not None: action['metadata'] = metadata body = {'createImage': action} # You won't believe it - wait, who am I kidding - of course you will! # Nova returns the URL of the image created in the Location # header of the response. (what?) But, even better, the URL it responds # with has a very good chance of being wrong (it is built from # nova.conf values that point to internal API servers in any cloud # large enough to have both public and internal endpoints. # However, nobody has ever noticed this because novaclient doesn't # actually use that URL - it extracts the id from the end of # the url, then returns the id. This leads us to question: # a) why Nova is going to return a value in a header # b) why it's going to return data that probably broken # c) indeed the very nature of the fabric of reality # Although it fills us with existential dread, we have no choice but # to follow suit like a lemming being forced over a cliff by evil # producers from Disney. microversion = None if utils.supports_microversion(session, '2.45'): microversion = '2.45' response = self._action(session, body, microversion) try: # There might be a body, there might not be response_body = response.json() except Exception: response_body = None if response_body and 'image_id' in response_body: image_id = response_body['image_id'] else: image_id = response.headers['Location'].rsplit('/', 1)[1] return image_id def add_security_group(self, session, security_group_name): """Add a security group to the server. :param session: The session to use for making this request. :param security_group_name: The security group to add to the server. :returns: None """ body = {"addSecurityGroup": {"name": security_group_name}} self._action(session, body) def remove_security_group(self, session, security_group_name): """Remove a security group from the server. :param session: The session to use for making this request. 
:param security_group_name: The security group to remove from the server. :returns: None """ body = {"removeSecurityGroup": {"name": security_group_name}} self._action(session, body) def reset_state(self, session, state): """Reset server state. This is admin-only by default. :param session: The session to use for making this request. :param state: The state to set on the server. :returns: None """ body = {"os-resetState": {"state": state}} self._action(session, body) def add_fixed_ip(self, session, network_id): """Add a fixed IP to the server. This is effectively an alias for adding a network. :param session: The session to use for making this request. :param network_id: The network to connect the server to. :returns: None """ body = {"addFixedIp": {"networkId": network_id}} self._action(session, body) def remove_fixed_ip(self, session, address): """Remove a fixed IP from the server. This is effectively an alias from removing a port from the server. :param session: The session to use for making this request. :param network_id: The address to remove from the server. :returns: None """ body = {"removeFixedIp": {"address": address}} self._action(session, body) def add_floating_ip(self, session, address, fixed_address=None): """Add a floating IP to the server. :param session: The session to use for making this request. :param address: The floating IP address to associate with the server. :param fixed_address: A fixed IP address with which to associated the floating IP. (Optional) :returns: None """ body = {"addFloatingIp": {"address": address}} if fixed_address is not None: body['addFloatingIp']['fixed_address'] = fixed_address self._action(session, body) def remove_floating_ip(self, session, address): """Remove a floating IP from the server. :param session: The session to use for making this request. :param address: The floating IP address to disassociate from the server. 
:returns: None """ body = {"removeFloatingIp": {"address": address}} self._action(session, body) def backup(self, session, name, backup_type, rotation): """Create a backup of the server. :param session: The session to use for making this request. :param name: The name to use for the backup image. :param backup_type: The type of backup. The value and meaning of this atribute is user-defined and can be used to separate backups of different types. For example, this could be used to distinguish between ``daily`` and ``weekly`` backups. :param rotation: The number of backups to retain. All images older than the rotation'th image will be deleted. :returns: None """ body = { "createBackup": { "name": name, "backup_type": backup_type, "rotation": rotation, } } self._action(session, body) def pause(self, session): """Pause the server. :param session: The session to use for making this request. :returns: None """ body = {"pause": None} self._action(session, body) def unpause(self, session): """Unpause the server. :param session: The session to use for making this request. :returns: None """ body = {"unpause": None} self._action(session, body) def suspend(self, session): """Suspend the server. :param session: The session to use for making this request. :returns: None """ body = {"suspend": None} self._action(session, body) def resume(self, session): """Resume the server. :param session: The session to use for making this request. :returns: None """ body = {"resume": None} self._action(session, body) def lock(self, session, locked_reason=None): """Lock the server. :param session: The session to use for making this request. :param locked_reason: The reason for locking the server. :returns: None """ body: ty.Dict[str, ty.Any] = {"lock": None} if locked_reason is not None: body["lock"] = { "locked_reason": locked_reason, } self._action(session, body) def unlock(self, session): """Unlock the server. :param session: The session to use for making this request. 
:returns: None """ body = {"unlock": None} self._action(session, body) def rescue(self, session, admin_pass=None, image_ref=None): """Rescue the server. This is admin-only by default. :param session: The session to use for making this request. :param admin_pass: A new admin password to set on the rescued server. (Optional) :param image_ref: The image to use when rescuing the server. If not provided, the server will use the existing image. (Optional) :returns: None """ body: ty.Dict[str, ty.Any] = {"rescue": {}} if admin_pass is not None: body["rescue"]["adminPass"] = admin_pass if image_ref is not None: body["rescue"]["rescue_image_ref"] = image_ref self._action(session, body) def unrescue(self, session): """Unrescue the server. This is admin-only by default. :param session: The session to use for making this request. :returns: None """ body = {"unrescue": None} self._action(session, body) def evacuate( self, session, host=None, admin_pass=None, force=None, on_shared_storage=None, ): """Evacuate the server. :param session: The session to use for making this request. :param host: The host to evacuate the instance to. (Optional) :param admin_pass: The admin password to set on the evacuated instance. (Optional) :param force: Whether to force evacuation. :param on_shared_storage: Whether the host is using shared storage. (Optional) (Only supported before microversion 2.14) :returns: None """ body: ty.Dict[str, ty.Any] = {"evacuate": {}} if host is not None: body["evacuate"]["host"] = host if admin_pass is not None: body["evacuate"]["adminPass"] = admin_pass if force is not None: body["evacuate"]["force"] = force if on_shared_storage is not None: body["evacuate"]["onSharedStorage"] = on_shared_storage self._action(session, body) def start(self, session): """Start the server. :param session: The session to use for making this request. :returns: None """ body = {"os-start": None} self._action(session, body) def stop(self, session): """Stop the server. 
:param session: The session to use for making this request. :returns: None """ body = {"os-stop": None} self._action(session, body) def restore(self, session): """Restore the server. This is only supported if the server is soft-deleted. This is cloud-specific. :param session: The session to use for making this request. :returns: None """ body = {"restore": None} self._action(session, body) def shelve(self, session): """Shelve the server. :param session: The session to use for making this request. :returns: None """ body = {"shelve": None} self._action(session, body) def shelve_offload(self, session): """Shelve-offload the server. :param session: The session to use for making this request. :returns: None """ body = {"shelveOffload": None} self._action(session, body) def unshelve(self, session, availability_zone=_sentinel, host=None): """Unshelve the server. :param session: The session to use for making this request. :param availability_zone: If specified the instance will be unshelved to the availability_zone. If None is passed the instance defined availability_zone is unpin and the instance will be scheduled to any availability_zone (free scheduling). If not specified the instance will be unshelved to either its defined availability_zone or any availability_zone (free scheduling). :param host: If specified the host to unshelve the instance. """ data = {} if host: data["host"] = host if availability_zone is None or isinstance(availability_zone, str): data["availability_zone"] = availability_zone body = {'unshelve': data or None} self._action(session, body) def migrate(self, session): """Migrate the server. :param session: The session to use for making this request. :returns: None """ body = {"migrate": None} self._action(session, body) def trigger_crash_dump(self, session): """Trigger a crash dump for the server. :param session: The session to use for making this request. 
:returns: None """ body = {"trigger_crash_dump": None} self._action(session, body) def get_console_output(self, session, length=None): """Get console output for the server. :param session: The session to use for making this request. :param length: The max length of the console output to return. (Optional) :returns: None """ body: ty.Dict[str, ty.Any] = {"os-getConsoleOutput": {}} if length is not None: body["os-getConsoleOutput"]["length"] = length resp = self._action(session, body) return resp.json() def get_console_url(self, session, console_type): """Get the console URL for the server. :param session: The session to use for making this request. :param console_type: The type of console to return. This is cloud-specific. One of: ``novnc``, ``xvpvnc``, ``spice-html5``, ``rdp-html5``, ``serial``. :returns: None """ action = CONSOLE_TYPE_ACTION_MAPPING.get(console_type) if not action: raise ValueError("Unsupported console type %s" % console_type) body = {action: {'type': console_type}} resp = self._action(session, body) return resp.json().get('console') def live_migrate( self, session, host, force, block_migration, disk_over_commit=False, ): """Live migrate the server. :param session: The session to use for making this request. :param host: The host to live migrate the server to. (Optional) :param force: Whether to force the migration. (Optional) :param block_migration: Whether to do block migration. One of: ``True``, ``False``, ``'auto'``. (Optional) :param disk_over_commit: Whether to allow disk over-commit on the destination host. 
(Optional) :returns: None """ if utils.supports_microversion(session, '2.30'): return self._live_migrate_30( session, host, force=force, block_migration=block_migration, ) elif utils.supports_microversion(session, '2.25'): return self._live_migrate_25( session, host, force=force, block_migration=block_migration, ) else: return self._live_migrate( session, host, force=force, block_migration=block_migration, disk_over_commit=disk_over_commit, ) def _live_migrate_30(self, session, host, force, block_migration): microversion = '2.30' body = {'host': None} if block_migration is None: block_migration = 'auto' body['block_migration'] = block_migration if host: body['host'] = host if force: body['force'] = force self._action( session, {'os-migrateLive': body}, microversion=microversion, ) def _live_migrate_25(self, session, host, force, block_migration): microversion = '2.25' body = {'host': None} if block_migration is None: block_migration = 'auto' body['block_migration'] = block_migration if host: body['host'] = host if not force: raise ValueError( "Live migration on this cloud implies 'force'" " if the 'host' option has been given and it is not" " possible to disable. It is recommended to not use 'host'" " at all on this cloud as it is inherently unsafe, but if" " it is unavoidable, please supply 'force=True' so that it" " is clear you understand the risks." ) self._action( session, {'os-migrateLive': body}, microversion=microversion, ) def _live_migrate( self, session, host, force, block_migration, disk_over_commit, ): microversion = None body: ty.Dict[str, ty.Any] = { 'host': None, } if block_migration == 'auto': raise ValueError( "Live migration on this cloud does not support 'auto' as" " a parameter to block_migration, but only True and False." 
) body['block_migration'] = block_migration or False body['disk_over_commit'] = disk_over_commit or False if host: body['host'] = host if not force: raise ValueError( "Live migration on this cloud implies 'force'" " if the 'host' option has been given and it is not" " possible to disable. It is recommended to not use 'host'" " at all on this cloud as it is inherently unsafe, but if" " it is unavoidable, please supply 'force=True' so that it" " is clear you understand the risks." ) self._action( session, {'os-migrateLive': body}, microversion=microversion, ) def fetch_topology(self, session): """Fetch the topology information for the server. :param session: The session to use for making this request. :returns: None """ utils.require_microversion(session, 2.78) url = utils.urljoin(Server.base_path, self.id, 'topology') response = session.get(url) exceptions.raise_from_response(response) try: return response.json() except ValueError: pass def fetch_security_groups(self, session): """Fetch security groups of the server. :param session: The session to use for making this request. :returns: Updated Server instance. """ url = utils.urljoin(Server.base_path, self.id, 'os-security-groups') response = session.get(url) exceptions.raise_from_response(response) try: data = response.json() if 'security_groups' in data: self.security_groups = data['security_groups'] except ValueError: pass return self ServerDetail = Server ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/server_action.py0000664000175000017500000000664500000000000023451 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ServerActionEvent(resource.Resource): # Added the 'details' field in 2.84 _max_microversion = '2.84' #: The name of the event event = resource.Body('event') #: The date and time when the event was started. The date and time stamp #: format is ISO 8601 start_time = resource.Body('start_time') #: The date and time when the event finished. The date and time stamp #: format is ISO 8601 finish_time = resource.Body('finish_time') #: The result of the event result = resource.Body('result') #: The traceback stack if an error occurred in this event. #: This is only visible to cloud admins by default. traceback = resource.Body('traceback') #: The name of the host on which the event occurred. #: This is only visible to cloud admins by default. host = resource.Body('host') #: An obfuscated hashed host ID string, or the empty string if there is no #: host for the event. This is a hashed value so will not actually look #: like a hostname, and is hashed with data from the project_id, so the #: same physical host as seen by two different project_ids will be #: different. This is useful when within the same project you need to #: determine if two events occurred on the same or different physical #: hosts. host_id = resource.Body('hostId') #: Details of the event. May be unset. 
details = resource.Body('details') class ServerAction(resource.Resource): resource_key = 'instanceAction' resources_key = 'instanceActions' base_path = '/servers/%(server_id)s/os-instance-actions' # capabilities allow_fetch = True allow_list = True # Properties #: The ID of the server that this action relates to. server_id = resource.URI('server_id') #: The name of the action. action = resource.Body('action') # FIXME(stephenfin): This conflicts since there is a server ID in the URI # *and* in the body. We need a field that handles both or we need to use # different names. # #: The ID of the server that this action relates to. # server_id = resource.Body('instance_uuid') #: The ID of the request that this action related to. request_id = resource.Body('request_id', alternate_id=True) #: The ID of the user which initiated the server action. user_id = resource.Body('user_id') #: The ID of the project that this server belongs to. project_id = resource.Body('project_id') start_time = resource.Body('start_time') #: The related error message for when an action fails. message = resource.Body('message') #: Events events = resource.Body('events', type=list, list_type=ServerActionEvent) # events.details field added in 2.84 _max_microversion = '2.84' _query_mapping = resource.QueryParameters( changes_since="changes-since", changes_before="changes-before", ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/server_diagnostics.py0000664000175000017500000000411500000000000024471 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ServerDiagnostics(resource.Resource): resource_key = 'diagnostics' base_path = '/servers/%(server_id)s/diagnostics' # capabilities allow_fetch = True requires_id = False _max_microversion = '2.48' #: Indicates whether or not a config drive was used for this server. has_config_drive = resource.Body('config_drive') #: The current state of the VM. state = resource.Body('state') #: The driver on which the VM is running. driver = resource.Body('driver') #: The hypervisor on which the VM is running. hypervisor = resource.Body('hypervisor') #: The hypervisor OS. hypervisor_os = resource.Body('hypervisor_os') #: The amount of time in seconds that the VM has been running. uptime = resource.Body('uptime') #: The number of vCPUs. num_cpus = resource.Body('num_cpus') #: The number of disks. num_disks = resource.Body('num_disks') #: The number of vNICs. num_nics = resource.Body('num_nics') #: The dictionary with information about VM memory usage. memory_details = resource.Body('memory_details') #: The list of dictionaries with detailed information about VM CPUs. cpu_details = resource.Body('cpu_details') #: The list of dictionaries with detailed information about VM disks. disk_details = resource.Body('disk_details') #: The list of dictionaries with detailed information about VM NICs. nic_details = resource.Body('nic_details') #: The ID for the server. 
server_id = resource.URI('server_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/server_group.py0000664000175000017500000001301000000000000023310 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class ServerGroup(resource.Resource): resource_key = 'server_group' resources_key = 'server_groups' base_path = '/os-server-groups' _query_mapping = resource.QueryParameters("all_projects") _max_microversion = '2.64' # capabilities allow_create = True allow_fetch = True allow_delete = True allow_list = True # Properties #: A name identifying the server group name = resource.Body('name') #: The list of policies supported by the server group (till 2.63) policies = resource.Body('policies') #: The policy field represents the name of the policy (from 2.64) policy = resource.Body('policy') #: The list of members in the server group member_ids = resource.Body('members') #: The metadata associated with the server group. This is always empty and #: only used for preserving compatibility. metadata = resource.Body('metadata') #: The project ID who owns the server group. project_id = resource.Body('project_id') #: The rules field, which is a dict, can be applied to the policy. #: Currently, only the max_server_per_host rule is supported for the #: anti-affinity policy. 
The max_server_per_host rule allows specifying how #: many members of the anti-affinity group can reside on the same compute #: host. If not specified, only one member from the same anti-affinity #: group can reside on a given host. rules = resource.Body('rules', type=dict) #: The user ID who owns the server group user_id = resource.Body('user_id') # TODO(stephenfin): It would be nice to have a hookpoint to do this # microversion-based request manipulation, but we don't have anything like # that right now def create(self, session, prepend_key=True, base_path=None, **params): """Create a remote resource based on this instance. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param prepend_key: A boolean indicating whether the resource_key should be prepended in a resource creation request. Default to True. :param str base_path: Base part of the URI for creating resources, if different from :data:`~openstack.resource.Resource.base_path`. :param dict params: Additional params to pass. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_create` is not set to ``True``. """ if not self.allow_create: raise exceptions.MethodNotSupported(self, 'create') session = self._get_session(session) microversion = self._get_microversion(session, action='create') requires_id = ( self.create_requires_id if self.create_requires_id is not None else self.create_method == 'PUT' ) if self.create_exclude_id_from_body: self._body._dirty.discard("id") # `policy` and `rules` are added with mv=2.64. In it also # `policies` are removed. 
if utils.supports_microversion(session, '2.64'): if self.policies: if not self.policy and isinstance(self.policies, list): self.policy = self.policies[0] self._body.clean(only={'policies'}) microversion = self._max_microversion else: # microversion < 2.64 if self.rules: msg = ( "API version 2.64 is required to set rules, but " "it is not available." ) raise exceptions.NotSupported(msg) if self.policy: if not self.policies: self.policies = [self.policy] self._body.clean(only={'policy'}) if self.create_method == 'POST': request = self._prepare_request( requires_id=requires_id, prepend_key=prepend_key, base_path=base_path, ) response = session.post( request.url, json=request.body, headers=request.headers, microversion=microversion, params=params, ) else: raise exceptions.ResourceFailure( "Invalid create method: %s" % self.create_method ) has_body = ( self.has_body if self.create_returns_body is None else self.create_returns_body ) self.microversion = microversion self._translate_response(response, has_body=has_body) # direct comparision to False since we need to rule out None if self.has_body and self.create_returns_body is False: # fetch the body if it's required but not returned by create return self.fetch(session) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/server_interface.py0000664000175000017500000000274300000000000024127 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ServerInterface(resource.Resource): resource_key = 'interfaceAttachment' resources_key = 'interfaceAttachments' base_path = '/servers/%(server_id)s/os-interface' # capabilities allow_create = True allow_fetch = True allow_commit = False allow_delete = True allow_list = True #: Fixed IP addresses with subnet IDs. fixed_ips = resource.Body('fixed_ips') #: The MAC address. mac_addr = resource.Body('mac_addr') #: The network ID. net_id = resource.Body('net_id') #: The ID of the port for which you want to create an interface. port_id = resource.Body('port_id', alternate_id=True) #: The port state. port_state = resource.Body('port_state') #: The ID for the server. server_id = resource.URI('server_id') #: Tags for the virtual interfaces. tag = resource.Body('tag') # tag introduced in 2.70 _max_microversion = '2.70' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/server_ip.py0000664000175000017500000000366500000000000022603 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource from openstack import utils class ServerIP(resource.Resource): resources_key = 'addresses' base_path = '/servers/%(server_id)s/ips' # capabilities allow_list = True # Properties #: The IP address. 
The format of the address depends on :attr:`version` address = resource.Body('addr') #: The network label, such as public or private. network_label = resource.URI('network_label') #: The ID for the server. server_id = resource.URI('server_id') # Version of the IP protocol. Currently either 4 or 6. version = resource.Body('version') @classmethod def list( cls, session, paginated=False, server_id=None, network_label=None, base_path=None, **params ): if base_path is None: base_path = cls.base_path url = base_path % {"server_id": server_id} if network_label is not None: url = utils.urljoin(url, network_label) resp = session.get(url) resp = resp.json() if network_label is None: resp = resp[cls.resources_key] for label, addresses in resp.items(): for address in addresses: yield cls.existing( network_label=label, address=address["addr"], version=address["version"], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/server_migration.py0000664000175000017500000000731000000000000024153 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import resource from openstack import utils class ServerMigration(resource.Resource): resource_key = 'migration' resources_key = 'migrations' base_path = '/servers/%(server_id)s/migrations' # capabilities allow_fetch = True allow_list = True allow_delete = True #: The ID for the server from the URI of the resource server_id = resource.URI('server_id') #: The date and time when the resource was created. created_at = resource.Body('created_at') #: The target host of the migration. dest_host = resource.Body('dest_host') #: The target compute of the migration. dest_compute = resource.Body('dest_compute') #: The target node of the migration. dest_node = resource.Body('dest_node') #: The amount of disk, in bytes, that has been processed during the #: migration. disk_processed_bytes = resource.Body('disk_processed_bytes') #: The amount of disk, in bytes, that still needs to be migrated. disk_remaining_bytes = resource.Body('disk_remaining_bytes') #: The total amount of disk, in bytes, that needs to be migrated. disk_total_bytes = resource.Body('disk_total_bytes') #: The amount of memory, in bytes, that has been processed during the #: migration. memory_processed_bytes = resource.Body('memory_processed_bytes') #: The amount of memory, in bytes, that still needs to be migrated. memory_remaining_bytes = resource.Body('memory_remaining_bytes') #: The total amount of memory, in bytes, that needs to be migrated. memory_total_bytes = resource.Body('memory_total_bytes') #: The ID of the project that initiated the server migration (since #: microversion 2.80) project_id = resource.Body('project_id') #: The UUID of the server from the response body server_uuid = resource.Body('server_uuid') #: The source compute of the migration. source_compute = resource.Body('source_compute') #: The source node of the migration. source_node = resource.Body('source_node') #: The current status of the migration. 
status = resource.Body('status') #: The date and time when the resource was last updated. updated_at = resource.Body('updated_at') #: The ID of the user that initiated the server migration (since #: microversion 2.80) user_id = resource.Body('user_id') #: The UUID of the migration (since microversion 2.59) uuid = resource.Body('uuid', alternate_id=True) _max_microversion = '2.80' def _action(self, session, body): """Preform server migration actions given the message body.""" session = self._get_session(session) microversion = self._get_microversion(session, action='list') url = utils.urljoin( self.base_path % {'server_id': self.server_id}, self.id, 'action', ) response = session.post(url, microversion=microversion, json=body) exceptions.raise_from_response(response) return response def force_complete(self, session): """Force on-going live migration to complete.""" body = {'force_complete': None} self._action(session, body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/server_remote_console.py0000664000175000017500000000360000000000000025175 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource from openstack import utils CONSOLE_TYPE_PROTOCOL_MAPPING = { 'novnc': 'vnc', 'xvpvnc': 'vnc', 'spice-html5': 'spice', 'rdp-html5': 'rdp', 'serial': 'serial', 'webmks': 'mks', } class ServerRemoteConsole(resource.Resource): resource_key = 'remote_console' base_path = '/servers/%(server_id)s/remote-consoles' # capabilities allow_create = True allow_fetch = False allow_commit = False allow_delete = False allow_list = False _max_microversion = '2.8' #: Protocol of the remote console. protocol = resource.Body('protocol') #: Type of the remote console. type = resource.Body('type') #: URL used to connect to the console. url = resource.Body('url') #: The ID for the server. server_id = resource.URI('server_id') def create(self, session, prepend_key=True, base_path=None, **params): if not self.protocol: self.protocol = CONSOLE_TYPE_PROTOCOL_MAPPING.get(self.type) if ( not utils.supports_microversion(session, '2.8') and self.type == 'webmks' ): raise ValueError( 'Console type webmks is not supported on server side' ) return super().create( session, prepend_key=prepend_key, base_path=base_path, **params ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/service.py0000664000175000017500000001166300000000000022242 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import resource from openstack import utils class Service(resource.Resource): resource_key = 'service' resources_key = 'services' base_path = '/os-services' # capabilities allow_list = True allow_commit = True allow_delete = True _query_mapping = resource.QueryParameters( 'name', 'binary', 'host', name='binary', ) # Properties #: The availability zone of service availability_zone = resource.Body("zone") #: Binary name of service binary = resource.Body('binary') #: Disabled reason of service disabled_reason = resource.Body('disabled_reason') #: Whether or not this service was forced down manually by an administrator #: after the service was fenced is_forced_down = resource.Body('forced_down', type=bool) #: The name of the host where service runs host = resource.Body('host') #: Service name name = resource.Body('name', alias='binary') #: State of service state = resource.Body('state') #: Status of service status = resource.Body('status') #: The date and time when the resource was updated updated_at = resource.Body('updated_at') _max_microversion = '2.69' @classmethod def find(cls, session, name_or_id, ignore_missing=True, **params): # No direct request possible, thus go directly to list data = cls.list(session, **params) result = None for maybe_result in data: # Since ID might be both int and str force cast id_value = str(cls._get_id(maybe_result)) name_value = maybe_result.name if str(name_or_id) in (id_value, name_value): if 'host' in params and maybe_result['host'] != params['host']: continue # Only allow one resource to be found. If we already # found a match, raise an exception to show it. if result is None: result = maybe_result else: msg = "More than one %s exists with the name '%s'." 
msg = msg % (cls.__name__, name_or_id) raise exceptions.DuplicateResource(msg) if result is not None: return result if ignore_missing: return None raise exceptions.NotFoundException( f"No {cls.__name__} found for {name_or_id}" ) def commit(self, session, prepend_key=False, **kwargs): # we need to set prepend_key to false return super().commit( session, prepend_key=prepend_key, **kwargs, ) def _action(self, session, action, body, microversion=None): if not microversion: microversion = session.default_microversion url = utils.urljoin(Service.base_path, action) response = session.put(url, json=body, microversion=microversion) self._translate_response(response) return self def set_forced_down(self, session, host=None, binary=None, forced=False): """Update forced_down information of a service.""" microversion = session.default_microversion body = {} if not host: host = self.host if not binary: binary = self.binary body = { 'host': host, 'binary': binary, } if utils.supports_microversion(session, '2.11'): body['forced_down'] = forced # Using forced_down works only 2.11-2.52, therefore pin it microversion = '2.11' # This will not work with newest microversions return self._action( session, 'force-down', body, microversion=microversion, ) force_down = set_forced_down def enable(self, session, host, binary): """Enable service.""" body = { 'host': host, 'binary': binary, } return self._action(session, 'enable', body) def disable(self, session, host, binary, reason=None): """Disable service.""" body = { 'host': host, 'binary': binary, } if not reason: action = 'disable' else: body['disabled_reason'] = reason action = 'disable-log-reason' return self._action(session, action, body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/usage.py0000664000175000017500000000714000000000000021701 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); 
you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ServerUsage(resource.Resource): resource_key = None resources_key = None # Capabilities allow_create = False allow_fetch = False allow_delete = False allow_list = False allow_commit = False # Properties #: The duration that the server exists (in hours). hours = resource.Body('hours') #: The display name of a flavor. flavor = resource.Body('flavor') #: The UUID of the server. instance_id = resource.Body('instance_id') #: The server name. name = resource.Body('name') #: The UUID of the project in a multi-tenancy cloud. project_id = resource.Body('tenant_id') #: The memory size of the server (in MiB). memory_mb = resource.Body('memory_mb') #: The sum of the root disk size of the server and the ephemeral disk size #: of it (in GiB). local_gb = resource.Body('local_gb') #: The number of virtual CPUs that the server uses. vcpus = resource.Body('vcpus') #: The date and time when the server was launched. started_at = resource.Body('started_at') #: The date and time when the server was deleted. ended_at = resource.Body('ended_at') #: The VM state. state = resource.Body('state') #: The uptime of the server. uptime = resource.Body('uptime') class Usage(resource.Resource): resource_key = 'tenant_usage' resources_key = 'tenant_usages' base_path = '/os-simple-tenant-usage' # Capabilities allow_create = False allow_fetch = True allow_delete = False allow_list = True allow_commit = False # TODO(stephenfin): Add 'start', 'end'. These conflict with the body # responses though. 
_query_mapping = resource.QueryParameters( "detailed", "limit", "marker", "start", "end", ) # Properties #: The UUID of the project in a multi-tenancy cloud. project_id = resource.Body('tenant_id') #: A list of the server usage objects. server_usages = resource.Body( 'server_usages', type=list, list_type=ServerUsage, ) #: Multiplying the server disk size (in GiB) by hours the server exists, #: and then adding that all together for each server. total_local_gb_usage = resource.Body('total_local_gb_usage') #: Multiplying the number of virtual CPUs of the server by hours the server #: exists, and then adding that all together for each server. total_vcpus_usage = resource.Body('total_vcpus_usage') #: Multiplying the server memory size (in MiB) by hours the server exists, #: and then adding that all together for each server. total_memory_mb_usage = resource.Body('total_memory_mb_usage') #: The total duration that servers exist (in hours). total_hours = resource.Body('total_hours') #: The beginning time to calculate usage statistics on compute and storage #: resources. start = resource.Body('start') #: The ending time to calculate usage statistics on compute and storage #: resources. stop = resource.Body('stop') _max_microversion = '2.75' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/v2/volume_attachment.py0000664000175000017500000000403000000000000024307 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class VolumeAttachment(resource.Resource): resource_key = 'volumeAttachment' resources_key = 'volumeAttachments' base_path = '/servers/%(server_id)s/os-volume_attachments' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters("limit", "offset") #: The ID for the server. server_id = resource.URI('server_id') #: Name of the device such as, /dev/vdb. device = resource.Body('device') #: The ID of the attachment. id = resource.Body('id') # FIXME(stephenfin): This conflicts since there is a server ID in the URI # *and* in the body. We need a field that handles both or we need to use # different names. # #: The UUID of the server # server_id = resource.Body('server_uuid') #: The ID of the attached volume. volume_id = resource.Body('volumeId', alternate_id=True) #: The UUID of the associated volume attachment in Cinder. attachment_id = resource.Body('attachment_id') #: The ID of the block device mapping record for the attachment bdm_id = resource.Body('bdm_uuid') #: Virtual device tags for the attachment. tag = resource.Body('tag') #: Indicates whether to delete the volume when server is destroyed delete_on_termination = resource.Body('delete_on_termination') # attachment_id (in responses) and bdm_id introduced in 2.89 _max_microversion = '2.89' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/compute/version.py0000664000175000017500000000154000000000000021731 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # capabilities allow_list = True # Properties links = resource.Body('links') status = resource.Body('status') updated = resource.Body('updated') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2493153 openstacksdk-4.0.0/openstack/config/0000775000175000017500000000000000000000000017463 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/__init__.py0000664000175000017500000000240000000000000021570 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys from openstack.config.loader import OpenStackConfig # noqa def get_cloud_region( service_key=None, options=None, app_name=None, app_version=None, load_yaml_config=True, load_envvars=True, **kwargs ): config = OpenStackConfig( load_yaml_config=load_yaml_config, load_envvars=load_envvars, app_name=app_name, app_version=app_version, ) if options: config.register_argparse_arguments(options, sys.argv, service_key) parsed_options = options.parse_known_args(sys.argv) else: parsed_options = None return config.get_one(options=parsed_options, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/_util.py0000664000175000017500000000362300000000000021155 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def normalize_keys(config): new_config = {} for key, value in config.items(): key = key.replace('-', '_') if isinstance(value, dict): new_config[key] = normalize_keys(value) elif isinstance(value, bool): new_config[key] = value elif isinstance(value, int) and key not in ( 'verbose_level', 'api_timeout', ): new_config[key] = str(value) elif isinstance(value, float): new_config[key] = str(value) else: new_config[key] = value return new_config def merge_clouds(old_dict, new_dict): """Like dict.update, except handling nested dicts.""" ret = old_dict.copy() for k, v in new_dict.items(): if isinstance(v, dict): if k in ret: ret[k] = merge_clouds(ret[k], v) else: ret[k] = v.copy() else: ret[k] = v return ret class VersionRequest: def __init__( self, version=None, min_api_version=None, max_api_version=None, default_microversion=None, ): self.version = version self.min_api_version = min_api_version self.max_api_version = max_api_version self.default_microversion = default_microversion ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/cloud_config.py0000664000175000017500000000155400000000000022475 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# TODO(mordred) This is only here to ease the OSC transition from openstack.config import cloud_region class CloudConfig(cloud_region.CloudRegion): def __init__(self, name, region, config, **kwargs): super().__init__(name, region, config, **kwargs) self.region = region ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/cloud_region.py0000664000175000017500000013622100000000000022513 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import os.path import typing as ty from urllib import parse import warnings try: import keyring except ImportError: keyring = None from keystoneauth1 import discover import keystoneauth1.exceptions.catalog from keystoneauth1.loading import adapter as ks_load_adap from keystoneauth1 import session as ks_session import os_service_types import requestsexceptions try: import statsd except ImportError: statsd = None try: import prometheus_client except ImportError: prometheus_client = None try: import influxdb except ImportError: influxdb = None from openstack import _log from openstack.config import _util from openstack.config import defaults as config_defaults from openstack import exceptions from openstack import proxy from openstack import version as openstack_version from openstack import warnings as os_warnings _logger = _log.setup_logging('openstack') SCOPE_KEYS = { 'domain_id', 'domain_name', 'project_id', 'project_name', 'system_scope', } # Sentinel for nonexistence _ENOENT = object() def _make_key(key, service_type): if not service_type: return key else: service_type = service_type.lower().replace('-', '_') return "_".join([service_type, key]) def _disable_service(config, service_type, reason=None): service_type = service_type.lower().replace('-', '_') key = f'has_{service_type}' config[key] = False if reason: d_key = _make_key('disabled_reason', service_type) config[d_key] = reason def _get_implied_microversion(version): if not version: return if '.' in version: # Some services historically had a .0 in their normal api version. # Neutron springs to mind with version "2.0". If a user has "2.0" # set in a variable or config file just because history, we don't # need to send any microversion headers. 
if version.split('.')[1] != "0": return version def from_session( session, name=None, region_name=None, force_ipv4=False, app_name=None, app_version=None, **kwargs, ): """Construct a CloudRegion from an existing `keystoneauth1.session.Session` When a Session already exists, we don't actually even need to go through the OpenStackConfig.get_one_cloud dance. We have a Session with Auth info. The only parameters that are really needed are adapter/catalog related. :param keystoneauth1.session.session session: An existing authenticated Session to use. :param str name: A name to use for this cloud region in logging. If left empty, the hostname of the auth_url found in the Session will be used. :param str region_name: The region name to connect to. :param bool force_ipv4: Whether or not to disable IPv6 support. Defaults to False. :param str app_name: Name of the application to be added to User Agent. :param str app_version: Version of the application to be added to User Agent. :param kwargs: Config settings for this cloud region. """ config_dict = config_defaults.get_defaults() config_dict.update(**kwargs) return CloudRegion( name=name, session=session, config=config_dict, region_name=region_name, force_ipv4=force_ipv4, app_name=app_name, app_version=app_version, ) def from_conf(conf, session=None, service_types=None, **kwargs): """Create a CloudRegion from oslo.config ConfigOpts. :param oslo_config.cfg.ConfigOpts conf: An oslo.config ConfigOpts containing keystoneauth1.Adapter options in sections named according to project (e.g. [nova], not [compute]). TODO: Current behavior is to use defaults if no such section exists, which may not be what we want long term. :param keystoneauth1.session.Session session: An existing authenticated Session to use. This is currently required. TODO: Load this (and auth) from the conf. :param service_types: A list/set of service types for which to look for and process config opts. If None, all known service types are processed. 
Note that we will not error if a supplied service type can not be processed successfully (unless you try to use the proxy, of course). This tolerates uses where the consuming code has paths for a given service, but those paths are not exercised for given end user setups, and we do not want to generate errors for e.g. missing/invalid conf sections in those cases. We also don't check to make sure your service types are spelled correctly - caveat implementor. :param kwargs: Additional keyword arguments to be passed directly to the CloudRegion constructor. :raise openstack.exceptions.ConfigException: If session is not specified. :return: An openstack.config.cloud_region.CloudRegion. """ if not session: # TODO(mordred) Fill this in - not needed for first stab with nova raise exceptions.ConfigException("A Session must be supplied.") config_dict = kwargs.pop('config', config_defaults.get_defaults()) stm = os_service_types.ServiceTypes() for st in stm.all_types_by_service_type: if service_types is not None and st not in service_types: _disable_service( config_dict, st, reason="Not in the list of requested service_types.", ) continue project_name = stm.get_project_name(st) if project_name not in conf: if '-' in project_name: project_name = project_name.replace('-', '_') if project_name not in conf: _disable_service( config_dict, st, reason="No section for project '{project}' (service type " "'{service_type}') was present in the config.".format( project=project_name, service_type=st ), ) continue opt_dict: ty.Dict[str, str] = {} # Populate opt_dict with (appropriately processed) Adapter conf opts try: ks_load_adap.process_conf_options(conf[project_name], opt_dict) except Exception as e: # NOTE(efried): This is for (at least) a couple of scenarios: # (1) oslo_config.cfg.NoSuchOptError when ksa adapter opts are not # registered in this section. # (2) TypeError, when opts are registered but bogus (e.g. # 'interface' and 'valid_interfaces' are both present). 
# We may want to consider (providing a kwarg giving the caller the # option of) blowing up right away for (2) rather than letting them # get all the way to the point of trying the service and having # *that* blow up. reason = ( "Encountered an exception attempting to process config " "for project '{project}' (service type " "'{service_type}'): {exception}".format( project=project_name, service_type=st, exception=e ) ) _logger.warning( "Disabling service '{service_type}': " "{reason}".format(service_type=st, reason=reason) ) _disable_service(config_dict, st, reason=reason) continue # Load them into config_dict under keys prefixed by ${service_type}_ for raw_name, opt_val in opt_dict.items(): config_name = _make_key(raw_name, st) config_dict[config_name] = opt_val return CloudRegion(session=session, config=config_dict, **kwargs) class CloudRegion: # TODO(efried): Doc the rest of the kwargs """The configuration for a Region of an OpenStack Cloud. A CloudRegion encapsulates the config information needed for connections to all of the services in a Region of a Cloud. :param str region_name: The default region name for all services in this CloudRegion. If both ``region_name`` and ``config['region_name']`` are specified, the kwarg takes precedence. May be overridden for a given ${service} via a ${service}_region_name key in the ``config`` dict. :param dict config: A dict of configuration values for the CloudRegion and its services. The key for a ${config_option} for a specific ${service} should be ${service}_${config_option}. For example, to configure the endpoint_override for the block_storage service, the ``config`` dict should contain:: 'block_storage_endpoint_override': 'http://...' 
To provide a default to be used if no service-specific override is present, just use the unprefixed ${config_option} as the service key, e.g.:: 'interface': 'public' """ def __init__( self, name=None, region_name=None, config=None, force_ipv4=False, auth_plugin=None, openstack_config=None, session_constructor=None, app_name=None, app_version=None, session=None, discovery_cache=None, extra_config=None, cache_expiration_time=0, cache_expirations=None, cache_path=None, cache_class='dogpile.cache.null', cache_arguments=None, password_callback=None, statsd_host=None, statsd_port=None, statsd_prefix=None, influxdb_config=None, collector_registry=None, cache_auth=False, ): self._name = name self.config = _util.normalize_keys(config) # NOTE(efried): For backward compatibility: a) continue to accept the # region_name kwarg; b) make it take precedence over (non-service_type- # specific) region_name set in the config dict. if region_name is not None: self.config['region_name'] = region_name self._extra_config = extra_config or {} self.log = _log.setup_logging('openstack.config') self._force_ipv4 = force_ipv4 self._auth = auth_plugin self._cache_auth = cache_auth self.load_auth_from_cache() self._openstack_config = openstack_config self._keystone_session = session self._session_constructor = session_constructor or ks_session.Session self._app_name = app_name self._app_version = app_version self._discovery_cache = discovery_cache or None self._cache_expiration_time = cache_expiration_time self._cache_expirations = cache_expirations or {} self._cache_path = cache_path self._cache_class = cache_class self._cache_arguments = cache_arguments self._password_callback = password_callback self._statsd_host = statsd_host self._statsd_port = statsd_port self._statsd_prefix = statsd_prefix self._statsd_client = None self._influxdb_config = influxdb_config self._influxdb_client = None self._collector_registry = collector_registry self._service_type_manager = os_service_types.ServiceTypes() 
def __getattr__(self, key): """Return arbitrary attributes.""" if key.startswith('os_'): key = key[3:] if key in [attr.replace('-', '_') for attr in self.config]: return self.config[key] else: return None def __iter__(self): return self.config.__iter__() def __eq__(self, other): return self.name == other.name and self.config == other.config def __ne__(self, other): return not self == other @property def name(self): if self._name is None: try: self._name = parse.urlparse( self.get_session().auth.auth_url ).hostname except Exception: self._name = self._app_name or '' return self._name @property def full_name(self): """Return a string that can be used as an identifier. Always returns a valid string. It will have name and region_name or just one of the two if only one is set, or else 'unknown'. """ region_name = self.get_region_name() if self.name and region_name: return ":".join([self.name, region_name]) elif self.name and not region_name: return self.name elif not self.name and region_name: return region_name else: return 'unknown' def set_service_value(self, key, service_type, value): key = _make_key(key, service_type) self.config[key] = value def set_session_constructor(self, session_constructor): """Sets the Session constructor.""" self._session_constructor = session_constructor def get_requests_verify_args(self): """Return the verify and cert values for the requests library.""" insecure = self.config.get('insecure', False) verify = self.config.get('verify', True) cacert = self.config.get('cacert') # Insecure is the most aggressive setting, so it wins if insecure: verify = False if verify and cacert: verify = os.path.expanduser(cacert) else: if cacert: warnings.warn( f"You are specifying a cacert for the cloud " f"{self.full_name} but also to ignore the host " f"verification. 
The host SSL cert will not be verified.", os_warnings.ConfigurationWarning, ) cert = self.config.get('cert') if cert: cert = os.path.expanduser(cert) if self.config.get('key'): cert = (cert, os.path.expanduser(self.config.get('key'))) return (verify, cert) def get_services(self): """Return a list of service types we know something about.""" services = [] for key, val in self.config.items(): if ( key.endswith('api_version') or key.endswith('service_type') or key.endswith('service_name') ): services.append("_".join(key.split('_')[:-2])) return list(set(services)) def get_enabled_services(self): services = set() all_services = [ k['service_type'] for k in self._service_type_manager.services ] all_services.extend( k[4:] for k in self.config.keys() if k.startswith('has_') ) for srv in all_services: ep = self.get_endpoint_from_catalog(srv) if ep: services.add(srv.replace('-', '_')) return services def get_auth_args(self): return self.config.get('auth', {}) def _get_config( self, key, service_type, default=None, fallback_to_unprefixed=False, converter=None, ): '''Get a config value for a service_type. Finds the config value for a key, looking first for it prefixed by the given service_type, then by any known aliases of that service_type. Finally, if fallback_to_unprefixed is True, a value will be looked for without a prefix to support the config values where a global default makes sense. For instance, ``_get_config('example', 'block-storage', True)`` would first look for ``block_storage_example``, then ``volumev3_example``, ``volumev2_example`` and ``volume_example``. If no value was found, it would look for ``example``. If none of that works, it returns the value in ``default``. 
''' if service_type is None: return self.config.get(key) for st in self._service_type_manager.get_all_types(service_type): value = self.config.get(_make_key(key, st)) if value is not None: break else: if fallback_to_unprefixed: value = self.config.get(key) if value is None: return default else: if converter is not None: value = converter(value) return value def _get_service_config(self, key, service_type): config_dict = self.config.get(key) if not config_dict: return None if not isinstance(config_dict, dict): return config_dict for st in self._service_type_manager.get_all_types(service_type): if st in config_dict: return config_dict[st] def get_region_name(self, service_type=None): # If a region_name for the specific service_type is configured, use it; # else use the one configured for the CloudRegion as a whole. return self._get_config( 'region_name', service_type, fallback_to_unprefixed=True ) def get_interface(self, service_type=None): return self._get_config( 'interface', service_type, fallback_to_unprefixed=True ) def get_api_version(self, service_type): version = self._get_config('api_version', service_type) if version: try: float(version) except ValueError: if 'latest' in version: warnings.warn( "You have a configured API_VERSION with 'latest' in " "it. In the context of openstacksdk this doesn't make " "any sense.", os_warnings.ConfigurationWarning, ) return None return version def get_default_microversion(self, service_type): return self._get_config('default_microversion', service_type) def get_service_type(self, service_type): # People requesting 'volume' are doing so because os-client-config # let them. What they want is block-storage, not explicitly the # v1 of cinder. If someone actually wants v1, they'll have api_version # set to 1, in which case block-storage will still work properly. # Use service-types-manager to grab the official type name. 
_get_config # will still look for config by alias, but starting with the official # type will get us things in the right order. if self._service_type_manager.is_known(service_type): service_type = self._service_type_manager.get_service_type( service_type ) return self._get_config( 'service_type', service_type, default=service_type ) def get_service_name(self, service_type): return self._get_config('service_name', service_type) def get_endpoint(self, service_type): auth = self.config.get('auth', {}) value = self._get_config('endpoint_override', service_type) if not value: value = self._get_config('endpoint', service_type) if not value and self.config.get('auth_type') == 'none': # If endpoint is given and we're using the none auth type, # then the endpoint value is the endpoint_override for every # service. value = auth.get('endpoint') if ( not value and service_type == 'identity' and SCOPE_KEYS.isdisjoint(set(auth.keys())) ): # There are a small number of unscoped identity operations. # Specifically, looking up a list of projects/domains/system to # scope to. value = auth.get('auth_url') # Because of course. Seriously. # We have to override the Rackspace block-storage endpoint because # only v1 is in the catalog but the service actually does support # v2. But the endpoint needs the project_id. service_type = self._service_type_manager.get_service_type( service_type ) if ( value and self.config.get('profile') == 'rackspace' and service_type == 'block-storage' ): value = value + auth.get('project_id') return value def get_endpoint_from_catalog( self, service_type, interface=None, region_name=None ): """Return the endpoint for a given service as found in the catalog. For values respecting endpoint overrides, see :meth:`~openstack.connection.Connection.endpoint_for` :param service_type: Service Type of the endpoint to search for. :param interface: Interface of the endpoint to search for. Optional, defaults to the configured value for interface for this Connection. 
:param region_name: Region Name of the endpoint to search for. Optional, defaults to the configured value for region_name for this Connection. :returns: The endpoint of the service, or None if not found. """ interface = interface or self.get_interface(service_type) region_name = region_name or self.get_region_name(service_type) session = self.get_session() catalog = session.auth.get_access(session).service_catalog try: return catalog.url_for( service_type=service_type, interface=interface, region_name=region_name, ) except (keystoneauth1.exceptions.catalog.EndpointNotFound, ValueError): return None def get_connect_retries(self, service_type): return self._get_config( 'connect_retries', service_type, fallback_to_unprefixed=True, converter=int, ) def get_status_code_retries(self, service_type): return self._get_config( 'status_code_retries', service_type, fallback_to_unprefixed=True, converter=int, ) @property def prefer_ipv6(self): return not self._force_ipv4 @property def force_ipv4(self): return self._force_ipv4 def get_auth(self): """Return a keystoneauth plugin from the auth credentials.""" return self._auth def skip_auth_cache(self): return not keyring or not self._auth or not self._cache_auth def load_auth_from_cache(self): if self.skip_auth_cache(): return cache_id = self._auth.get_cache_id() # skip if the plugin does not support caching if not cache_id: return try: state = keyring.get_password('openstacksdk', cache_id) except RuntimeError: # the fail backend raises this self.log.debug('Failed to fetch auth from keyring') return self.log.debug('Reusing authentication from keyring') self._auth.set_auth_state(state) def set_auth_cache(self): if self.skip_auth_cache(): return cache_id = self._auth.get_cache_id() state = self._auth.get_auth_state() try: if state: # NOTE: under some conditions the method may be invoked when auth # is empty. This may lead to exception in the keyring lib, thus do # nothing. 
keyring.set_password('openstacksdk', cache_id, state) except RuntimeError: # the fail backend raises this self.log.debug('Failed to set auth into keyring') def insert_user_agent(self): """Set sdk information into the user agent of the Session. .. warning:: This method is here to be used by os-client-config. It exists as a hook point so that os-client-config can provice backwards compatibility and still be in the User Agent for people using os-client-config directly. Normal consumers of SDK should use app_name and app_version. However, if someone else writes a subclass of :class:`~openstack.config.cloud_region.CloudRegion` it may be desirable. """ self._keystone_session.additional_user_agent.append( ('openstacksdk', openstack_version.__version__) ) def get_session(self): """Return a keystoneauth session based on the auth credentials.""" if self._keystone_session is None: if not self._auth: raise exceptions.ConfigException( "Problem with auth parameters" ) verify, cert = self.get_requests_verify_args() # Turn off urllib3 warnings about insecure certs if we have # explicitly configured requests to tell it we do not want # cert verification if not verify: self.log.debug( "Turning off SSL warnings for {full_name}" " since verify=False".format(full_name=self.full_name) ) requestsexceptions.squelch_warnings(insecure_requests=not verify) self._keystone_session = self._session_constructor( auth=self._auth, verify=verify, cert=cert, timeout=self.config.get('api_timeout'), collect_timing=self.config.get('timing'), discovery_cache=self._discovery_cache, ) self.insert_user_agent() # Using old keystoneauth with new os-client-config fails if # we pass in app_name and app_version. Those are not essential, # nor a reason to bump our minimum, so just test for the session # having the attribute post creation and set them then. 
if hasattr(self._keystone_session, 'app_name'): self._keystone_session.app_name = self._app_name if hasattr(self._keystone_session, 'app_version'): self._keystone_session.app_version = self._app_version return self._keystone_session def get_service_catalog(self): """Helper method to grab the service catalog.""" return self._auth.get_access(self.get_session()).service_catalog def _get_version_request(self, service_type, version): """Translate OCC version args to those needed by ksa adapter. If no version is requested explicitly and we have a configured version, set the version parameter and let ksa deal with expanding that to min=ver.0, max=ver.latest. If version is set, pass it through. If version is not set and we don't have a configured version, default to latest. If version is set, contains a '.', and default_microversion is not set, also pass it as a default microversion. """ version_request = _util.VersionRequest() if version == 'latest': version_request.max_api_version = 'latest' return version_request if not version: version = self.get_api_version(service_type) # Octavia doens't have a version discovery document. Hard-code an # exception to this logic for now. if not version and service_type not in ('load-balancer',): version_request.max_api_version = 'latest' else: version_request.version = version default_microversion = self.get_default_microversion(service_type) implied_microversion = _get_implied_microversion(version) if ( implied_microversion and default_microversion and implied_microversion != default_microversion ): raise exceptions.ConfigException( "default_microversion of {default_microversion} was given" " for {service_type}, but api_version looks like a" " microversion as well. 
Please set api_version to just the" " desired major version, or omit default_microversion".format( default_microversion=default_microversion, service_type=service_type, ) ) if implied_microversion: default_microversion = implied_microversion # If we're inferring a microversion, don't pass the whole # string in as api_version, since that tells keystoneauth # we're looking for a major api version. version_request.version = version[0] version_request.default_microversion = default_microversion return version_request def get_all_version_data(self, service_type): # Seriously. Don't think about the existential crisis # that is the next line. You'll wind up in cthulhu's lair. service_type = self.get_service_type(service_type) region_name = self.get_region_name(service_type) versions = self.get_session().get_all_version_data( service_type=service_type, interface=self.get_interface(service_type), region_name=region_name, ) region_versions = versions.get(region_name, {}) interface_versions = region_versions.get( self.get_interface(service_type), {} ) return interface_versions.get(service_type, []) def _get_endpoint_from_catalog(self, service_type, constructor): adapter = constructor( session=self.get_session(), service_type=self.get_service_type(service_type), service_name=self.get_service_name(service_type), interface=self.get_interface(service_type), region_name=self.get_region_name(service_type), ) return adapter.get_endpoint() def _get_hardcoded_endpoint(self, service_type, constructor): endpoint = self._get_endpoint_from_catalog(service_type, constructor) if not endpoint.rstrip().rsplit('/')[-1] == 'v2.0': if not endpoint.endswith('/'): endpoint += '/' endpoint = parse.urljoin(endpoint, 'v2.0') return endpoint def get_session_client( self, service_type, version=None, constructor=proxy.Proxy, **kwargs ): """Return a prepped keystoneauth Adapter for a given service. This is useful for making direct requests calls against a 'mounted' endpoint. 
That is, if you do: client = get_session_client('compute') then you can do: client.get('/flavors') and it will work like you think. """ version_request = self._get_version_request(service_type, version) kwargs.setdefault('region_name', self.get_region_name(service_type)) kwargs.setdefault( 'connect_retries', self.get_connect_retries(service_type) ) kwargs.setdefault( 'status_code_retries', self.get_status_code_retries(service_type) ) kwargs.setdefault('statsd_prefix', self.get_statsd_prefix()) kwargs.setdefault('statsd_client', self.get_statsd_client()) kwargs.setdefault('prometheus_counter', self.get_prometheus_counter()) kwargs.setdefault( 'prometheus_histogram', self.get_prometheus_histogram() ) kwargs.setdefault('influxdb_config', self._influxdb_config) kwargs.setdefault('influxdb_client', self.get_influxdb_client()) endpoint_override = self.get_endpoint(service_type) version = version_request.version min_api_version = ( kwargs.pop('min_version', None) or version_request.min_api_version ) max_api_version = ( kwargs.pop('max_version', None) or version_request.max_api_version ) # Older neutron has inaccessible discovery document. Nobody noticed # because neutronclient hard-codes an append of v2.0. YAY! # Also, older octavia has a similar issue. 
if service_type in ('network', 'load-balancer'): version = None min_api_version = None max_api_version = None if endpoint_override is None: endpoint_override = self._get_hardcoded_endpoint( service_type, constructor ) client = constructor( session=self.get_session(), service_type=self.get_service_type(service_type), service_name=self.get_service_name(service_type), interface=self.get_interface(service_type), version=version, min_version=min_api_version, max_version=max_api_version, endpoint_override=endpoint_override, default_microversion=version_request.default_microversion, rate_limit=self.get_rate_limit(service_type), concurrency=self.get_concurrency(service_type), **kwargs, ) if version_request.default_microversion: default_microversion = version_request.default_microversion info = client.get_endpoint_data() if not discover.version_between( info.min_microversion, info.max_microversion, default_microversion, ): if self.get_default_microversion(service_type): raise exceptions.ConfigException( "A default microversion for service {service_type} of" " {default_microversion} was requested, but the cloud" " only supports a minimum of {min_microversion} and" " a maximum of {max_microversion}.".format( service_type=service_type, default_microversion=default_microversion, min_microversion=discover.version_to_string( info.min_microversion ), max_microversion=discover.version_to_string( info.max_microversion ), ) ) else: raise exceptions.ConfigException( "A default microversion for service {service_type} of" " {default_microversion} was requested, but the cloud" " only supports a minimum of {min_microversion} and" " a maximum of {max_microversion}. The default" " microversion was set because a microversion" " formatted version string, '{api_version}', was" " passed for the api_version of the service. 
If it" " was not intended to set a default microversion" " please remove anything other than an integer major" " version from the version setting for" " the service.".format( service_type=service_type, api_version=self.get_api_version(service_type), default_microversion=default_microversion, min_microversion=discover.version_to_string( info.min_microversion ), max_microversion=discover.version_to_string( info.max_microversion ), ) ) return client def get_session_endpoint( self, service_type, min_version=None, max_version=None ): """Return the endpoint from config or the catalog. If a configuration lists an explicit endpoint for a service, return that. Otherwise, fetch the service catalog from the keystone session and return the appropriate endpoint. :param service_type: Official service type of service """ override_endpoint = self.get_endpoint(service_type) if override_endpoint: return override_endpoint region_name = self.get_region_name(service_type) service_name = self.get_service_name(service_type) interface = self.get_interface(service_type) session = self.get_session() # Do this as kwargs because of os-client-config unittest mocking version_kwargs = {} if min_version: version_kwargs['min_version'] = min_version if max_version: version_kwargs['max_version'] = max_version try: # Return the highest version we find that matches # the request endpoint = session.get_endpoint( service_type=service_type, region_name=region_name, interface=interface, service_name=service_name, **version_kwargs, ) except keystoneauth1.exceptions.catalog.EndpointNotFound: endpoint = None if not endpoint: self.log.warning( "Keystone catalog entry not found (" "service_type=%s,service_name=%s," "interface=%s,region_name=%s)", service_type, service_name, interface, region_name, ) return endpoint def get_cache_expiration_time(self): # TODO(mordred) We should be validating/transforming this on input return int(self._cache_expiration_time) def get_cache_path(self): return self._cache_path def 
get_cache_class(self): return self._cache_class def get_cache_arguments(self): return copy.deepcopy(self._cache_arguments) def get_cache_expirations(self): return copy.deepcopy(self._cache_expirations) def get_cache_resource_expiration(self, resource, default=None): """Get expiration time for a resource :param resource: Name of the resource type :param default: Default value to return if not found (optional, defaults to None) :returns: Expiration time for the resource type as float or default """ if resource not in self._cache_expirations: return default return float(self._cache_expirations[resource]) def requires_floating_ip(self): """Return whether or not this cloud requires floating ips. :returns: True of False if know, None if discovery is needed. If requires_floating_ip is not configured but the cloud is known to not provide floating ips, will return False. """ if self.config['floating_ip_source'] == "None": return False return self.config.get('requires_floating_ip') def get_external_networks(self): """Get list of network names for external networks.""" return [ net['name'] for net in self.config.get('networks', []) if net['routes_externally'] ] def get_external_ipv4_networks(self): """Get list of network names for external IPv4 networks.""" return [ net['name'] for net in self.config.get('networks', []) if net['routes_ipv4_externally'] ] def get_external_ipv6_networks(self): """Get list of network names for external IPv6 networks.""" return [ net['name'] for net in self.config.get('networks', []) if net['routes_ipv6_externally'] ] def get_internal_networks(self): """Get list of network names for internal networks.""" return [ net['name'] for net in self.config.get('networks', []) if not net['routes_externally'] ] def get_internal_ipv4_networks(self): """Get list of network names for internal IPv4 networks.""" return [ net['name'] for net in self.config.get('networks', []) if not net['routes_ipv4_externally'] ] def get_internal_ipv6_networks(self): """Get list 
of network names for internal IPv6 networks.""" return [ net['name'] for net in self.config.get('networks', []) if not net['routes_ipv6_externally'] ] def get_default_network(self): """Get network used for default interactions.""" for net in self.config.get('networks', []): if net['default_interface']: return net['name'] return None def get_nat_destination(self): """Get network used for NAT destination.""" for net in self.config.get('networks', []): if net['nat_destination']: return net['name'] return None def get_nat_source(self): """Get network used for NAT source.""" for net in self.config.get('networks', []): if net.get('nat_source'): return net['name'] return None def _get_extra_config(self, key, defaults=None): """Fetch an arbitrary extra chunk of config, laying in defaults. :param string key: name of the config section to fetch :param dict defaults: (optional) default values to merge under the found config """ defaults = _util.normalize_keys(defaults or {}) if not key: return defaults return _util.merge_clouds( defaults, _util.normalize_keys(self._extra_config.get(key, {})) ) def get_client_config(self, name=None, defaults=None): """Get config settings for a named client. Settings will also be looked for in a section called 'client'. If settings are found in both, they will be merged with the settings from the named section winning over the settings from client section, and both winning over provided defaults. :param string name: Name of the config section to look for. :param dict defaults: Default settings to use. :returns: A dict containing merged settings from the named section, the client section and the defaults. 
""" return self._get_extra_config( name, self._get_extra_config('client', defaults) ) def get_password_callback(self): return self._password_callback def get_rate_limit(self, service_type=None): return self._get_service_config( 'rate_limit', service_type=service_type ) def get_concurrency(self, service_type=None): return self._get_service_config( 'concurrency', service_type=service_type ) def get_statsd_client(self): if not statsd: if self._statsd_host: self.log.warning( 'StatsD python library is not available. ' 'Reporting disabled' ) return None statsd_args = {} if self._statsd_host: statsd_args['host'] = self._statsd_host if self._statsd_port: statsd_args['port'] = self._statsd_port if statsd_args: try: return statsd.StatsClient(**statsd_args) except Exception: self.log.warning('Cannot establish connection to statsd') return None else: return None def get_statsd_prefix(self): return self._statsd_prefix or 'openstack.api' def get_prometheus_registry(self): if not self._collector_registry and prometheus_client: self._collector_registry = prometheus_client.REGISTRY return self._collector_registry def get_prometheus_histogram(self): registry = self.get_prometheus_registry() if not registry or not prometheus_client: return # We have to hide a reference to the histogram on the registry # object, because it's collectors must be singletons for a given # registry but register at creation time. 
hist = getattr(registry, '_openstacksdk_histogram', None) if not hist: hist = prometheus_client.Histogram( 'openstack_http_response_time', 'Time taken for an http response to an OpenStack service', labelnames=[ 'method', 'endpoint', 'service_type', 'status_code', ], registry=registry, ) registry._openstacksdk_histogram = hist return hist def get_prometheus_counter(self): registry = self.get_prometheus_registry() if not registry or not prometheus_client: return counter = getattr(registry, '_openstacksdk_counter', None) if not counter: counter = prometheus_client.Counter( 'openstack_http_requests', 'Number of HTTP requests made to an OpenStack service', labelnames=[ 'method', 'endpoint', 'service_type', 'status_code', ], registry=registry, ) registry._openstacksdk_counter = counter return counter def has_service(self, service_type): service_type = service_type.lower().replace('-', '_') key = f'has_{service_type}' return self.config.get( key, self._service_type_manager.is_official(service_type) ) def disable_service(self, service_type, reason=None): _disable_service(self.config, service_type, reason=reason) def enable_service(self, service_type): service_type = service_type.lower().replace('-', '_') key = f'has_{service_type}' self.config[key] = True def get_disabled_reason(self, service_type): service_type = service_type.lower().replace('-', '_') d_key = _make_key('disabled_reason', service_type) return self.config.get(d_key) def get_influxdb_client(self): influx_args = {} if not self._influxdb_config: return None use_udp = bool(self._influxdb_config.get('use_udp', False)) port = self._influxdb_config.get('port') if use_udp: influx_args['use_udp'] = True if 'port' in self._influxdb_config: if use_udp: influx_args['udp_port'] = port else: influx_args['port'] = port for key in ['host', 'username', 'password', 'database', 'timeout']: if key in self._influxdb_config: influx_args[key] = self._influxdb_config[key] if influxdb and influx_args: try: return 
influxdb.InfluxDBClient(**influx_args) except Exception: self.log.warning('Cannot establish connection to InfluxDB') else: self.log.warning( 'InfluxDB configuration is present, ' 'but no client library is found.' ) return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/defaults.json0000664000175000017500000000066700000000000022176 0ustar00zuulzuul00000000000000{ "auth_type": "password", "baremetal_status_code_retries": 5, "baremetal_introspection_status_code_retries": 5, "image_status_code_retries": 5, "disable_vendor_agent": {}, "interface": "public", "floating_ip_source": "neutron", "image_api_use_tasks": false, "image_format": "qcow2", "message": "", "network_api_version": "2", "object_store_api_version": "1", "secgroup_source": "neutron", "status": "active" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/defaults.py0000664000175000017500000000350600000000000021650 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import os import threading _json_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'defaults.json' ) _defaults = None _defaults_lock = threading.Lock() # json_path argument is there for os-client-config def get_defaults(json_path=_json_path): global _defaults if _defaults is not None: return _defaults.copy() with _defaults_lock: if _defaults is not None: # Did someone else just finish filling it? return _defaults.copy() # Python language specific defaults # These are defaults related to use of python libraries, they are # not qualities of a cloud. # # NOTE(harlowja): update a in-memory dict, before updating # the global one so that other callers of get_defaults do not # see the partially filled one. tmp_defaults = dict( api_timeout=None, verify=True, cacert=None, cert=None, key=None, ) with open(json_path) as json_file: updates = json.load(json_file) if updates is not None: tmp_defaults.update(updates) _defaults = tmp_defaults return tmp_defaults.copy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/exceptions.py0000664000175000017500000000127300000000000022221 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions OpenStackConfigException = exceptions.ConfigException ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/loader.py0000664000175000017500000016004100000000000021305 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # alias because we already had an option named argparse import argparse as argparse_mod import collections import copy import errno import json import os import re import sys import typing as ty import warnings from keystoneauth1 import adapter from keystoneauth1 import loading import platformdirs import yaml from openstack import _log from openstack.config import _util from openstack.config import cloud_region from openstack.config import defaults from openstack.config import vendors from openstack import exceptions from openstack import warnings as os_warnings PLATFORMDIRS = platformdirs.PlatformDirs( 'openstack', 'OpenStack', multipath='/etc' ) CONFIG_HOME = PLATFORMDIRS.user_config_dir CACHE_PATH = PLATFORMDIRS.user_cache_dir # snaps do set $HOME to something like # /home/$USER/snap/openstackclients/$SNAP_VERSION # the real home (usually /home/$USERNAME) is stored in $SNAP_REAL_HOME # see https://snapcraft.io/docs/environment-variables SNAP_REAL_HOME = os.getenv('SNAP_REAL_HOME') if SNAP_REAL_HOME: UNIX_CONFIG_HOME = os.path.join( 
os.path.join(SNAP_REAL_HOME, '.config'), 'openstack' ) else: UNIX_CONFIG_HOME = os.path.join( os.path.expanduser(os.path.join('~', '.config')), 'openstack' ) UNIX_SITE_CONFIG_HOME = '/etc/openstack' SITE_CONFIG_HOME = PLATFORMDIRS.site_config_dir CONFIG_SEARCH_PATH = [ os.getcwd(), CONFIG_HOME, UNIX_CONFIG_HOME, SITE_CONFIG_HOME, UNIX_SITE_CONFIG_HOME, ] YAML_SUFFIXES = ('.yaml', '.yml') JSON_SUFFIXES = ('.json',) CONFIG_FILES = [ os.path.join(d, 'clouds' + s) for d in CONFIG_SEARCH_PATH for s in YAML_SUFFIXES + JSON_SUFFIXES ] SECURE_FILES = [ os.path.join(d, 'secure' + s) for d in CONFIG_SEARCH_PATH for s in YAML_SUFFIXES + JSON_SUFFIXES ] VENDOR_FILES = [ os.path.join(d, 'clouds-public' + s) for d in CONFIG_SEARCH_PATH for s in YAML_SUFFIXES + JSON_SUFFIXES ] BOOL_KEYS = ('insecure', 'cache') CSV_KEYS = ('auth_methods',) FORMAT_EXCLUSIONS = frozenset(['password']) def get_boolean(value): if value is None: return False if type(value) is bool: return value if value.lower() == 'true': return True return False def _auth_update(old_dict, new_dict_source): """Like dict.update, except handling the nested dict called auth.""" new_dict = copy.deepcopy(new_dict_source) for k, v in new_dict.items(): if k == 'auth': if k in old_dict: old_dict[k].update(v) else: old_dict[k] = v.copy() else: old_dict[k] = v return old_dict def _fix_argv(argv): # Transform any _ characters in arg names to - so that we don't # have to throw billions of compat argparse arguments around all # over the place. 
processed = collections.defaultdict(set) for index in range(0, len(argv)): # If the value starts with '--' and has '-' or '_' in it, then # it's worth looking at it if re.match('^--.*(_|-)+.*', argv[index]): split_args = argv[index].split('=') orig = split_args[0] new = orig.replace('_', '-') if orig != new: split_args[0] = new argv[index] = "=".join(split_args) # Save both for later so we can throw an error about dupes processed[new].add(orig) overlap: ty.List[str] = [] for new, old in processed.items(): if len(old) > 1: overlap.extend(old) if overlap: raise exceptions.ConfigException( "The following options were given: '{options}' which contain" " duplicates except that one has _ and one has -. There is" " no sane way for us to know what you're doing. Remove the" " duplicate option and try again".format(options=','.join(overlap)) ) class OpenStackConfig: # These two attribute are to allow os-client-config to plumb in its # local versions for backwards compat. # They should not be used by anyone else. 
_cloud_region_class = cloud_region.CloudRegion _defaults_module = defaults def __init__( self, config_files=None, vendor_files=None, override_defaults=None, force_ipv4=None, envvar_prefix=None, secure_files=None, pw_func=None, session_constructor=None, app_name=None, app_version=None, load_yaml_config=True, load_envvars=True, statsd_host=None, statsd_port=None, statsd_prefix=None, influxdb_config=None, ): self.log = _log.setup_logging('openstack.config') self._session_constructor = session_constructor self._app_name = app_name self._app_version = app_version self._load_envvars = load_envvars if load_yaml_config: # "if config_files" is not sufficient to process empty list if config_files is not None: self._config_files = config_files else: self._config_files = CONFIG_FILES if secure_files is not None: self._secure_files = secure_files else: self._secure_files = SECURE_FILES if vendor_files is not None: self._vendor_files = vendor_files else: self._vendor_files = VENDOR_FILES else: self._config_files = [] self._secure_files = [] self._vendor_files = [] config_file_override = self._get_envvar('OS_CLIENT_CONFIG_FILE') if config_file_override: self._config_files.insert(0, config_file_override) secure_file_override = self._get_envvar('OS_CLIENT_SECURE_FILE') if secure_file_override: self._secure_files.insert(0, secure_file_override) self.defaults = self._defaults_module.get_defaults() if override_defaults: self.defaults.update(override_defaults) # First, use a config file if it exists where expected self.config_filename, self.cloud_config = self._load_config_file() _, secure_config = self._load_secure_file() if secure_config: self.cloud_config = _util.merge_clouds( self.cloud_config, secure_config ) if not self.cloud_config: self.cloud_config = {'clouds': {}} if 'clouds' not in self.cloud_config: self.cloud_config['clouds'] = {} # Save the other config self.extra_config = copy.deepcopy(self.cloud_config) self.extra_config.pop('clouds', None) # Grab ipv6 preference 
settings from env client_config = self.cloud_config.get('client', {}) if force_ipv4 is not None: # If it's passed in to the constructor, honor it. self.force_ipv4 = force_ipv4 else: # Get the backwards compat value prefer_ipv6 = get_boolean( self._get_envvar( 'OS_PREFER_IPV6', client_config.get( 'prefer_ipv6', client_config.get('prefer-ipv6', True) ), ) ) force_ipv4 = get_boolean( self._get_envvar( 'OS_FORCE_IPV4', client_config.get( 'force_ipv4', client_config.get('broken-ipv6', False) ), ) ) self.force_ipv4 = force_ipv4 if not prefer_ipv6: # this will only be false if someone set it explicitly # honor their wishes self.force_ipv4 = True # Next, process environment variables and add them to the mix self.envvar_key = self._get_envvar('OS_CLOUD_NAME', 'envvars') if self.envvar_key in self.cloud_config['clouds']: raise exceptions.ConfigException( '"{0}" defines a cloud named "{1}", but' ' OS_CLOUD_NAME is also set to "{1}". Please rename' ' either your environment based cloud, or one of your' ' file-based clouds.'.format( self.config_filename, self.envvar_key ) ) self.default_cloud = self._get_envvar('OS_CLOUD') if load_envvars: envvars = self._get_os_environ(envvar_prefix=envvar_prefix) if envvars: self.cloud_config['clouds'][self.envvar_key] = envvars if not self.default_cloud: self.default_cloud = self.envvar_key if not self.default_cloud and self.cloud_config['clouds']: if len(self.cloud_config['clouds'].keys()) == 1: # If there is only one cloud just use it. This matches envvars # behavior and allows for much less typing. # TODO(mordred) allow someone to mark a cloud as "default" in # clouds.yaml. # The next/iter thing is for python3 compat where dict.keys # returns an iterator but in python2 it's a list. 
self.default_cloud = next( iter(self.cloud_config['clouds'].keys()) ) # Finally, fall through and make a cloud that starts with defaults # because we need somewhere to put arguments, and there are neither # config files or env vars if not self.cloud_config['clouds']: self.cloud_config = dict(clouds=dict(defaults=dict(self.defaults))) self.default_cloud = 'defaults' self._cache_auth = False self._cache_expiration_time = 0 self._cache_path = CACHE_PATH self._cache_class = 'dogpile.cache.null' self._cache_arguments: ty.Dict[str, ty.Any] = {} self._cache_expirations: ty.Dict[str, int] = {} self._influxdb_config = {} if 'cache' in self.cloud_config: cache_settings = _util.normalize_keys(self.cloud_config['cache']) self._cache_auth = get_boolean( cache_settings.get('auth', self._cache_auth) ) # expiration_time used to be 'max_age' but the dogpile setting # is expiration_time. Support max_age for backwards compat. self._cache_expiration_time = cache_settings.get( 'expiration_time', cache_settings.get('max_age', self._cache_expiration_time), ) # If cache class is given, use that. If not, but if cache time # is given, default to memory. Otherwise, default to nothing. # to memory. 
if self._cache_expiration_time: self._cache_class = 'dogpile.cache.memory' self._cache_class = self.cloud_config['cache'].get( 'class', self._cache_class ) self._cache_path = os.path.expanduser( cache_settings.get('path', self._cache_path) ) self._cache_arguments = cache_settings.get( 'arguments', self._cache_arguments ) self._cache_expirations = cache_settings.get( 'expiration', self._cache_expirations ) if load_yaml_config: metrics_config = self.cloud_config.get('metrics', {}) statsd_config = metrics_config.get('statsd', {}) statsd_host = statsd_host or statsd_config.get('host') statsd_port = statsd_port or statsd_config.get('port') statsd_prefix = statsd_prefix or statsd_config.get('prefix') influxdb_cfg = metrics_config.get('influxdb', {}) # Parse InfluxDB configuration if not influxdb_config: influxdb_config = influxdb_cfg else: influxdb_config.update(influxdb_cfg) if influxdb_config: config = {} if 'use_udp' in influxdb_config: use_udp = influxdb_config['use_udp'] if isinstance(use_udp, str): use_udp = use_udp.lower() in ('true', 'yes', '1') elif not isinstance(use_udp, bool): use_udp = False self.log.warning( 'InfluxDB.use_udp value type is not ' 'supported. 
Use one of ' '[true|false|yes|no|1|0]' ) config['use_udp'] = use_udp for key in [ 'host', 'port', 'username', 'password', 'database', 'measurement', 'timeout', ]: if key in influxdb_config: config[key] = influxdb_config[key] self._influxdb_config = config if load_envvars: statsd_host = statsd_host or os.environ.get('STATSD_HOST') statsd_port = statsd_port or os.environ.get('STATSD_PORT') statsd_prefix = statsd_prefix or os.environ.get('STATSD_PREFIX') self._statsd_host = statsd_host self._statsd_port = statsd_port self._statsd_prefix = statsd_prefix # Flag location to hold the peeked value of an argparse timeout value self._argv_timeout = False # Save the password callback # password = self._pw_callback(prompt="Password: ") self._pw_callback = pw_func def _get_os_environ(self, envvar_prefix=None): ret = self._defaults_module.get_defaults() if not envvar_prefix: # This makes the or below be OS_ or OS_ which is a no-op envvar_prefix = 'OS_' environkeys = [ k for k in os.environ.keys() if (k.startswith('OS_') or k.startswith(envvar_prefix)) and not k.startswith('OS_TEST') # infra CI var and not k.startswith('OS_STD') # oslotest var and not k.startswith('OS_LOG') # oslotest var ] for k in environkeys: newkey = k.split('_', 1)[-1].lower() ret[newkey] = os.environ[k] # If the only environ keys are selectors or behavior modification, # don't return anything selectors = { 'OS_CLOUD', 'OS_REGION_NAME', 'OS_CLIENT_CONFIG_FILE', 'OS_CLIENT_SECURE_FILE', 'OS_CLOUD_NAME', } if set(environkeys) - selectors: return ret return None def _get_envvar(self, key, default=None): if not self._load_envvars: return default return os.environ.get(key, default) def get_extra_config(self, key, defaults=None): """Fetch an arbitrary extra chunk of config, laying in defaults. 
:param string key: name of the config section to fetch :param dict defaults: (optional) default values to merge under the found config """ defaults = _util.normalize_keys(defaults or {}) if not key: return defaults return _util.merge_clouds( defaults, _util.normalize_keys(self.cloud_config.get(key, {})) ) def _load_config_file(self): return self._load_yaml_json_file(self._config_files) def _load_secure_file(self): return self._load_yaml_json_file(self._secure_files) def _load_vendor_file(self): return self._load_yaml_json_file(self._vendor_files) def _load_yaml_json_file(self, filelist): for path in filelist: if os.path.exists(path): try: with open(path) as f: if path.endswith('json'): return path, json.load(f) else: return path, yaml.safe_load(f) except OSError as e: if e.errno == errno.EACCES: # Can't access file so let's continue to the next # file continue return (None, {}) def _expand_region_name(self, region_name): return {'name': region_name, 'values': {}} def _expand_regions(self, regions): ret = [] for region in regions: if isinstance(region, dict): # i.e. must have name key, and only name,values keys if 'name' not in region or not {'name', 'values'} >= set( region ): raise exceptions.ConfigException( 'Invalid region entry at: %s' % region ) if 'values' not in region: region['values'] = {} ret.append(copy.deepcopy(region)) else: ret.append(self._expand_region_name(region)) return ret def _get_regions(self, cloud): if cloud not in self.cloud_config['clouds']: return [self._expand_region_name('')] regions = self._get_known_regions(cloud) if not regions: # We don't know of any regions use a workable default. 
regions = [self._expand_region_name('')] return regions def _get_known_regions(self, cloud): config = _util.normalize_keys(self.cloud_config['clouds'][cloud]) if 'regions' in config: return self._expand_regions(config['regions']) elif 'region_name' in config: if isinstance(config['region_name'], list): regions = config['region_name'] else: regions = config['region_name'].split(',') if len(regions) > 1: warnings.warn( f"Comma separated lists in region_name are deprecated. " f"Please use a yaml list in the regions " f"parameter in {self.config_filename} instead.", os_warnings.OpenStackDeprecationWarning, ) return self._expand_regions(regions) else: # crappit. we don't have a region defined. new_cloud: ty.Dict[str, ty.Any] = {} our_cloud = self.cloud_config['clouds'].get(cloud, {}) self._expand_vendor_profile(cloud, new_cloud, our_cloud) if 'regions' in new_cloud and new_cloud['regions']: return self._expand_regions(new_cloud['regions']) elif 'region_name' in new_cloud and new_cloud['region_name']: return [self._expand_region_name(new_cloud['region_name'])] def _get_region(self, cloud=None, region_name=''): if region_name is None: region_name = '' if not cloud: return self._expand_region_name(region_name) regions = self._get_known_regions(cloud) if not regions: return self._expand_region_name(region_name) if not region_name: return regions[0] for region in regions: if region['name'] == region_name: return region raise exceptions.ConfigException( 'Region {region_name} is not a valid region name for cloud' ' {cloud}. Valid choices are {region_list}. 
Please note that' ' region names are case sensitive.'.format( region_name=region_name, region_list=','.join([r['name'] for r in regions]), cloud=cloud, ) ) def get_cloud_names(self): return self.cloud_config['clouds'].keys() def _get_base_cloud_config(self, name, profile=None): cloud = dict() # Only validate cloud name if one was given if name and name not in self.cloud_config['clouds']: raise exceptions.ConfigException(f"Cloud {name} was not found.") our_cloud = self.cloud_config['clouds'].get(name, dict()) if profile: our_cloud['profile'] = profile # Get the defaults cloud.update(self.defaults) self._expand_vendor_profile(name, cloud, our_cloud) if 'auth' not in cloud: cloud['auth'] = dict() _auth_update(cloud, our_cloud) if 'cloud' in cloud: del cloud['cloud'] return cloud def _expand_vendor_profile(self, name, cloud, our_cloud): # Expand a profile if it exists. 'cloud' is an old confusing name # for this. profile_name = our_cloud.get('profile', our_cloud.get('cloud', None)) if not profile_name or profile_name == self.envvar_key: return if 'cloud' in our_cloud: warnings.warn( f"{self.config_filename} uses the keyword 'cloud' to " f"reference a known vendor profile. 
This has been deprecated " f"in favor of the 'profile' keyword.", os_warnings.OpenStackDeprecationWarning, ) vendor_filename, vendor_file = self._load_vendor_file() if ( vendor_file and 'public-clouds' in vendor_file and profile_name in vendor_file['public-clouds'] ): _auth_update(cloud, vendor_file['public-clouds'][profile_name]) else: profile_data = vendors.get_profile(profile_name) if profile_data: nested_profile = profile_data.pop('profile', None) if nested_profile: nested_profile_data = vendors.get_profile(nested_profile) if nested_profile_data: profile_data = nested_profile_data status = profile_data.pop('status', 'active') message = profile_data.pop('message', '') if status == 'deprecated': warnings.warn( f"{profile_name} is deprecated: {message}", os_warnings.OpenStackDeprecationWarning, ) elif status == 'shutdown': raise exceptions.ConfigException( "{profile_name} references a cloud that no longer" " exists: {message}".format( profile_name=profile_name, message=message ) ) _auth_update(cloud, profile_data) else: # Can't find the requested vendor config, go about business warnings.warn( f"Couldn't find the vendor profile {profile_name} for " f"the cloud {name}", os_warnings.ConfigurationWarning, ) def _project_scoped(self, cloud): return ( 'project_id' in cloud or 'project_name' in cloud or 'project_id' in cloud['auth'] or 'project_name' in cloud['auth'] ) def _validate_networks(self, networks, key): value = None for net in networks: if value and net[key]: raise exceptions.ConfigException( "Duplicate network entries for {key}: {net1} and {net2}." " Only one network can be flagged with {key}".format( key=key, net1=value['name'], net2=net['name'] ) ) if not value and net[key]: value = net def _fix_backwards_networks(self, cloud): # Leave the external_network and internal_network keys in the # dict because consuming code might be expecting them. 
networks = [] # Normalize existing network entries for net in cloud.get('networks', []): name = net.get('name') if not name: raise exceptions.ConfigException( 'Entry in network list is missing required field "name".' ) network = dict( name=name, routes_externally=get_boolean(net.get('routes_externally')), nat_source=get_boolean(net.get('nat_source')), nat_destination=get_boolean(net.get('nat_destination')), default_interface=get_boolean(net.get('default_interface')), ) # routes_ipv4_externally defaults to the value of routes_externally network['routes_ipv4_externally'] = get_boolean( net.get('routes_ipv4_externally', network['routes_externally']) ) # routes_ipv6_externally defaults to the value of routes_externally network['routes_ipv6_externally'] = get_boolean( net.get('routes_ipv6_externally', network['routes_externally']) ) networks.append(network) for key in ('external_network', 'internal_network'): external = key.startswith('external') if key in cloud and 'networks' in cloud: raise exceptions.ConfigException( "Both {key} and networks were specified in the config." " Please remove {key} from the config and use the network" " list to configure network behavior.".format(key=key) ) if key in cloud: warnings.warn( f"{key} is deprecated. 
Please replace with an entry in " f"a dict inside of the networks list with name: " f"{cloud[key]} and routes_externally: {external}", os_warnings.OpenStackDeprecationWarning, ) networks.append( dict( name=cloud[key], routes_externally=external, nat_destination=not external, default_interface=external, ) ) # Validate that we don't have duplicates self._validate_networks(networks, 'nat_destination') self._validate_networks(networks, 'default_interface') cloud['networks'] = networks return cloud def _handle_domain_id(self, cloud): # Allow people to just specify domain once if it's the same mappings = { 'domain_id': ('user_domain_id', 'project_domain_id'), 'domain_name': ('user_domain_name', 'project_domain_name'), } for target_key, possible_values in mappings.items(): if not self._project_scoped(cloud): if target_key in cloud and target_key not in cloud['auth']: cloud['auth'][target_key] = cloud.pop(target_key) continue for key in possible_values: if target_key in cloud['auth'] and key not in cloud['auth']: cloud['auth'][key] = cloud['auth'][target_key] cloud.pop(target_key, None) cloud['auth'].pop(target_key, None) return cloud def _fix_backwards_project(self, cloud): # Do the lists backwards so that project_name is the ultimate winner # Also handle moving domain names into auth so that domain mapping # is easier mappings = { 'domain_id': ('domain_id', 'domain-id'), 'domain_name': ('domain_name', 'domain-name'), 'user_domain_id': ('user_domain_id', 'user-domain-id'), 'user_domain_name': ('user_domain_name', 'user-domain-name'), 'project_domain_id': ('project_domain_id', 'project-domain-id'), 'project_domain_name': ( 'project_domain_name', 'project-domain-name', ), 'token': ('auth-token', 'auth_token', 'token'), } if cloud.get('auth_type', None) == 'v2password': # If v2password is explcitly requested, this is to deal with old # clouds. 
That's fine - we need to map settings in the opposite # direction mappings['tenant_id'] = ( 'project_id', 'project-id', 'tenant_id', 'tenant-id', ) mappings['tenant_name'] = ( 'project_name', 'project-name', 'tenant_name', 'tenant-name', ) else: mappings['project_id'] = ( 'tenant_id', 'tenant-id', 'project_id', 'project-id', ) mappings['project_name'] = ( 'tenant_name', 'tenant-name', 'project_name', 'project-name', ) for target_key, possible_values in mappings.items(): target = None for key in possible_values: # Prefer values from the 'auth' section # as they may contain cli or environment overrides. # See story 2010784 for context. if key in cloud['auth']: target = str(cloud['auth'][key]) del cloud['auth'][key] if key in cloud: target = str(cloud[key]) del cloud[key] if target: cloud['auth'][target_key] = target return cloud def _fix_backwards_auth_plugin(self, cloud): # Do the lists backwards so that auth_type is the ultimate winner mappings = { 'auth_type': ('auth_plugin', 'auth_type'), } for target_key, possible_values in mappings.items(): target = None for key in possible_values: if key in cloud: target = cloud[key] del cloud[key] cloud[target_key] = target # Because we force alignment to v3 nouns, we want to force # use of the auth plugin that can do auto-selection and dealing # with that based on auth parameters. v2password is basically # completely broken return cloud def register_argparse_arguments(self, parser, argv, service_keys=None): """Register all of the common argparse options needed. Given an argparse parser, register the keystoneauth Session arguments, the keystoneauth Auth Plugin Options and os-cloud. Also, peek in the argv to see if all of the auth plugin options should be registered or merely the ones already configured. 
:param argparse.ArgumentParser: parser to attach argparse options to :param argv: the arguments provided to the application :param string service_keys: Service or list of services this argparse should be specialized for, if known. The first item in the list will be used as the default value for service_type (optional) :raises exceptions.ConfigException if an invalid auth-type is requested """ if service_keys is None: service_keys = [] # Fix argv in place - mapping any keys with embedded _ in them to - _fix_argv(argv) local_parser = argparse_mod.ArgumentParser(add_help=False) for p in (parser, local_parser): p.add_argument( '--os-cloud', metavar='', default=self._get_envvar('OS_CLOUD', None), help='Named cloud to connect to', ) # we need to peek to see if timeout was actually passed, since # the keystoneauth declaration of it has a default, which means # we have no clue if the value we get is from the ksa default # for from the user passing it explicitly. We'll stash it for later local_parser.add_argument('--timeout', metavar='') # We need for get_one to be able to peek at whether a token # was passed so that we can swap the default from password to # token if it was. 
And we need to also peek for --os-auth-token # for novaclient backwards compat local_parser.add_argument('--os-token') local_parser.add_argument('--os-auth-token') # Peek into the future and see if we have an auth-type set in # config AND a cloud set, so that we know which command line # arguments to register and show to the user (the user may want # to say something like: # openstack --os-cloud=foo --os-oidctoken=bar # although I think that user is the cause of my personal pain options, _args = local_parser.parse_known_args(argv) if options.timeout: self._argv_timeout = True # validate = False because we're not _actually_ loading here # we're only peeking, so it's the wrong time to assert that # the rest of the arguments given are invalid for the plugin # chosen (for instance, --help may be requested, so that the # user can see what options he may want to give cloud_region = self.get_one(argparse=options, validate=False) default_auth_type = cloud_region.config['auth_type'] try: loading.register_auth_argparse_arguments( parser, argv, default=default_auth_type ) except Exception: # Hidiing the keystoneauth exception because we're not actually # loading the auth plugin at this point, so the error message # from it doesn't actually make sense to os-client-config users options, _args = parser.parse_known_args(argv) plugin_names = loading.get_available_plugin_names() raise exceptions.ConfigException( "An invalid auth-type was specified: {auth_type}." 
" Valid choices are: {plugin_names}.".format( auth_type=options.os_auth_type, plugin_names=",".join(plugin_names), ) ) if service_keys: primary_service = service_keys[0] else: primary_service = None loading.register_session_argparse_arguments(parser) adapter.register_adapter_argparse_arguments( parser, service_type=primary_service ) for service_key in service_keys: # legacy clients have un-prefixed api-version options parser.add_argument( '--{service_key}-api-version'.format( service_key=service_key.replace('_', '-') ), help=argparse_mod.SUPPRESS, ) adapter.register_service_adapter_argparse_arguments( parser, service_type=service_key ) # Backwards compat options for legacy clients parser.add_argument('--http-timeout', help=argparse_mod.SUPPRESS) parser.add_argument('--os-endpoint-type', help=argparse_mod.SUPPRESS) parser.add_argument('--endpoint-type', help=argparse_mod.SUPPRESS) def _fix_backwards_interface(self, cloud): new_cloud = {} for key in cloud.keys(): if key.endswith('endpoint_type'): target_key = key.replace('endpoint_type', 'interface') else: target_key = key new_cloud[target_key] = cloud[key] return new_cloud def _fix_backwards_api_timeout(self, cloud): new_cloud = {} # requests can only have one timeout, which means that in a single # cloud there is no point in different timeout values. However, # for some reason many of the legacy clients decided to shove their # service name in to the arg name for reasons surpassin sanity. 
If # we find any values that are not api_timeout, overwrite api_timeout # with the value service_timeout = None for key in cloud.keys(): if key.endswith('timeout') and not ( key == 'timeout' or key == 'api_timeout' ): service_timeout = cloud[key] else: new_cloud[key] = cloud[key] if service_timeout is not None: new_cloud['api_timeout'] = service_timeout # The common argparse arg from keystoneauth is called timeout, but # os-client-config expects it to be called api_timeout if self._argv_timeout: if 'timeout' in new_cloud and new_cloud['timeout']: new_cloud['api_timeout'] = new_cloud.pop('timeout') return new_cloud def get_all(self): clouds = [] for cloud in self.get_cloud_names(): for region in self._get_regions(cloud): if region: clouds.append( self.get_one(cloud, region_name=region['name']) ) return clouds # TODO(mordred) Backwards compat for OSC transition get_all_clouds = get_all def _fix_args(self, args=None, argparse=None): """Massage the passed-in options Replace - with _ and strip os_ prefixes. Convert an argparse Namespace object to a dict, removing values that are either None or ''. 
""" if not args: args = {} if argparse: # Convert the passed-in Namespace o_dict = vars(argparse) parsed_args = dict() for k in o_dict: if o_dict[k] is not None and o_dict[k] != '': parsed_args[k] = o_dict[k] args.update(parsed_args) os_args = dict() new_args = dict() for key, val in iter(args.items()): if type(args[key]) is dict: # dive into the auth dict new_args[key] = self._fix_args(args[key]) continue key = key.replace('-', '_') if key.startswith('os_'): os_args[key[3:]] = val else: new_args[key] = val new_args.update(os_args) return new_args def _find_winning_auth_value(self, opt, config): opt_name = opt.name.replace('-', '_') if opt_name in config: return config[opt_name] else: deprecated = getattr( opt, 'deprecated', getattr(opt, 'deprecated_opts', []) ) for d_opt in deprecated: d_opt_name = d_opt.name.replace('-', '_') if d_opt_name in config: return config[d_opt_name] def auth_config_hook(self, config): """Allow examination of config values before loading auth plugin OpenStackClient will override this to perform additional checks on auth_type. """ return config def _get_auth_loader(self, config): # Use the 'none' plugin for variants of None specified, # since it does not look up endpoints or tokens but rather # does a passthrough. This is useful for things like Ironic # that have a keystoneless operational mode, but means we're # still dealing with a keystoneauth Session object, so all the # _other_ things (SSL arg handling, timeout) all work consistently if config['auth_type'] in (None, "None", ''): config['auth_type'] = 'none' elif config['auth_type'] == 'token_endpoint': # Humans have been trained to use a thing called token_endpoint # That it does not exist in keystoneauth is irrelvant- it not # doing what they want causes them sorrow. config['auth_type'] = 'admin_token' loader = loading.get_plugin_loader(config['auth_type']) # As the name would suggest, v3multifactor uses multiple factors for # authentication. 
As a result, we need to register the configuration # options for each required auth method. Normally, this is handled by # the 'MultiFactor.load_from_options' method but there doesn't appear # to be a way to "register" the auth methods without actually loading # the plugin. As a result, if we encounter this auth type then we need # to do this registration of extra options manually. # FIXME(stephenfin): We need to provide a mechanism to extend the # options in keystoneauth1.loading._plugins.identity.v3.MultiAuth # without calling 'load_from_options'. if config['auth_type'] == 'v3multifactor': # We use '.get' since we can't be sure this key is set yet - # validation happens later, in _validate_auth loader._methods = config.get('auth_methods') return loader def _validate_auth(self, config, loader): # May throw a keystoneauth1.exceptions.NoMatchingPlugin plugin_options = loader.get_options() for p_opt in plugin_options: # if it's in config.auth, win, kill it from config dict # if it's in config and not in config.auth, move it # deprecated loses to current # provided beats default, deprecated or not winning_value = self._find_winning_auth_value( p_opt, config['auth'], ) if not winning_value: winning_value = self._find_winning_auth_value( p_opt, config, ) config = self._clean_up_after_ourselves( config, p_opt, winning_value, ) if winning_value: # Prefer the plugin configuration dest value if the value's key # is marked as deprecated. 
if p_opt.dest is None: good_name = p_opt.name.replace('-', '_') config['auth'][good_name] = winning_value else: config['auth'][p_opt.dest] = winning_value # See if this needs a prompting config = self.option_prompt(config, p_opt) return config def _validate_auth_correctly(self, config, loader): # May throw a keystoneauth1.exceptions.NoMatchingPlugin plugin_options = loader.get_options() for p_opt in plugin_options: # if it's in config, win, move it and kill it from config dict # if it's in config.auth but not in config it's good # deprecated loses to current # provided beats default, deprecated or not winning_value = self._find_winning_auth_value( p_opt, config, ) if not winning_value: winning_value = self._find_winning_auth_value( p_opt, config['auth'], ) config = self._clean_up_after_ourselves( config, p_opt, winning_value, ) # See if this needs a prompting config = self.option_prompt(config, p_opt) return config def option_prompt(self, config, p_opt): """Prompt user for option that requires a value""" if ( getattr(p_opt, 'prompt', None) is not None and p_opt.dest not in config['auth'] and self._pw_callback is not None ): config['auth'][p_opt.dest] = self._pw_callback(p_opt.prompt) return config def _clean_up_after_ourselves(self, config, p_opt, winning_value): # Clean up after ourselves for opt in [p_opt.name] + [o.name for o in p_opt.deprecated]: opt = opt.replace('-', '_') config.pop(opt, None) config['auth'].pop(opt, None) if winning_value: # Prefer the plugin configuration dest value if the value's key # is marked as depreciated. 
if p_opt.dest is None: config['auth'][p_opt.name.replace('-', '_')] = winning_value else: config['auth'][p_opt.dest] = winning_value return config def magic_fixes(self, config): """Perform the set of magic argument fixups""" # Infer token plugin if a token was given if ( ('auth' in config and 'token' in config['auth']) or ('auth_token' in config and config['auth_token']) or ('token' in config and config['token']) ): config.setdefault('token', config.pop('auth_token', None)) # Infer passcode if it was given separately # This is generally absolutely impractical to require setting passcode # in the clouds.yaml if 'auth' in config and 'passcode' in config: config['auth']['passcode'] = config.pop('passcode', None) # These backwards compat values are only set via argparse. If it's # there, it's because it was passed in explicitly, and should win config = self._fix_backwards_api_timeout(config) if 'endpoint_type' in config: config['interface'] = config.pop('endpoint_type') config = self._fix_backwards_auth_plugin(config) config = self._fix_backwards_project(config) config = self._fix_backwards_interface(config) config = self._fix_backwards_networks(config) config = self._handle_domain_id(config) for key in BOOL_KEYS: if key in config: if type(config[key]) is not bool: config[key] = get_boolean(config[key]) for key in CSV_KEYS: if key in config: if isinstance(config[key], str): config[key] = config[key].split(',') # TODO(mordred): Special casing auth_url here. We should # come back to this betterer later so that it's # more generalized if 'auth' in config and 'auth_url' in config['auth']: config['auth']['auth_url'] = config['auth']['auth_url'].format( **config ) return config def get_one(self, cloud=None, validate=True, argparse=None, **kwargs): """Retrieve a single CloudRegion and merge additional options :param string cloud: The name of the configuration to load from clouds.yaml :param boolean validate: Validate the config. 
Setting this to False causes no auth plugin to be created. It's really only useful for testing. :param Namespace argparse: An argparse Namespace object; allows direct passing in of argparse options to be added to the cloud config. Values of None and '' will be removed. :param region_name: Name of the region of the cloud. :param kwargs: Additional configuration options :returns: openstack.config.cloud_region.CloudRegion :raises: keystoneauth1.exceptions.MissingRequiredOptions on missing required auth parameters """ profile = kwargs.pop('profile', None) args = self._fix_args(kwargs, argparse=argparse) if cloud is None: if 'cloud' in args: cloud = args['cloud'] else: cloud = self.default_cloud config = self._get_base_cloud_config(cloud, profile) # Get region specific settings if 'region_name' not in args: args['region_name'] = '' region = self._get_region(cloud=cloud, region_name=args['region_name']) args['region_name'] = region['name'] region_args = copy.deepcopy(region['values']) # Regions is a list that we can use to create a list of cloud/region # objects. 
It does not belong in the single-cloud dict config.pop('regions', None) # Can't just do update, because None values take over for arg_list in region_args, args: for key, val in iter(arg_list.items()): if val is not None: if key == 'auth' and config[key] is not None: config[key] = _auth_update(config[key], val) else: config[key] = val config = self.magic_fixes(config) config = _util.normalize_keys(config) # NOTE(dtroyer): OSC needs a hook into the auth args before the # plugin is loaded in order to maintain backward- # compatible behaviour config = self.auth_config_hook(config) if validate: loader = self._get_auth_loader(config) config = self._validate_auth(config, loader) auth_plugin = loader.load_from_options(**config['auth']) else: auth_plugin = None # If any of the defaults reference other values, we need to expand for key, value in config.items(): if hasattr(value, 'format') and key not in FORMAT_EXCLUSIONS: config[key] = value.format(**config) force_ipv4 = config.pop('force_ipv4', self.force_ipv4) prefer_ipv6 = config.pop('prefer_ipv6', True) if not prefer_ipv6: force_ipv4 = True # Override global metrics config with more specific per-cloud # details. 
metrics_config = config.get('metrics', {}) statsd_config = metrics_config.get('statsd', {}) statsd_host = statsd_config.get('host') or self._statsd_host statsd_port = statsd_config.get('port') or self._statsd_port statsd_prefix = statsd_config.get('prefix') or self._statsd_prefix influxdb_config = metrics_config.get('influxdb', {}) if influxdb_config: merged_influxdb = copy.deepcopy(self._influxdb_config) merged_influxdb.update(influxdb_config) influxdb_config = merged_influxdb else: influxdb_config = self._influxdb_config if cloud is None: cloud_name = '' else: cloud_name = str(cloud) return self._cloud_region_class( name=cloud_name, region_name=config['region_name'], config=config, extra_config=self.extra_config, force_ipv4=force_ipv4, auth_plugin=auth_plugin, openstack_config=self, session_constructor=self._session_constructor, app_name=self._app_name, app_version=self._app_version, cache_auth=self._cache_auth, cache_expiration_time=self._cache_expiration_time, cache_expirations=self._cache_expirations, cache_path=self._cache_path, cache_class=self._cache_class, cache_arguments=self._cache_arguments, password_callback=self._pw_callback, statsd_host=statsd_host, statsd_port=statsd_port, statsd_prefix=statsd_prefix, influxdb_config=influxdb_config, ) # TODO(mordred) Backwards compat for OSC transition get_one_cloud = get_one def get_one_cloud_osc( self, cloud=None, validate=True, argparse=None, **kwargs ): """Retrieve a single CloudRegion and merge additional options :param string cloud: The name of the configuration to load from clouds.yaml :param boolean validate: Validate the config. Setting this to False causes no auth plugin to be created. It's really only useful for testing. :param Namespace argparse: An argparse Namespace object; allows direct passing in of argparse options to be added to the cloud config. Values of None and '' will be removed. :param region_name: Name of the region of the cloud. 
:param kwargs: Additional configuration options :raises: keystoneauth1.exceptions.MissingRequiredOptions on missing required auth parameters """ args = self._fix_args(kwargs, argparse=argparse) if cloud is None: if 'cloud' in args: cloud = args['cloud'] else: cloud = self.default_cloud config = self._get_base_cloud_config(cloud) # Get region specific settings if 'region_name' not in args: args['region_name'] = '' region = self._get_region(cloud=cloud, region_name=args['region_name']) args['region_name'] = region['name'] region_args = copy.deepcopy(region['values']) # Regions is a list that we can use to create a list of cloud/region # objects. It does not belong in the single-cloud dict config.pop('regions', None) # Can't just do update, because None values take over for arg_list in region_args, args: for key, val in iter(arg_list.items()): if val is not None: if key == 'auth' and config[key] is not None: config[key] = _auth_update(config[key], val) else: config[key] = val config = self.magic_fixes(config) # NOTE(dtroyer): OSC needs a hook into the auth args before the # plugin is loaded in order to maintain backward- # compatible behaviour config = self.auth_config_hook(config) if validate: loader = self._get_auth_loader(config) config = self._validate_auth_correctly(config, loader) auth_plugin = loader.load_from_options(**config['auth']) else: auth_plugin = None # If any of the defaults reference other values, we need to expand for key, value in config.items(): if hasattr(value, 'format') and key not in FORMAT_EXCLUSIONS: config[key] = value.format(**config) force_ipv4 = config.pop('force_ipv4', self.force_ipv4) prefer_ipv6 = config.pop('prefer_ipv6', True) if not prefer_ipv6: force_ipv4 = True if cloud is None: cloud_name = '' else: cloud_name = str(cloud) return self._cloud_region_class( name=cloud_name, region_name=config['region_name'], config=config, extra_config=self.extra_config, force_ipv4=force_ipv4, auth_plugin=auth_plugin, openstack_config=self, 
cache_auth=self._cache_auth, cache_expiration_time=self._cache_expiration_time, cache_expirations=self._cache_expirations, cache_path=self._cache_path, cache_class=self._cache_class, cache_arguments=self._cache_arguments, password_callback=self._pw_callback, ) @staticmethod def set_one_cloud(config_file, cloud, set_config=None): """Set a single cloud configuration. :param string config_file: The path to the config file to edit. If this file does not exist it will be created. :param string cloud: The name of the configuration to save to clouds.yaml :param dict set_config: Configuration options to be set """ set_config = set_config or {} cur_config = {} try: with open(config_file) as fh: cur_config = yaml.safe_load(fh) except OSError as e: # Not no such file if e.errno != 2: raise pass clouds_config = cur_config.get('clouds', {}) cloud_config = _auth_update(clouds_config.get(cloud, {}), set_config) clouds_config[cloud] = cloud_config cur_config['clouds'] = clouds_config with open(config_file, 'w') as fh: yaml.safe_dump(cur_config, fh, default_flow_style=False) if __name__ == '__main__': config = OpenStackConfig().get_all_clouds() for cloud in config: print_cloud = False if len(sys.argv) == 1: print_cloud = True elif len(sys.argv) == 3 and ( sys.argv[1] == cloud.name and sys.argv[2] == cloud.region ): print_cloud = True elif len(sys.argv) == 2 and (sys.argv[1] == cloud.name): print_cloud = True if print_cloud: print(cloud.name, cloud.region, cloud.config) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/schema.json0000664000175000017500000000672600000000000021631 0ustar00zuulzuul00000000000000{ "$schema": "http://json-schema.org/draft-04/schema#", "id": "https://opendev.org/openstack/openstacksdk/raw/branch/master/openstack/config/schema.json", "type": "object", "properties": { "auth_type": { "name": "Auth Type", "description": "Name of authentication plugin to be used", 
"default": "password", "type": "string" }, "disable_vendor_agent": { "name": "Disable Vendor Agent Properties", "description": "Image properties required to disable vendor agent", "type": "object", "properties": {} }, "floating_ip_source": { "name": "Floating IP Source", "description": "Which service provides Floating IPs", "enum": [ "neutron", "nova", "None" ], "default": "neutron" }, "image_api_use_tasks": { "name": "Image Task API", "description": "Does the cloud require the Image Task API", "default": false, "type": "boolean" }, "image_format": { "name": "Image Format", "description": "Format for uploaded Images", "default": "qcow2", "type": "string" }, "interface": { "name": "API Interface", "description": "Which API Interface should connections hit", "default": "public", "enum": [ "public", "internal", "admin" ] }, "secgroup_source": { "name": "Security Group Source", "description": "Which service provides security groups", "default": "neutron", "enum": [ "neutron", "nova", "None" ] }, "baremetal_api_version": { "name": "Baremetal API Service Type", "description": "Baremetal API Service Type", "default": "1", "type": "string" }, "block_storage_api_version": { "name": "Block Storage API Version", "description": "Block Storage API Version", "default": "2", "type": "string" }, "compute_api_version": { "name": "Compute API Version", "description": "Compute API Version", "default": "2", "type": "string" }, "database_api_version": { "name": "Database API Version", "description": "Database API Version", "default": "1.0", "type": "string" }, "dns_api_version": { "name": "DNS API Version", "description": "DNS API Version", "default": "2", "type": "string" }, "identity_api_version": { "name": "Identity API Version", "description": "Identity API Version", "default": "2", "type": "string" }, "image_api_version": { "name": "Image API Version", "description": "Image API Version", "default": "1", "type": "string" }, "network_api_version": { "name": "Network API Version", 
"description": "Network API Version", "default": "2", "type": "string" }, "object_store_api_version": { "name": "Object Storage API Version", "description": "Object Storage API Version", "default": "1", "type": "string" }, "volume_api_version": { "name": "Volume API Version", "description": "Volume API Version", "default": "2", "type": "string" }, "vendor_hook": { "name": "Hook for vendor customization", "description": "A possibility for a vendor to alter connection object", "type": "string" } }, "required": [ "auth_type", "disable_vendor_agent", "floating_ip_source", "image_api_use_tasks", "image_format", "interface", "secgroup_source" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendor-schema.json0000664000175000017500000001700500000000000023114 0ustar00zuulzuul00000000000000{ "$schema": "http://json-schema.org/draft-04/schema#", "id": "https://opendev.org/openstack/openstacksdk/raw/branch/master/openstack/config/vendor-schema.json#", "type": "object", "properties": { "name": { "type": "string" }, "profile": { "type": "object", "properties": { "auth": { "type": "object", "properties": { "auth_url": { "name": "Auth URL", "description": "URL of the primary Keystone endpoint", "type": "string" } } }, "auth_type": { "name": "Auth Type", "description": "Name of authentication plugin to be used", "default": "password", "type": "string" }, "disable_vendor_agent": { "name": "Disable Vendor Agent Properties", "description": "Image properties required to disable vendor agent", "type": "object", "properties": {} }, "floating_ip_source": { "name": "Floating IP Source", "description": "Which service provides Floating IPs", "enum": [ "neutron", "nova", "None" ], "default": "neutron" }, "image_api_use_tasks": { "name": "Image Task API", "description": "Does the cloud require the Image Task API", "default": false, "type": "boolean" }, "image_format": { "name": "Image Format", 
"description": "Format for uploaded Images", "default": "qcow2", "type": "string" }, "interface": { "name": "API Interface", "description": "Which API Interface should connections hit", "default": "public", "enum": [ "public", "internal", "admin" ] }, "message": { "name": "Status message", "description": "Optional message with information related to status", "type": "string" }, "requires_floating_ip": { "name": "Requires Floating IP", "description": "Whether the cloud requires a floating IP to route traffic off of the cloud", "default": null, "type": ["boolean", "null"] }, "secgroup_source": { "name": "Security Group Source", "description": "Which service provides security groups", "enum": [ "neutron", "nova", "None" ], "default": "neutron" }, "status": { "name": "Vendor status", "description": "Status of the vendor's cloud", "enum": [ "active", "deprecated", "shutdown"], "default": "active" }, "compute_service_name": { "name": "Compute API Service Name", "description": "Compute API Service Name", "type": "string" }, "database_service_name": { "name": "Database API Service Name", "description": "Database API Service Name", "type": "string" }, "dns_service_name": { "name": "DNS API Service Name", "description": "DNS API Service Name", "type": "string" }, "identity_service_name": { "name": "Identity API Service Name", "description": "Identity API Service Name", "type": "string" }, "image_service_name": { "name": "Image API Service Name", "description": "Image API Service Name", "type": "string" }, "volume_service_name": { "name": "Volume API Service Name", "description": "Volume API Service Name", "type": "string" }, "network_service_name": { "name": "Network API Service Name", "description": "Network API Service Name", "type": "string" }, "object_service_name": { "name": "Object Storage API Service Name", "description": "Object Storage API Service Name", "type": "string" }, "baremetal_service_name": { "name": "Baremetal API Service Name", "description": "Baremetal 
API Service Name", "type": "string" }, "compute_service_type": { "name": "Compute API Service Type", "description": "Compute API Service Type", "type": "string" }, "database_service_type": { "name": "Database API Service Type", "description": "Database API Service Type", "type": "string" }, "dns_service_type": { "name": "DNS API Service Type", "description": "DNS API Service Type", "type": "string" }, "identity_service_type": { "name": "Identity API Service Type", "description": "Identity API Service Type", "type": "string" }, "image_service_type": { "name": "Image API Service Type", "description": "Image API Service Type", "type": "string" }, "volume_service_type": { "name": "Volume API Service Type", "description": "Volume API Service Type", "type": "string" }, "network_service_type": { "name": "Network API Service Type", "description": "Network API Service Type", "type": "string" }, "object_service_type": { "name": "Object Storage API Service Type", "description": "Object Storage API Service Type", "type": "string" }, "baremetal_service_type": { "name": "Baremetal API Service Type", "description": "Baremetal API Service Type", "type": "string" }, "block_storage_api_version": { "name": "Block Storage API Version", "description": "Block Storage API Version", "type": "string" }, "compute_api_version": { "name": "Compute API Version", "description": "Compute API Version", "type": "string" }, "database_api_version": { "name": "Database API Version", "description": "Database API Version", "type": "string" }, "dns_api_version": { "name": "DNS API Version", "description": "DNS API Version", "type": "string" }, "identity_api_version": { "name": "Identity API Version", "description": "Identity API Version", "type": "string" }, "image_api_version": { "name": "Image API Version", "description": "Image API Version", "type": "string" }, "volume_api_version": { "name": "Volume API Version", "description": "Volume API Version", "type": "string" }, "network_api_version": { 
"name": "Network API Version", "description": "Network API Version", "type": "string" }, "object_api_version": { "name": "Object Storage API Version", "description": "Object Storage API Version", "type": "string" }, "baremetal_api_version": { "name": "Baremetal API Version", "description": "Baremetal API Version", "type": "string" }, "vendor_hook": { "name": "Hook for vendor customization", "description": "A possibility for a vendor to alter connection object", "type": "string" } } } }, "required": [ "name", "profile" ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.257319 openstacksdk-4.0.0/openstack/config/vendors/0000775000175000017500000000000000000000000021143 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/__init__.py0000664000175000017500000000616300000000000023262 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glob import json import os import typing as ty import urllib import requests import yaml from openstack.config import _util from openstack import exceptions _VENDORS_PATH = os.path.dirname(os.path.realpath(__file__)) _VENDOR_DEFAULTS: ty.Dict[str, ty.Dict] = {} _WELL_KNOWN_PATH = "{scheme}://{netloc}/.well-known/openstack/api" def _get_vendor_defaults(): global _VENDOR_DEFAULTS if not _VENDOR_DEFAULTS: for vendor in glob.glob(os.path.join(_VENDORS_PATH, '*.yaml')): with open(vendor) as f: vendor_data = yaml.safe_load(f) _VENDOR_DEFAULTS[vendor_data['name']] = vendor_data['profile'] for vendor in glob.glob(os.path.join(_VENDORS_PATH, '*.json')): with open(vendor) as f: vendor_data = json.load(f) _VENDOR_DEFAULTS[vendor_data['name']] = vendor_data['profile'] return _VENDOR_DEFAULTS def get_profile(profile_name): vendor_defaults = _get_vendor_defaults() if profile_name in vendor_defaults: return vendor_defaults[profile_name].copy() profile_url = urllib.parse.urlparse(profile_name) if not profile_url.netloc: # This isn't a url, and we already don't have it. return well_known_url = _WELL_KNOWN_PATH.format( scheme=profile_url.scheme, netloc=profile_url.netloc, ) response = requests.get(well_known_url) if not response.ok: raise exceptions.ConfigException( "{profile_name} is a remote profile that could not be fetched:" " {status_code} {reason}".format( profile_name=profile_name, status_code=response.status_code, reason=response.reason, ) ) vendor_defaults[profile_name] = None return vendor_data = response.json() name = vendor_data['name'] # Merge named and url cloud config, but make named config override the # config from the cloud so that we can supply local overrides if needed. profile = _util.merge_clouds( vendor_data['profile'], vendor_defaults.get(name, {}) ) # If there is (or was) a profile listed in a named config profile, it # might still be here. We just merged in content from a URL though, so # pop the key to prevent doing it again in the future. 
profile.pop('profile', None) # Save the data under both names so we don't reprocess this, no matter # how we're called. vendor_defaults[profile_name] = profile vendor_defaults[name] = profile return profile ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/auro.json0000664000175000017500000000044200000000000023004 0ustar00zuulzuul00000000000000{ "name": "auro", "profile": { "auth": { "auth_url": "https://api.van2.auro.io:5000/v3", "user_domain_name": "Default", "project_domain_name": "Default" }, "identity_api_version": "3", "region_name": "RegionOne", "requires_floating_ip": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/betacloud.json0000664000175000017500000000040400000000000023776 0ustar00zuulzuul00000000000000{ "name": "betacloud", "profile": { "auth": { "auth_url": "https://api-1.betacloud.de:5000" }, "regions": [ "betacloud-1" ], "identity_api_version": "3", "image_format": "raw", "block_storage_api_version": "3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/binero.json0000664000175000017500000000035000000000000023312 0ustar00zuulzuul00000000000000{ "name": "binero", "profile": { "auth": { "auth_url": "https://auth.binero.cloud:5000/v3" }, "identity_api_version": "3", "block_storage_api_version": "3", "regions": [ "europe-se-1" ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/bluebox.json0000664000175000017500000000016100000000000023474 0ustar00zuulzuul00000000000000{ "name": "bluebox", "profile": { "block_storage_api_version": "1", "region_name": "RegionOne" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/catalyst.json0000664000175000017500000000043300000000000023662 0ustar00zuulzuul00000000000000{ "name": "catalyst", "profile": { "auth": { "auth_url": "https://api.cloud.catalyst.net.nz:5000/v2.0" }, "regions": [ "nz-por-1", "nz_wlg_2" ], "image_api_version": "1", "block_storage_api_version": "1", "image_format": "raw" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/citycloud.json0000664000175000017500000000056100000000000024037 0ustar00zuulzuul00000000000000{ "name": "citycloud", "profile": { "auth": { "auth_url": "https://{region_name}.citycloud.com:5000/v3/" }, "regions": [ "Buf1", "Fra1", "Sto2", "Kna1", "dx1", "tky1" ], "requires_floating_ip": true, "block_storage_api_version": "3", "identity_api_version": "3", "image_format": "raw" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/conoha.json0000664000175000017500000000033600000000000023307 0ustar00zuulzuul00000000000000{ "name": "conoha", "profile": { "auth": { "auth_url": "https://identity.{region_name}.conoha.io" }, "regions": [ "sin1", "sjc1", "tyo1" ], "identity_api_version": "2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/dreamcompute.json0000664000175000017500000000032000000000000024516 0ustar00zuulzuul00000000000000{ "name": "dreamcompute", "profile": { "auth": { "auth_url": "https://iad2.dream.io:5000" }, "identity_api_version": "3", "region_name": "RegionOne", "image_format": "raw" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/elastx.json0000664000175000017500000000026200000000000023336 
0ustar00zuulzuul00000000000000{ "name": "elastx", "profile": { "auth": { "auth_url": "https://ops.elastx.cloud:5000/v3" }, "identity_api_version": "3", "region_name": "se-sto" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/entercloudsuite.json0000664000175000017500000000045400000000000025257 0ustar00zuulzuul00000000000000{ "name": "entercloudsuite", "profile": { "auth": { "auth_url": "https://api.entercloudsuite.com/" }, "identity_api_version": "3", "image_api_version": "1", "block_storage_api_version": "1", "regions": [ "it-mil1", "nl-ams1", "de-fra1" ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/fuga.json0000664000175000017500000000045700000000000022766 0ustar00zuulzuul00000000000000{ "name": "fuga", "profile": { "auth": { "auth_url": "https://identity.api.fuga.io:5000", "user_domain_name": "Default", "project_domain_name": "Default" }, "regions": [ "cystack" ], "identity_api_version": "3", "block_storage_api_version": "3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/ibmcloud.json0000664000175000017500000000034700000000000023640 0ustar00zuulzuul00000000000000{ "name": "ibmcloud", "profile": { "auth": { "auth_url": "https://identity.open.softlayer.com" }, "block_storage_api_version": "2", "identity_api_version": "3", "regions": [ "london" ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/internap.json0000664000175000017500000000043500000000000023660 0ustar00zuulzuul00000000000000{ "name": "internap", "profile": { "auth": { "auth_url": "https://identity.api.cloud.inap.com" }, "regions": [ "ams01", "da01", "nyj01", "sin01", "sjc01" ], 
"identity_api_version": "3", "floating_ip_source": "None" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/limestonenetworks.yaml0000664000175000017500000000203000000000000025616 0ustar00zuulzuul00000000000000--- name: limestonenetworks profile: auth: auth_url: https://auth.cloud.lstn.net:5000/v3 regions: - name: us-dfw-1 values: networks: - name: Public Internet routes_externally: true default_interface: true nat_source: true - name: DDoS Protected routes_externally: true - name: Private Network (10.0.0.0/8 only) routes_externally: false - name: Private Network (Floating Public) routes_externally: false nat_destination: true - name: us-slc values: networks: - name: Public Internet routes_externally: true default_interface: true nat_source: true - name: Private Network (10.0.0.0/8 only) routes_externally: false - name: Private Network (Floating Public) routes_externally: false nat_destination: true identity_api_version: '3' image_format: raw volume_api_version: '3' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/otc-swiss.json0000664000175000017500000000045200000000000023772 0ustar00zuulzuul00000000000000{ "name": "otc-swiss", "profile": { "auth": { "auth_url": "iam-pub.eu-ch2.sc.otc.t-systems.com/v3" }, "regions": [ "eu-ch2" ], "identity_api_version": "3", "interface": "public", "image_format": "qcow2", "vendor_hook": "otcextensions.sdk:load" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/otc.json0000664000175000017500000000047200000000000022626 0ustar00zuulzuul00000000000000{ "name": "otc", "profile": { "auth": { "auth_url": "https://iam.{region_name}.otc.t-systems.com/v3" }, "regions": [ "eu-de", "eu-nl" ], "identity_api_version": "3", "interface": 
"public", "image_format": "qcow2", "vendor_hook": "otcextensions.sdk:load" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/ovh-us.json0000664000175000017500000000055100000000000023260 0ustar00zuulzuul00000000000000{ "name": "ovh-us", "profile": { "auth": { "auth_url": "https://auth.cloud.ovh.us/", "user_domain_name": "Default", "project_domain_name": "Default" }, "regions": [ "US-EAST-VA-1", "US-WEST-OR-1", "US-EAST-VA", "US-WEST-OR" ], "identity_api_version": "3", "floating_ip_source": "None" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/ovh.json0000664000175000017500000000011500000000000022627 0ustar00zuulzuul00000000000000{ "name": "ovh", "profile": { "profile": "https://ovhcloud.com" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/rackspace.json0000664000175000017500000000142500000000000023774 0ustar00zuulzuul00000000000000{ "name": "rackspace", "profile": { "auth": { "auth_url": "https://identity.api.rackspacecloud.com/v2.0/" }, "identity_api_version": "2.0", "regions": [ "DFW", "HKG", "IAD", "ORD", "SYD", "LON" ], "database_service_type": "rax:database", "compute_service_name": "cloudServersOpenStack", "image_api_use_tasks": true, "image_format": "vhd", "floating_ip_source": "None", "secgroup_source": "None", "requires_floating_ip": false, "block_storage_endpoint_override": "https://{region_name}.blockstorage.api.rackspacecloud.com/v2/", "block_storage_api_version": "2", "disable_vendor_agent": { "vm_mode": "hvm", "xenapi_use_agent": "False" }, "has_network": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/config/vendors/switchengines.json0000664000175000017500000000035600000000000024714 0ustar00zuulzuul00000000000000{ "name": "switchengines", "profile": { "auth": { "auth_url": "https://keystone.cloud.switch.ch:5000/v3" }, "regions": [ "LS", "ZH" ], "identity_api_version": "3", "image_format": "raw" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/ultimum.json0000664000175000017500000000034300000000000023532 0ustar00zuulzuul00000000000000{ "name": "ultimum", "profile": { "auth": { "auth_url": "https://console.ultimum-cloud.com:5000/" }, "identity_api_version": "3", "block_storage_api_version": "1", "region-name": "RegionOne" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/unitedstack.json0000664000175000017500000000046000000000000024354 0ustar00zuulzuul00000000000000{ "name": "unitedstack", "profile": { "auth": { "auth_url": "https://identity.api.ustack.com/v3" }, "regions": [ "bj1", "gd1" ], "block_storage_api_version": "1", "identity_api_version": "3", "image_format": "raw", "floating_ip_source": "None" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/vexxhost.json0000664000175000017500000000012300000000000023722 0ustar00zuulzuul00000000000000{ "name": "vexxhost", "profile": { "profile": "https://vexxhost.com" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/config/vendors/zetta.json0000664000175000017500000000033000000000000023161 0ustar00zuulzuul00000000000000{ "name": "zetta", "profile": { "auth": { "auth_url": "https://identity.api.zetta.io/v3" }, "regions": [ "no-osl1" ], "identity_api_version": "3", "dns_api_version": "2" } 
} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/connection.py0000664000175000017500000006344700000000000020745 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The :class:`~openstack.connection.Connection` class is the primary interface to the Python SDK. It maintains a context for a connection to a region of a cloud provider. The :class:`~openstack.connection.Connection` has an attribute to access each OpenStack service. At a minimum, the :class:`~openstack.connection.Connection` class needs to be created with a config or the parameters to build one. While the overall system is very flexible, there are four main use cases for different ways to create a :class:`~openstack.connection.Connection`. * Using config settings and keyword arguments as described in :ref:`openstack-config` * Using only keyword arguments passed to the constructor ignoring config files and environment variables. * Using an existing authenticated `keystoneauth1.session.Session`, such as might exist inside of an OpenStack service operational context. * Using an existing :class:`~openstack.config.cloud_region.CloudRegion`. 
Creating the Connection ----------------------- Using config settings ~~~~~~~~~~~~~~~~~~~~~ For users who want to create a :class:`~openstack.connection.Connection` making use of named clouds in ``clouds.yaml`` files, ``OS_`` environment variables and python keyword arguments, the :func:`openstack.connect` factory function is the recommended way to go: .. code-block:: python import openstack conn = openstack.connect(cloud='example', region_name='earth1') If the application in question is a command line application that should also accept command line arguments, an `argparse.Namespace` can be passed to :func:`openstack.connect` that will have relevant arguments added to it and then subsequently consumed by the constructor: .. code-block:: python import argparse import openstack options = argparse.ArgumentParser(description='Awesome OpenStack App') conn = openstack.connect(options=options) Using only keyword arguments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the application wants to avoid loading any settings from ``clouds.yaml`` or environment variables, use the :class:`~openstack.connection.Connection` constructor directly. As long as the ``cloud`` argument is omitted or ``None``, the :class:`~openstack.connection.Connection` constructor will not load settings from files or the environment. .. note:: This is a different default behavior than the :func:`~openstack.connect` factory function. In :func:`~openstack.connect` if ``cloud`` is omitted or ``None``, a default cloud will be loaded, defaulting to the ``envvars`` cloud if it exists. .. 
code-block:: python from openstack import connection conn = connection.Connection( region_name='example-region', auth={ 'auth_url': 'https://auth.example.com', 'username': 'amazing-user', 'password': 'super-secret-password', 'project_id': '33aa1afc-03fe-43b8-8201-4e0d3b4b8ab5', 'user_domain_id': '054abd68-9ad9-418b-96d3-3437bb376703' }, compute_api_version='2', identity_interface='internal', ) Per-service settings as needed by `keystoneauth1.adapter.Adapter` such as ``api_version``, ``service_name``, and ``interface`` can be set, as seen above, by prefixing them with the official ``service-type`` name of the service. ``region_name`` is a setting for the entire :class:`~openstack.config.cloud_region.CloudRegion` and cannot be set per service. From existing authenticated Session ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For applications that already have an authenticated Session, simply passing it to the :class:`~openstack.connection.Connection` constructor is all that is needed: .. code-block:: python from openstack import connection conn = connection.Connection( session=session, region_name='example-region', compute_api_version='2', identity_interface='internal', ) From oslo.conf CONF object ~~~~~~~~~~~~~~~~~~~~~~~~~~ For applications that have an oslo.config ``CONF`` object that has been populated with ``keystoneauth1.loading.register_adapter_conf_options`` in groups named by the OpenStack service's project name, it is possible to construct a Connection with the ``CONF`` object and an authenticated Session. .. note:: This is primarily intended for use by OpenStack services to talk amongst themselves. .. 
code-block:: python from keystoneauth1 import loading as ks_loading from oslo_config import cfg from openstack import connection CONF = cfg.CONF group = cfg.OptGroup('neutron') ks_loading.register_session_conf_options(CONF, group) ks_loading.register_auth_conf_options(CONF, group) ks_loading.register_adapter_conf_options(CONF, group) CONF() auth = ks_loading.load_auth_from_conf_options(CONF, 'neutron') sess = ks_loading.load_session_from_conf_options(CONF, 'neutron', auth=auth) conn = connection.Connection( session=sess, oslo_conf=CONF, ) This can then be used with an appropriate configuration file. .. code-block:: ini [neutron] region_name = RegionOne auth_strategy = keystone project_domain_name = Default project_name = service user_domain_name = Default password = password username = neutron auth_url = http://10.0.110.85/identity auth_type = password service_metadata_proxy = True default_floating_pool = public You may also wish to configure a service user. As discussed in the `Keystone documentation`__, service users are users with specific roles that identify the user as a service. The use of service users can avoid issues caused by the expiration of the original user's token during long running operations, as a fresh token issued for the service user will always accompany the user's token, which may have expired. .. 
code-block:: python from keystoneauth1 import loading as ks_loading from keystoneauth1 import service_token from oslo_config import cfg import openstack from openstack import connection CONF = cfg.CONF neutron_group = cfg.OptGroup('neutron') ks_loading.register_session_conf_options(CONF, neutron_group) ks_loading.register_auth_conf_options(CONF, neutron_group) ks_loading.register_adapter_conf_options(CONF, neutron_group) service_group = cfg.OptGroup('service_user') ks_loading.register_session_conf_options(CONF, service_group) ks_loading.register_auth_conf_options(CONF, service_group) CONF() user_auth = ks_loading.load_auth_from_conf_options(CONF, 'neutron') service_auth = ks_loading.load_auth_from_conf_options(CONF, 'service_user') auth = service_token.ServiceTokenAuthWrapper(user_auth, service_auth) sess = ks_loading.load_session_from_conf_options(CONF, 'neutron', auth=auth) conn = connection.Connection( session=sess, oslo_conf=CONF, ) This will necessitate an additional section in the configuration file used. .. code-block:: ini [service_user] auth_strategy = keystone project_domain_name = Default project_name = service user_domain_name = Default password = password username = nova auth_url = http://10.0.110.85/identity auth_type = password .. __: https://docs.openstack.org/keystone/latest/admin/manage-services.html From existing CloudRegion ~~~~~~~~~~~~~~~~~~~~~~~~~ If you already have an :class:`~openstack.config.cloud_region.CloudRegion` you can pass it in instead: .. code-block:: python from openstack import connection import openstack.config config = openstack.config.get_cloud_region( cloud='example', region_name='earth', ) conn = connection.Connection(config=config) Using the Connection -------------------- Services are accessed through an attribute named after the service's official service-type. List ~~~~ An iterator containing a list of all the projects is retrieved in this manner: .. 
code-block:: python projects = conn.identity.projects() Find or create ~~~~~~~~~~~~~~ If you wanted to make sure you had a network named 'zuul', you would first try to find it and if that fails, you would create it:: network = conn.network.find_network("zuul") if network is None: network = conn.network.create_network(name="zuul") Additional information about the services can be found in the :ref:`service-proxies` documentation. """ import copy import importlib.metadata as importlib_metadata import warnings import keystoneauth1.exceptions import requestsexceptions from openstack import _log from openstack.cloud import _accelerator from openstack.cloud import _baremetal from openstack.cloud import _block_storage from openstack.cloud import _coe from openstack.cloud import _compute from openstack.cloud import _dns from openstack.cloud import _identity from openstack.cloud import _image from openstack.cloud import _network from openstack.cloud import _object_store from openstack.cloud import _orchestration from openstack.cloud import _shared_file_system from openstack import config as _config import openstack.config.cloud_region from openstack import exceptions from openstack import service_description __all__ = [ 'from_config', 'Connection', ] if requestsexceptions.SubjectAltNameWarning: warnings.filterwarnings( 'ignore', category=requestsexceptions.SubjectAltNameWarning ) _logger = _log.setup_logging('openstack') def from_config(cloud=None, config=None, options=None, **kwargs): """Create a Connection using openstack.config :param str cloud: Use the `cloud` configuration details when creating the Connection. :param openstack.config.cloud_region.CloudRegion config: An existing CloudRegion configuration. If no `config` is provided, `openstack.config.OpenStackConfig` will be called, and the provided `name` will be used in determining which cloud's configuration details will be used in creation of the `Connection` instance. 
:param argparse.Namespace options: Allows direct passing in of options to be added to the cloud config. This does not have to be an actual instance of argparse.Namespace, despite the naming of the `openstack.config.loader.OpenStackConfig.get_one` argument to which it is passed. :rtype: :class:`~openstack.connection.Connection` """ # TODO(mordred) Backwards compat while we transition cloud = kwargs.pop('cloud_name', cloud) config = kwargs.pop('cloud_config', config) if config is None: config = _config.OpenStackConfig().get_one( cloud=cloud, argparse=options, **kwargs ) return Connection(config=config) class Connection( _accelerator.AcceleratorCloudMixin, _baremetal.BaremetalCloudMixin, _block_storage.BlockStorageCloudMixin, _compute.ComputeCloudMixin, _coe.CoeCloudMixin, _dns.DnsCloudMixin, _identity.IdentityCloudMixin, _image.ImageCloudMixin, _network.NetworkCloudMixin, _object_store.ObjectStoreCloudMixin, _orchestration.OrchestrationCloudMixin, _shared_file_system.SharedFileSystemCloudMixin, ): def __init__( self, cloud=None, config=None, session=None, app_name=None, app_version=None, extra_services=None, strict=False, use_direct_get=None, task_manager=None, rate_limit=None, oslo_conf=None, service_types=None, global_request_id=None, strict_proxies=False, pool_executor=None, **kwargs, ): """Create a connection to a cloud. A connection needs information about how to connect, how to authenticate and how to select the appropriate services to use. The recommended way to provide this information is by referencing a named cloud config from an existing `clouds.yaml` file. The cloud name ``envvars`` may be used to consume a cloud configured via ``OS_`` environment variables. A pre-existing :class:`~openstack.config.cloud_region.CloudRegion` object can be passed in lieu of a cloud name, for cases where the user already has a fully formed CloudRegion and just wants to use it. 
Similarly, if for some reason the user already has a :class:`~keystoneauth1.session.Session` and wants to use it, it may be passed in. :param str cloud: Name of the cloud from config to use. :param config: CloudRegion object representing the config for the region of the cloud in question. :type config: :class:`~openstack.config.cloud_region.CloudRegion` :param session: A session object compatible with :class:`~keystoneauth1.session.Session`. :type session: :class:`~keystoneauth1.session.Session` :param str app_name: Name of the application to be added to User Agent. :param str app_version: Version of the application to be added to User Agent. :param extra_services: List of :class:`~openstack.service_description.ServiceDescription` objects describing services that openstacksdk otherwise does not know about. :param bool use_direct_get: For get methods, make specific REST calls for server-side filtering instead of making list calls and filtering client-side. Default false. :param task_manager: Ignored. Exists for backwards compat during transition. Rate limit parameters should be passed directly to the `rate_limit` parameter. :param rate_limit: Client-side rate limit, expressed in calls per second. The parameter can either be a single float, or it can be a dict with keys as service-type and values as floats expressing the calls per second for that service. Defaults to None, which means no rate-limiting is performed. :param oslo_conf: An oslo.config ``CONF`` object that has been populated with ``keystoneauth1.loading.register_adapter_conf_options`` in groups named by the OpenStack service's project name. :type oslo_conf: :class:`~oslo_config.cfg.ConfigOpts` :param service_types: A list/set of service types this Connection should support. All other service types will be disabled (will error if used). 
**Currently only supported in conjunction with the ``oslo_conf`` kwarg.** :param strict_proxies: Throw an ``openstack.exceptions.ServiceDiscoveryException`` if the endpoint for a given service doesn't work. This is useful for OpenStack services using sdk to talk to other OpenStack services where it can be expected that the deployer config is correct and errors should be reported immediately. Default false. :type strict_proxies: bool :param global_request_id: A Request-id to send with all interactions. :type global_request_id: str :param pool_executor: A futurist ``Executor`` object to be used for concurrent background activities. Defaults to None in which case a ThreadPoolExecutor will be created if needed. :type pool_executor: :class:`~futurist.Executor` :param kwargs: If a config is not provided, the rest of the parameters provided are assumed to be arguments to be passed to the CloudRegion constructor. """ super().__init__( cloud=cloud, config=config, session=session, app_name=app_name, app_version=app_version, extra_services=extra_services, strict=strict, use_direct_get=use_direct_get, task_manager=task_manager, rate_limit=rate_limit, oslo_conf=oslo_conf, service_types=service_types, global_request_id=global_request_id, strict_proxies=strict_proxies, pool_executor=pool_executor, **kwargs, ) # Allow vendors to provide hooks. 
They will normally only receive a # connection object and a responsible to register additional services vendor_hook = kwargs.get('vendor_hook') if not vendor_hook and 'vendor_hook' in self.config.config: # Get the one from profile vendor_hook = self.config.config.get('vendor_hook') if vendor_hook: try: # NOTE(gtema): no class name in the hook, plain module:function # Split string hook into module and function try: package_name, function = vendor_hook.rsplit(':') if package_name and function: ep = importlib_metadata.EntryPoint( name='vendor_hook', value=vendor_hook, group='vendor_hook', ) hook = ep.load() hook(self) except ValueError: self.log.warning( 'Hook should be in the entrypoint ' 'module:attribute format' ) except (ImportError, TypeError, AttributeError) as e: self.log.warning( 'Configured hook %s cannot be executed: %s', vendor_hook, e ) # Add additional metrics into the configuration according to the # selected connection. We don't want to deal with overall config in the # proxy, just pass required part. if ( self.config._influxdb_config and 'additional_metric_tags' in self.config.config ): self.config._influxdb_config['additional_metric_tags'] = ( self.config.config['additional_metric_tags'] ) def add_service(self, service): """Add a service to the Connection. Attaches an instance of the :class:`~openstack.proxy.Proxy` class contained in :class:`~openstack.service_description.ServiceDescription`. The :class:`~openstack.proxy.Proxy` will be attached to the `Connection` by its ``service_type`` and by any ``aliases`` that may be specified. :param openstack.service_description.ServiceDescription service: Object describing the service to be attached. As a convenience, if ``service`` is a string it will be treated as a ``service_type`` and a basic :class:`~openstack.service_description.ServiceDescription` will be created. """ # If we don't have a proxy, just instantiate Proxy so that # we get an adapter. 
if isinstance(service, str): service = service_description.ServiceDescription(service) # Directly invoke descriptor of the ServiceDescription def getter(self): return service.__get__(self, service) # Register the ServiceDescription class (as property) # with every known alias for a "runtime descriptor" for attr_name in service.all_types: setattr( self.__class__, attr_name.replace('-', '_'), property(fget=getter), ) self.config.enable_service(service.service_type) def authorize(self): """Authorize this Connection .. note:: This method is optional. When an application makes a call to any OpenStack service, this method allows you to request a token manually before attempting to do anything else. :returns: A string token. :raises: :class:`~openstack.exceptions.HttpException` if the authorization fails due to reasons like the credentials provided are unable to be authorized or the `auth_type` argument is missing, etc. """ try: return self.session.get_token() except keystoneauth1.exceptions.ClientException as e: raise exceptions.SDKException(e) def connect_as(self, **kwargs): """Make a new Connection object with new auth context. Take the existing settings from the current cloud and construct a new Connection object with some of the auth settings overridden. This is useful for getting an object to perform tasks with as another user, or in the context of a different project. .. code-block:: python conn = openstack.connect(cloud='example') # Work normally servers = conn.list_servers() conn2 = conn.connect_as(username='different-user', password='') # Work as different-user servers = conn2.list_servers() :param kwargs: keyword arguments can contain anything that would normally go in an auth dict. They will override the same settings from the parent cloud as appropriate. Entries that do not want to be overridden can be ommitted. 
""" if self.config._openstack_config: config = self.config._openstack_config else: # TODO(mordred) Replace this with from_session config = openstack.config.OpenStackConfig( app_name=self.config._app_name, app_version=self.config._app_version, load_yaml_config=False, ) params = copy.deepcopy(self.config.config) # Remove profile from current cloud so that overridding works params.pop('profile', None) # Utility function to help with the stripping below. def pop_keys(params, auth, name_key, id_key): if name_key in auth or id_key in auth: params['auth'].pop(name_key, None) params['auth'].pop(id_key, None) # If there are user, project or domain settings in the incoming auth # dict, strip out both id and name so that a user can say: # cloud.connect_as(project_name='foo') # and have that work with clouds that have a project_id set in their # config. for prefix in ('user', 'project'): if prefix == 'user': name_key = 'username' else: name_key = 'project_name' id_key = f'{prefix}_id' pop_keys(params, kwargs, name_key, id_key) id_key = f'{prefix}_domain_id' name_key = f'{prefix}_domain_name' pop_keys(params, kwargs, name_key, id_key) for key, value in kwargs.items(): params['auth'][key] = value cloud_region = config.get_one(**params) # Attach the discovery cache from the old session so we won't # double discover. cloud_region._discovery_cache = self.session._discovery_cache # Override the cloud name so that logging/location work right cloud_region._name = self.name cloud_region.config['profile'] = self.name # Use self.__class__ so that we return whatever this if, like if it's # a subclass in the case of shade wrapping sdk. return self.__class__(config=cloud_region) def connect_as_project(self, project): """Make a new Connection object with a new project. Take the existing settings from the current cloud and construct a new Connection object with the project settings overridden. 
This is useful for getting an object to perform tasks with as another user, or in the context of a different project. .. code-block:: python cloud = openstack.connect(cloud='example') # Work normally servers = cloud.list_servers() cloud2 = cloud.connect_as_project('different-project') # Work in different-project servers = cloud2.list_servers() :param project: Either a project name or a project dict as returned by ``list_projects``. """ auth = {} if isinstance(project, dict): auth['project_id'] = project.get('id') auth['project_name'] = project.get('name') if project.get('domain_id'): auth['project_domain_id'] = project['domain_id'] else: auth['project_name'] = project return self.connect_as(**auth) def endpoint_for(self, service_type, interface=None, region_name=None): """Return the endpoint for a given service. Respects config values for Connection, including ``*_endpoint_override``. For direct values from the catalog regardless of overrides, see :meth:`~openstack.config.cloud_region.CloudRegion.get_endpoint_from_catalog` :param service_type: Service Type of the endpoint to search for. :param interface: Interface of the endpoint to search for. Optional, defaults to the configured value for interface for this Connection. :param region_name: Region Name of the endpoint to search for. Optional, defaults to the configured value for region_name for this Connection. :returns: The endpoint of the service, or None if not found. 
""" endpoint_override = self.config.get_endpoint(service_type) if endpoint_override: return endpoint_override return self.config.get_endpoint_from_catalog( service_type=service_type, interface=interface, region_name=region_name, ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.257319 openstacksdk-4.0.0/openstack/container_infrastructure_management/0000775000175000017500000000000000000000000025534 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/container_infrastructure_management/__init__.py0000664000175000017500000000000000000000000027633 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=openstacksdk-4.0.0/openstack/container_infrastructure_management/container_infrastructure_management_service.py 22 mtime=1725296355.0 openstacksdk-4.0.0/openstack/container_infrastructure_management/container_infrastructure_management0000664000175000017500000000155100000000000034777 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.container_infrastructure_management.v1 import _proxy from openstack import service_description class ContainerInfrastructureManagementService( service_description.ServiceDescription, ): """The container infrastructure management service.""" supported_versions = { '1': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.257319 openstacksdk-4.0.0/openstack/container_infrastructure_management/v1/0000775000175000017500000000000000000000000026062 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/container_infrastructure_management/v1/__init__.py0000664000175000017500000000000000000000000030161 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/container_infrastructure_management/v1/_proxy.py0000664000175000017500000002431200000000000027756 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.container_infrastructure_management.v1 import ( cluster as _cluster, ) from openstack.container_infrastructure_management.v1 import ( cluster_certificate as _cluster_cert, ) from openstack.container_infrastructure_management.v1 import ( cluster_template as _cluster_template, ) from openstack.container_infrastructure_management.v1 import ( service as _service, ) from openstack import proxy class Proxy(proxy.Proxy): _resource_registry = { "cluster": _cluster.Cluster, "cluster_template": _cluster_template.ClusterTemplate, "service": _service.Service, } def create_cluster(self, **attrs): """Create a new cluster from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster`, comprised of the properties on the Cluster class. :returns: The results of cluster creation :rtype: :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` """ return self._create(_cluster.Cluster, **attrs) def delete_cluster(self, cluster, ignore_missing=True): """Delete a cluster :param cluster: The value can be either the ID of a cluster or a :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the cluster does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent cluster. :returns: ``None`` """ self._delete(_cluster.Cluster, cluster, ignore_missing=ignore_missing) def find_cluster(self, name_or_id, ignore_missing=True): """Find a single cluster :param name_or_id: The name or ID of a cluster. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` or None """ return self._find( _cluster.Cluster, name_or_id, ignore_missing=ignore_missing, ) def get_cluster(self, cluster): """Get a single cluster :param cluster: The value can be the ID of a cluster or a :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` instance. :returns: One :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_cluster.Cluster, cluster) def clusters(self, **query): """Return a generator of clusters :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of cluster objects :rtype: :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` """ return self._list(_cluster.Cluster, **query) def update_cluster(self, cluster, **attrs): """Update a cluster :param cluster: Either the id of a cluster or a :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` instance. :param attrs: The attributes to update on the cluster represented by ``cluster``. :returns: The updated cluster :rtype: :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` """ return self._update(_cluster.Cluster, cluster, **attrs) # ============== Cluster Templates ============== def create_cluster_template(self, **attrs): """Create a new cluster_template from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate`, comprised of the properties on the ClusterTemplate class. 
:returns: The results of cluster_template creation :rtype: :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` """ return self._create(_cluster_template.ClusterTemplate, **attrs) def delete_cluster_template(self, cluster_template, ignore_missing=True): """Delete a cluster_template :param cluster_template: The value can be either the ID of a cluster_template or a :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the cluster_template does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent cluster_template. :returns: ``None`` """ self._delete( _cluster_template.ClusterTemplate, cluster_template, ignore_missing=ignore_missing, ) def find_cluster_template(self, name_or_id, ignore_missing=True): """Find a single cluster_template :param name_or_id: The name or ID of a cluster_template. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` or None """ return self._find( _cluster_template.ClusterTemplate, name_or_id, ignore_missing=ignore_missing, ) def get_cluster_template(self, cluster_template): """Get a single cluster_template :param cluster_template: The value can be the ID of a cluster_template or a :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` instance. :returns: One :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get(_cluster_template.ClusterTemplate, cluster_template) def cluster_templates(self, **query): """Return a generator of cluster_templates :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of cluster_template objects :rtype: :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` """ return self._list(_cluster_template.ClusterTemplate, **query) def update_cluster_template(self, cluster_template, **attrs): """Update a cluster_template :param cluster_template: Either the id of a cluster_template or a :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` instance. :param attrs: The attributes to update on the cluster_template represented by ``cluster_template``. :returns: The updated cluster_template :rtype: :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` """ return self._update( _cluster_template.ClusterTemplate, cluster_template, **attrs ) # ============== Cluster Certificates ============== def create_cluster_certificate(self, **attrs): """Create a new cluster_certificate from CSR :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.container_infrastructure_management.v1.cluster_certificate.ClusterCertificate`, comprised of the properties on the ClusterCertificate class. :returns: The results of cluster_certificate creation :rtype: :class:`~openstack.container_infrastructure_management.v1.cluster_certificate.ClusterCertificate` """ return self._create(_cluster_cert.ClusterCertificate, **attrs) def get_cluster_certificate(self, cluster_certificate): """Get a single cluster_certificate :param cluster_certificate: The value can be the ID of a cluster_certificate or a :class:`~openstack.container_infrastructure_management.v1.cluster_certificate.ClusterCertificate` instance. 
:returns: One :class:`~openstack.container_infrastructure_management.v1.cluster_certificate.ClusterCertificate` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_cluster_cert.ClusterCertificate, cluster_certificate) # ============== Services ============== def services(self): """Return a generator of services :returns: A generator of service objects :rtype: :class:`~openstack.container_infrastructure_management.v1.service.Service` """ return self._list(_service.Service) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/container_infrastructure_management/v1/cluster.py0000664000175000017500000001770400000000000030126 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class Cluster(resource.Resource): resources_key = 'clusters' base_path = '/clusters' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_patch = True commit_method = 'PATCH' commit_jsonpatch = True #: The endpoint URL of COE API exposed to end-users. api_address = resource.Body('api_address') #: The UUID of the cluster template. cluster_template_id = resource.Body('cluster_template_id') #: Version info of chosen COE in bay/cluster for helping client in picking #: the right version of client. 
coe_version = resource.Body('coe_version') #: The timeout for cluster creation in minutes. The value expected is a #: positive integer. If the timeout is reached during cluster creation #: process, the operation will be aborted and the cluster status will be #: set to CREATE_FAILED. Defaults to 60. create_timeout = resource.Body('create_timeout', type=int) #: The date and time when the resource was created. The date and time stamp #: format is ISO 8601:: #: #: CCYY-MM-DDThh:mm:ss±hh:mm #: #: For example, `2015-08-27T09:49:58-05:00`. The ±hh:mm value, if included, #: is the time zone as an offset from UTC. created_at = resource.Body('created_at') #: The custom discovery url for node discovery. This is used by the COE to #: discover the servers that have been created to host the containers. The #: actual discovery mechanism varies with the COE. In some cases, the #: service fills in the server info in the discovery service. In other #: cases,if the discovery_url is not specified, the service will use the #: public discovery service at https://discovery.etcd.io. In this case, the #: service will generate a unique url here for each bay and store the info #: for the servers. discovery_url = resource.Body('discovery_url') #: The name or ID of the network to provide connectivity to the internal #: network for the bay/cluster. fixed_network = resource.Body('fixed_network') #: The fixed subnet to use when allocating network addresses for nodes in #: bay/cluster. fixed_subnet = resource.Body('fixed_subnet') #: The flavor name or ID to use when booting the node servers. Defaults to #: m1.small. flavor_id = resource.Body('flavor_id') #: Whether to enable using the floating IP of cloud provider. Some cloud #: providers use floating IPs while some use public IPs. When set to true, #: floating IPs will be used. If this value is not provided, the value of #: ``floating_ip_enabled`` provided in the template will be used. 
is_floating_ip_enabled = resource.Body('floating_ip_enabled', type=bool) #: Whether to enable the master load balancer. Since multiple masters may #: exist in a bay/cluster, a Neutron load balancer is created to provide #: the API endpoint for the bay/cluster and to direct requests to the #: masters. In some cases, such as when the LBaaS service is not available, #: this option can be set to false to create a bay/cluster without the load #: balancer. In this case, one of the masters will serve as the API #: endpoint. The default is true, i.e. to create the load balancer for the #: bay. is_master_lb_enabled = resource.Body('master_lb_enabled', type=bool) #: The name of the SSH keypair to configure in the bay/cluster servers for #: SSH access. Users will need the key to be able to ssh to the servers in #: the bay/cluster. The login name is specific to the bay/cluster driver. #: For example, with fedora-atomic image the default login name is fedora. keypair = resource.Body('keypair') #: Arbitrary labels. The accepted keys and valid values are defined in the #: bay/cluster drivers. They are used as a way to pass additional #: parameters that are specific to a bay/cluster driver. labels = resource.Body('labels', type=dict) #: A list of floating IPs of all master nodes. master_addresses = resource.Body('master_addresses', type=list) #: The number of servers that will serve as master for the bay/cluster. Set #: to more than 1 master to enable High Availability. If the option #: master-lb-enabled is specified in the baymodel/cluster template, the #: master servers will be placed in a load balancer pool. Defaults to 1. master_count = resource.Body('master_count', type=int) #: The flavor of the master node for this baymodel/cluster template. master_flavor_id = resource.Body('master_flavor_id') #: Name of the resource. name = resource.Body('name') #: The number of servers that will serve as node in the bay/cluster. #: Defaults to 1. 
node_count = resource.Body('node_count', type=int) #: A list of floating IPs of all servers that serve as nodes. node_addresses = resource.Body('node_addresses', type=list) #: The reference UUID of orchestration stack from Heat orchestration #: service. stack_id = resource.Body('stack_id') #: The current state of the bay/cluster. status = resource.Body('status') #: The reason of bay/cluster current status. status_reason = resource.Body('reason') #: The date and time when the resource was updated. The date and time stamp #: format is ISO 8601:: #: #: CCYY-MM-DDThh:mm:ss±hh:mm #: #: For example, `2015-08-27T09:49:58-05:00`. The ±hh:mm value, if included, #: is the time zone as an offset from UTC. If the updated_at date and time #: stamp is not set, its value is null. updated_at = resource.Body('updated_at') #: The UUID of the cluster. uuid = resource.Body('uuid', alternate_id=True) def resize(self, session, *, node_count, nodes_to_remove=None): """Resize the cluster. :param node_count: The number of servers that will serve as node in the bay/cluster. The default is 1. :param nodes_to_remove: The server ID list will be removed if downsizing the cluster. :returns: The UUID of the resized cluster. :raises: :exc:`~openstack.exceptions.NotFoundException` if the resource was not found. """ url = utils.urljoin(Cluster.base_path, self.id, 'actions', 'resize') headers = {'Accept': ''} body = { 'node_count': node_count, 'nodes_to_remove': nodes_to_remove, } response = session.post(url, json=body, headers=headers) exceptions.raise_from_response(response) return response['uuid'] def upgrade(self, session, *, cluster_template, max_batch_size=None): """Upgrade the cluster. :param cluster_template: The UUID of the cluster template. :param max_batch_size: The max batch size each time when doing upgrade. The default is 1 :returns: The UUID of the updated cluster. :raises: :exc:`~openstack.exceptions.NotFoundException` if the resource was not found. 
""" url = utils.urljoin(Cluster.base_path, self.id, 'actions', 'upgrade') headers = {'Accept': ''} body = { 'cluster_template': cluster_template, 'max_batch_size': max_batch_size, } response = session.post(url, json=body, headers=headers) exceptions.raise_from_response(response) return response['uuid'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/container_infrastructure_management/v1/cluster_certificate.py0000664000175000017500000000210500000000000032455 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ClusterCertificate(resource.Resource): base_path = '/certificates' # capabilities allow_create = True allow_list = False allow_fetch = True #: The UUID of the bay. bay_uuid = resource.Body('bay_uuid') #: The UUID of the cluster. cluster_uuid = resource.Body('cluster_uuid', alternate_id=True) #: Certificate Signing Request (CSR) for authenticating client key. csr = resource.Body('csr') #: CA certificate for the bay/cluster. 
pem = resource.Body('pem') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/container_infrastructure_management/v1/cluster_template.py0000664000175000017500000001305400000000000032013 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ClusterTemplate(resource.Resource): resources_key = 'clustertemplates' base_path = '/clustertemplates' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_patch = True commit_method = 'PATCH' commit_jsonpatch = True #: The exposed port of COE API server. apiserver_port = resource.Body('apiserver_port', type=int) #: Display the attribute os_distro defined as appropriate metadata in image #: for the bay/cluster driver. cluster_distro = resource.Body('cluster_distro') #: Specify the Container Orchestration Engine to use. Supported COEs #: include kubernetes, swarm, mesos. coe = resource.Body('coe') #: The date and time when the resource was created. created_at = resource.Body('created_at') #: The name of a driver to manage the storage for the images and the #: container’s writable layer. docker_storage_driver = resource.Body('docker_storage_driver') #: The size in GB for the local storage on each server for the Docker #: daemon to cache the images and host the containers. 
docker_volume_size = resource.Body('docker_volume_size', type=int) #: The DNS nameserver for the servers and containers in the bay/cluster to #: use. dns_nameserver = resource.Body('dns_nameserver') #: The name or network ID of a Neutron network to provide connectivity to #: the external internet for the bay/cluster. external_network_id = resource.Body('external_network_id') #: The name or network ID of a Neutron network to provide connectivity to #: the internal network for the bay/cluster. fixed_network = resource.Body('fixed_network') #: Fixed subnet that are using to allocate network address for nodes in #: bay/cluster. fixed_subnet = resource.Body('fixed_subnet') #: The nova flavor ID or name for booting the node servers. flavor_id = resource.Body('flavor_id') #: The IP address for a proxy to use when direct http access #: from the servers to sites on the external internet is blocked. #: This may happen in certain countries or enterprises, and the #: proxy allows the servers and containers to access these sites. #: The format is a URL including a port number. The default is #: None. http_proxy = resource.Body('http_proxy') #: The IP address for a proxy to use when direct https access from the #: servers to sites on the external internet is blocked. https_proxy = resource.Body('https_proxy') #: The name or UUID of the base image in Glance to boot the servers for the #: bay/cluster. image_id = resource.Body('image_id') #: The URL pointing to users’s own private insecure docker #: registry to deploy and run docker containers. insecure_registry = resource.Body('insecure_registry') #: Whether enable or not using the floating IP of cloud provider. is_floating_ip_enabled = resource.Body('floating_ip_enabled') #: Indicates whether the ClusterTemplate is hidden or not. is_hidden = resource.Body('hidden', type=bool) #: this option can be set to false to create a bay/cluster without the load #: balancer. 
is_master_lb_enabled = resource.Body('master_lb_enabled', type=bool) #: Specifying this parameter will disable TLS so that users can access the #: COE endpoints without a certificate. is_tls_disabled = resource.Body('tls_disabled', type=bool) #: Setting this flag makes the baymodel/cluster template public and #: accessible by other users. is_public = resource.Body('public', type=bool) #: This option provides an alternative registry based on the Registry V2 is_registry_enabled = resource.Body('registry_enabled', type=bool) #: The name of the SSH keypair to configure in the bay/cluster servers for #: ssh access. keypair_id = resource.Body('keypair_id') #: Arbitrary labels. The accepted keys and valid values are defined in the #: bay/cluster drivers. They are used as a way to pass additional #: parameters that are specific to a bay/cluster driver. labels = resource.Body('labels', type=dict) #: The flavor of the master node for this baymodel/cluster template. master_flavor_id = resource.Body('master_flavor_id') #: The name of a network driver for providing the networks for the #: containers. network_driver = resource.Body('network_driver') #: When a proxy server is used, some sites should not go through the proxy #: and should be accessed normally. no_proxy = resource.Body('no_proxy') #: The servers in the bay/cluster can be vm or baremetal. server_type = resource.Body('server_type') #: The date and time when the resource was updated. updated_at = resource.Body('updated_at') #: The UUID of the cluster template. uuid = resource.Body('uuid', alternate_id=True) #: The name of a volume driver for managing the persistent storage for the #: containers. 
volume_driver = resource.Body('volume_driver') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/container_infrastructure_management/v1/service.py0000664000175000017500000000261000000000000030073 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Service(resource.Resource): resources_key = 'mservices' base_path = '/mservices' # capabilities allow_list = True #: The name of the binary form of the Magnum service. binary = resource.Body('binary') #: The date and time when the resource was created. created_at = resource.Body('created_at') #: The disable reason of the service, null if the service is enabled or #: disabled without reason provided. disabled_reason = resource.Body('disabled_reason') #: The host for the service. host = resource.Body('host') #: The total number of report. report_count = resource.Body('report_count') #: The current state of Magnum services. state = resource.Body('state') #: The date and time when the resource was updated. 
updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.257319 openstacksdk-4.0.0/openstack/database/0000775000175000017500000000000000000000000017762 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/database/__init__.py0000664000175000017500000000000000000000000022061 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/database/database_service.py0000664000175000017500000000142300000000000023620 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.database.v1 import _proxy from openstack import service_description class DatabaseService(service_description.ServiceDescription): """The database service.""" supported_versions = { '1': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.261321 openstacksdk-4.0.0/openstack/database/v1/0000775000175000017500000000000000000000000020310 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/database/v1/__init__.py0000664000175000017500000000000000000000000022407 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/database/v1/_proxy.py0000664000175000017500000003251000000000000022203 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.database.v1 import database as _database from openstack.database.v1 import flavor as _flavor from openstack.database.v1 import instance as _instance from openstack.database.v1 import user as _user from openstack import proxy class Proxy(proxy.Proxy): _resource_registry = { "database": _database.Database, "flavor": _flavor.Flavor, "instance": _instance.Instance, "user": _user.User, } def create_database(self, instance, **attrs): """Create a new database from attributes :param instance: This can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.database.v1.database.Database`, comprised of the properties on the Database class. :returns: The results of server creation :rtype: :class:`~openstack.database.v1.database.Database` """ instance = self._get_resource(_instance.Instance, instance) return self._create( _database.Database, instance_id=instance.id, **attrs ) def delete_database(self, database, instance=None, ignore_missing=True): """Delete a database :param database: The value can be either the ID of a database or a :class:`~openstack.database.v1.database.Database` instance. :param instance: This parameter needs to be specified when an ID is given as `database`. It can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the database does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent database. 
:returns: ``None`` """ instance_id = self._get_uri_attribute( database, instance, "instance_id" ) self._delete( _database.Database, database, instance_id=instance_id, ignore_missing=ignore_missing, ) def find_database(self, name_or_id, instance, ignore_missing=True): """Find a single database :param name_or_id: The name or ID of a database. :param instance: This can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.database.v1.database.Database` or None """ instance = self._get_resource(_instance.Instance, instance) return self._find( _database.Database, name_or_id, instance_id=instance.id, ignore_missing=ignore_missing, ) def databases(self, instance, **query): """Return a generator of databases :param instance: This can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` instance that the interface belongs to. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of database objects :rtype: :class:`~openstack.database.v1.database.Database` """ instance = self._get_resource(_instance.Instance, instance) return self._list(_database.Database, instance_id=instance.id, **query) def get_database(self, database, instance=None): """Get a single database :param instance: This parameter needs to be specified when an ID is given as `database`. It can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` :param database: The value can be the ID of a database or a :class:`~openstack.database.v1.database.Database` instance. 
:returns: One :class:`~openstack.database.v1.database.Database` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_database.Database, database) def find_flavor(self, name_or_id, ignore_missing=True): """Find a single flavor :param name_or_id: The name or ID of a flavor. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.database.v1.flavor.Flavor` or None """ return self._find( _flavor.Flavor, name_or_id, ignore_missing=ignore_missing ) def get_flavor(self, flavor): """Get a single flavor :param flavor: The value can be the ID of a flavor or a :class:`~openstack.database.v1.flavor.Flavor` instance. :returns: One :class:`~openstack.database.v1.flavor.Flavor` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_flavor.Flavor, flavor) def flavors(self, **query): """Return a generator of flavors :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of flavor objects :rtype: :class:`~openstack.database.v1.flavor.Flavor` """ return self._list(_flavor.Flavor, **query) def create_instance(self, **attrs): """Create a new instance from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.database.v1.instance.Instance`, comprised of the properties on the Instance class. :returns: The results of server creation :rtype: :class:`~openstack.database.v1.instance.Instance` """ return self._create(_instance.Instance, **attrs) def delete_instance(self, instance, ignore_missing=True): """Delete an instance :param instance: The value can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` instance. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the instance does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent instance. :returns: ``None`` """ self._delete( _instance.Instance, instance, ignore_missing=ignore_missing ) def find_instance(self, name_or_id, ignore_missing=True): """Find a single instance :param name_or_id: The name or ID of a instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.database.v1.instance.Instance` or None """ return self._find( _instance.Instance, name_or_id, ignore_missing=ignore_missing ) def get_instance(self, instance): """Get a single instance :param instance: The value can be the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` instance. :returns: One :class:`~openstack.database.v1.instance.Instance` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_instance.Instance, instance) def instances(self, **query): """Return a generator of instances :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of instance objects :rtype: :class:`~openstack.database.v1.instance.Instance` """ return self._list(_instance.Instance, **query) def update_instance(self, instance, **attrs): """Update a instance :param instance: Either the id of a instance or a :class:`~openstack.database.v1.instance.Instance` instance. :param attrs: The attributes to update on the instance represented by ``instance``. 
:returns: The updated instance :rtype: :class:`~openstack.database.v1.instance.Instance` """ return self._update(_instance.Instance, instance, **attrs) def create_user(self, instance, **attrs): """Create a new user from attributes :param instance: This can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.database.v1.user.User`, comprised of the properties on the User class. :returns: The results of server creation :rtype: :class:`~openstack.database.v1.user.User` """ instance = self._get_resource(_instance.Instance, instance) return self._create(_user.User, instance_id=instance.id, **attrs) def delete_user(self, user, instance=None, ignore_missing=True): """Delete a user :param user: The value can be either the ID of a user or a :class:`~openstack.database.v1.user.User` instance. :param instance: This parameter needs to be specified when an ID is given as `user`. It can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the user does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent user. :returns: ``None`` """ instance = self._get_resource(_instance.Instance, instance) self._delete( _user.User, user, ignore_missing=ignore_missing, instance_id=instance.id, ) def find_user(self, name_or_id, instance, ignore_missing=True): """Find a single user :param name_or_id: The name or ID of a user. :param instance: This can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.database.v1.user.User` or None """ instance = self._get_resource(_instance.Instance, instance) return self._find( _user.User, name_or_id, instance_id=instance.id, ignore_missing=ignore_missing, ) def users(self, instance, **query): """Return a generator of users :param instance: This can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of user objects :rtype: :class:`~openstack.database.v1.user.User` """ instance = self._get_resource(_instance.Instance, instance) return self._list(_user.User, instance_id=instance.id, **query) def get_user(self, user, instance=None): """Get a single user :param user: The value can be the ID of a user or a :class:`~openstack.database.v1.user.User` instance. :param instance: This parameter needs to be specified when an ID is given as `database`. It can be either the ID of an instance or a :class:`~openstack.database.v1.instance.Instance` :returns: One :class:`~openstack.database.v1.user.User` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ instance = self._get_resource(_instance.Instance, instance) return self._get(_user.User, user) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/database/v1/database.py0000664000175000017500000000241300000000000022426 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Database(resource.Resource): resource_key = 'database' resources_key = 'databases' base_path = '/instances/%(instance_id)s/databases' # capabilities allow_create = True allow_delete = True allow_list = True # Properties #: Set of symbols and encodings. The default character set is ``utf8``. character_set = resource.Body('character_set') #: Set of rules for comparing characters in a character set. #: The default value for collate is ``utf8_general_ci``. collate = resource.Body('collate') #: The ID of the instance instance_id = resource.URI('instance_id') #: The name of the database name = resource.Body('name', alternate_id=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/database/v1/flavor.py0000664000175000017500000000174100000000000022156 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Flavor(resource.Resource): resource_key = 'flavor' resources_key = 'flavors' base_path = '/flavors' # capabilities allow_list = True allow_fetch = True # Properties #: Links associated with the flavor links = resource.Body('links') #: The name of the flavor name = resource.Body('name') #: The size in MB of RAM the flavor has ram = resource.Body('ram') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/database/v1/instance.py0000664000175000017500000000736300000000000022477 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource from openstack import utils class Instance(resource.Resource): resource_key = 'instance' resources_key = 'instances' base_path = '/instances' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The flavor of the instance flavor = resource.Body('flavor') #: Links associated with the instance links = resource.Body('links') #: The name of the instance name = resource.Body('name') #: The status of the instance status = resource.Body('status') #: The size of the volume volume = resource.Body('volume') #: A dictionary of datastore details, often including 'type' and 'version' #: keys datastore = resource.Body('datastore', type=dict) #: The ID of this instance id = resource.Body('id') #: The region this instance resides in region = resource.Body('region') #: The name of the host hostname = resource.Body('hostname') #: The timestamp when this instance was created created_at = resource.Body('created') #: The timestamp when this instance was updated updated_at = resource.Body('updated') def enable_root_user(self, session): """Enable login for the root user. This operation enables login from any host for the root user and provides the user with a generated root password. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :returns: A dictionary with keys ``name`` and ``password`` specifying the login credentials. """ url = utils.urljoin(self.base_path, self.id, 'root') resp = session.post( url, ) return resp.json()['user'] def is_root_enabled(self, session): """Determine if root is enabled on an instance. Determine if root is enabled on this particular instance. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :returns: ``True`` if root user is enabled for a specified database instance or ``False`` otherwise. 
""" url = utils.urljoin(self.base_path, self.id, 'root') resp = session.get( url, ) return resp.json()['rootEnabled'] def restart(self, session): """Restart the database instance :returns: ``None`` """ body = {'restart': None} url = utils.urljoin(self.base_path, self.id, 'action') session.post(url, json=body) def resize(self, session, flavor_reference): """Resize the database instance :returns: ``None`` """ body = {'resize': {'flavorRef': flavor_reference}} url = utils.urljoin(self.base_path, self.id, 'action') session.post(url, json=body) def resize_volume(self, session, volume_size): """Resize the volume attached to the instance :returns: ``None`` """ body = {'resize': {'volume': volume_size}} url = utils.urljoin(self.base_path, self.id, 'action') session.post(url, json=body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/database/v1/user.py0000664000175000017500000000326400000000000021645 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource from openstack import utils class User(resource.Resource): resource_key = 'user' resources_key = 'users' base_path = '/instances/%(instance_id)s/users' # capabilities allow_create = True allow_delete = True allow_list = True instance_id = resource.URI('instance_id') # Properties #: Databases the user has access to databases = resource.Body('databases') #: The name of the user name = resource.Body('name', alternate_id=True) #: The password of the user password = resource.Body('password') def _prepare_request( self, requires_id=True, prepend_key=True, base_path=None, **kwargs ): """Prepare a request for the database service's create call User.create calls require the resources_key. The base_prepare_request would insert the resource_key (singular) """ body = {self.resources_key: self._body.dirty} if base_path is None: base_path = self.base_path uri = base_path % self._uri.attributes uri = utils.urljoin(uri, self.id) return resource._Request(uri, body, None) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.261321 openstacksdk-4.0.0/openstack/dns/0000775000175000017500000000000000000000000017002 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/__init__.py0000664000175000017500000000000000000000000021101 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/dns_service.py0000664000175000017500000000140400000000000021657 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns.v2 import _proxy from openstack import service_description class DnsService(service_description.ServiceDescription): """The DNS service.""" supported_versions = { '2': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.265323 openstacksdk-4.0.0/openstack/dns/v2/0000775000175000017500000000000000000000000017331 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/v2/__init__.py0000664000175000017500000000000000000000000021430 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/v2/_base.py0000664000175000017500000001155400000000000020762 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty import urllib.parse from openstack import exceptions from openstack import resource class Resource(resource.Resource): @classmethod def find(cls, session, name_or_id, ignore_missing=True, **params): """Find a resource by its name or id. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param name_or_id: This resource's identifier, if needed by the request. The default is ``None``. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict params: Any additional parameters to be passed into underlying methods, such as to :meth:`~openstack.resource.Resource.existing` in order to pass on URI parameters. :return: The :class:`Resource` object matching the given name or id or None if nothing matches. :raises: :class:`openstack.exceptions.DuplicateResource` if more than one resource is found for this request. :raises: :class:`openstack.exceptions.NotFoundException` if nothing is found and ignore_missing is ``False``. """ session = cls._get_session(session) # Try to short-circuit by looking directly for a matching ID. 
try: match = cls.existing( id=name_or_id, connection=session._get_connection(), **params ) return match.fetch(session) except exceptions.SDKException: # DNS may return 400 when we try to do GET with name pass if ( 'name' in cls._query_mapping._mapping.keys() and 'name' not in params ): params['name'] = name_or_id data = cls.list(session, **params) result = cls._get_one_match(name_or_id, data) if result is not None: return result if ignore_missing: return None raise exceptions.NotFoundException( f"No {cls.__name__} found for {name_or_id}" ) @classmethod def list( cls, session, project_id=None, all_projects=None, **params, ): headers: ty.Union[ty.Union[ty.Dict[str, str], None]] = ( {} if project_id or all_projects else None ) if headers is not None: if project_id: headers["x-auth-sudo-project-id"] = str(project_id) if all_projects: headers["x-auth-all-projects"] = str(all_projects) return super().list(session=session, headers=headers, **params) @classmethod def _get_next_link(cls, uri, response, data, marker, limit, total_yielded): next_link = None params: ty.Dict[str, ty.Union[ty.List[str], str]] = {} if isinstance(data, dict): links = data.get('links') if links: next_link = links.get('next') total = data.get('metadata', {}).get('total_count') if total: # We have a kill switch total_count = int(total) if total_count <= total_yielded: return None, params # Parse params from Link (next page URL) into params. # This prevents duplication of query parameters that with large # number of pages result in HTTP 414 error eventually. if next_link: parts = urllib.parse.urlparse(next_link) query_params = urllib.parse.parse_qs(parts.query) params.update(query_params) next_link = urllib.parse.urljoin(next_link, parts.path) # If we still have no link, and limit was given and is non-zero, # and the number of records yielded equals the limit, then the user # is playing pagination ball so we should go ahead and try once more. 
if not next_link and limit: next_link = uri params['marker'] = marker params['limit'] = limit return next_link, params ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/v2/_proxy.py0000664000175000017500000006565200000000000021241 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns.v2 import floating_ip as _fip from openstack.dns.v2 import recordset as _rs from openstack.dns.v2 import zone as _zone from openstack.dns.v2 import zone_export as _zone_export from openstack.dns.v2 import zone_import as _zone_import from openstack.dns.v2 import zone_share as _zone_share from openstack.dns.v2 import zone_transfer as _zone_transfer from openstack import proxy class Proxy(proxy.Proxy): _resource_registry = { "floating_ip": _fip.FloatingIP, "recordset": _rs.Recordset, "zone": _zone.Zone, "zone_export": _zone_export.ZoneExport, "zone_import": _zone_import.ZoneImport, "zone_share": _zone_share.ZoneShare, "zone_transfer_request": _zone_transfer.ZoneTransferRequest, } # ======== Zones ======== def zones(self, **query): """Retrieve a generator of zones :param dict query: Optional query parameters to be sent to limit the resources being returned. * `name`: Zone Name field. * `type`: Zone Type field. * `email`: Zone email field. * `status`: Status of the zone. * `ttl`: TTL field filter.abs * `description`: Zone description field filter. 
:returns: A generator of zone :class:`~openstack.dns.v2.zone.Zone` instances. """ return self._list(_zone.Zone, **query) def create_zone(self, **attrs): """Create a new zone from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.dns.v2.zone.Zone`, comprised of the properties on the Zone class. :returns: The results of zone creation. :rtype: :class:`~openstack.dns.v2.zone.Zone` """ return self._create(_zone.Zone, prepend_key=False, **attrs) def get_zone(self, zone): """Get a zone :param zone: The value can be the ID of a zone or a :class:`~openstack.dns.v2.zone.Zone` instance. :returns: Zone instance. :rtype: :class:`~openstack.dns.v2.zone.Zone` """ return self._get(_zone.Zone, zone) def delete_zone(self, zone, ignore_missing=True, delete_shares=False): """Delete a zone :param zone: The value can be the ID of a zone or a :class:`~openstack.dns.v2.zone.Zone` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone. :param bool delete_shares: When True, delete the zone shares along with the zone. :returns: Zone been deleted :rtype: :class:`~openstack.dns.v2.zone.Zone` """ return self._delete( _zone.Zone, zone, ignore_missing=ignore_missing, delete_shares=delete_shares, ) def update_zone(self, zone, **attrs): """Update zone attributes :param zone: The id or an instance of :class:`~openstack.dns.v2.zone.Zone`. :param dict attrs: attributes for update on :class:`~openstack.dns.v2.zone.Zone`. 
:rtype: :class:`~openstack.dns.v2.zone.Zone` """ return self._update(_zone.Zone, zone, **attrs) def find_zone(self, name_or_id, ignore_missing=True): """Find a single zone :param name_or_id: The name or ID of a zone :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone. :returns: :class:`~openstack.dns.v2.zone.Zone` """ return self._find( _zone.Zone, name_or_id, ignore_missing=ignore_missing ) def abandon_zone(self, zone, **attrs): """Abandon Zone :param zone: The value can be the ID of a zone to be abandoned or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. :returns: None """ zone = self._get_resource(_zone.Zone, zone) return zone.abandon(self) def xfr_zone(self, zone, **attrs): """Trigger update of secondary Zone :param zone: The value can be the ID of a zone to be abandoned or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. :returns: None """ zone = self._get_resource(_zone.Zone, zone) return zone.xfr(self) # ======== Recordsets ======== def recordsets(self, zone=None, **query): """Retrieve a generator of recordsets :param zone: The optional value can be the ID of a zone or a :class:`~openstack.dns.v2.zone.Zone` instance. If it is not given all recordsets for all zones of the tenant would be retrieved :param dict query: Optional query parameters to be sent to limit the resources being returned. * `name`: Recordset Name field. * `type`: Type field. * `status`: Status of the recordset. * `ttl`: TTL field filter. * `description`: Recordset description field filter. 
:returns: A generator of zone (:class:`~openstack.dns.v2.recordset.Recordset`) instances """ base_path = None if not zone: base_path = '/recordsets' else: zone = self._get_resource(_zone.Zone, zone) query.update({'zone_id': zone.id}) return self._list(_rs.Recordset, base_path=base_path, **query) def create_recordset(self, zone, **attrs): """Create a new recordset in the zone :param zone: The value can be the ID of a zone or a :class:`~openstack.dns.v2.zone.Zone` instance. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.dns.v2.recordset.Recordset`, comprised of the properties on the Recordset class. :returns: The results of zone creation :rtype: :class:`~openstack.dns.v2.recordset.Recordset` """ zone = self._get_resource(_zone.Zone, zone) attrs.update({'zone_id': zone.id}) return self._create(_rs.Recordset, prepend_key=False, **attrs) def update_recordset(self, recordset, **attrs): """Update Recordset attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.dns.v2.recordset.Recordset`, comprised of the properties on the Recordset class. :returns: The results of zone creation :rtype: :class:`~openstack.dns.v2.recordset.Recordset` """ return self._update(_rs.Recordset, recordset, **attrs) def get_recordset(self, recordset, zone): """Get a recordset :param zone: The value can be the ID of a zone or a :class:`~openstack.dns.v2.zone.Zone` instance. :param recordset: The value can be the ID of a recordset or a :class:`~openstack.dns.v2.recordset.Recordset` instance. :returns: Recordset instance :rtype: :class:`~openstack.dns.v2.recordset.Recordset` """ zone = self._get_resource(_zone.Zone, zone) return self._get(_rs.Recordset, recordset, zone_id=zone.id) def delete_recordset(self, recordset, zone=None, ignore_missing=True): """Delete a zone :param recordset: The value can be the ID of a recordset or a :class:`~openstack.dns.v2.recordset.Recordset` instance. 
:param zone: The value can be the ID of a zone or a :class:`~openstack.dns.v2.zone.Zone` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone. :returns: Recordset instance been deleted :rtype: :class:`~openstack.dns.v2.recordset.Recordset` """ if zone: zone = self._get_resource(_zone.Zone, zone) recordset = self._get(_rs.Recordset, recordset, zone_id=zone.id) return self._delete( _rs.Recordset, recordset, ignore_missing=ignore_missing ) def find_recordset(self, zone, name_or_id, ignore_missing=True, **query): """Find a single recordset :param zone: The value can be the ID of a zone or a :class:`~openstack.dns.v2.zone.Zone` instance. :param name_or_id: The name or ID of a zone :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone. :returns: :class:`~openstack.dns.v2.recordset.Recordset` """ zone = self._get_resource(_zone.Zone, zone) return self._find( _rs.Recordset, name_or_id, ignore_missing=ignore_missing, zone_id=zone.id, **query, ) # ======== Zone Imports ======== def zone_imports(self, **query): """Retrieve a generator of zone imports :param dict query: Optional query parameters to be sent to limit the resources being returned. * `zone_id`: Zone I field. * `message`: Message field. * `status`: Status of the zone import record. :returns: A generator of zone :class:`~openstack.dns.v2.zone_import.ZoneImport` instances. 
""" return self._list(_zone_import.ZoneImport, **query) def create_zone_import(self, **attrs): """Create a new zone import from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.dns.v2.zone_import.ZoneImport`, comprised of the properties on the ZoneImport class. :returns: The results of zone creation. :rtype: :class:`~openstack.dns.v2.zone_import.ZoneImport` """ return self._create( _zone_import.ZoneImport, prepend_key=False, **attrs ) def get_zone_import(self, zone_import): """Get a zone import record :param zone: The value can be the ID of a zone import or a :class:`~openstack.dns.v2.zone_import.ZoneImport` instance. :returns: ZoneImport instance. :rtype: :class:`~openstack.dns.v2.zone_import.ZoneImport` """ return self._get(_zone_import.ZoneImport, zone_import) def delete_zone_import(self, zone_import, ignore_missing=True): """Delete a zone import :param zone_import: The value can be the ID of a zone import or a :class:`~openstack.dns.v2.zone_import.ZoneImport` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone. :returns: None """ return self._delete( _zone_import.ZoneImport, zone_import, ignore_missing=ignore_missing ) # ======== Zone Exports ======== def zone_exports(self, **query): """Retrieve a generator of zone exports :param dict query: Optional query parameters to be sent to limit the resources being returned. * `zone_id`: Zone I field. * `message`: Message field. * `status`: Status of the zone import record. :returns: A generator of zone :class:`~openstack.dns.v2.zone_export.ZoneExport` instances. 
""" return self._list(_zone_export.ZoneExport, **query) def create_zone_export(self, zone, **attrs): """Create a new zone export from attributes :param zone: The value can be the ID of a zone to be exported or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.dns.v2.zone_export.ZoneExport`, comprised of the properties on the ZoneExport class. :returns: The results of zone creation. :rtype: :class:`~openstack.dns.v2.zone_export.ZoneExport` """ zone = self._get_resource(_zone.Zone, zone) return self._create( _zone_export.ZoneExport, base_path='/zones/%(zone_id)s/tasks/export', prepend_key=False, zone_id=zone.id, **attrs, ) def get_zone_export(self, zone_export): """Get a zone export record :param zone: The value can be the ID of a zone import or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. :returns: ZoneExport instance. :rtype: :class:`~openstack.dns.v2.zone_export.ZoneExport` """ return self._get(_zone_export.ZoneExport, zone_export) def get_zone_export_text(self, zone_export): """Get a zone export record as text :param zone: The value can be the ID of a zone import or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. :returns: ZoneExport instance. :rtype: :class:`~openstack.dns.v2.zone_export.ZoneExport` """ return self._get( _zone_export.ZoneExport, zone_export, base_path='/zones/tasks/export/%(id)s/export', ) def delete_zone_export(self, zone_export, ignore_missing=True): """Delete a zone export :param zone_export: The value can be the ID of a zone import or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone. 
:returns: None """ return self._delete( _zone_export.ZoneExport, zone_export, ignore_missing=ignore_missing ) # ======== FloatingIPs ======== def floating_ips(self, **query): """Retrieve a generator of recordsets :param dict query: Optional query parameters to be sent to limit the resources being returned. * `name`: Recordset Name field. * `type`: Type field. * `status`: Status of the recordset. * `ttl`: TTL field filter. * `description`: Recordset description field filter. :returns: A generator of floatingips (:class:`~openstack.dns.v2.floating_ip.FloatingIP`) instances """ return self._list(_fip.FloatingIP, **query) def get_floating_ip(self, floating_ip): """Get a Floating IP :param floating_ip: The value can be the ID of a floating ip or a :class:`~openstack.dns.v2.floating_ip.FloatingIP` instance. The ID is in format "region_name:floatingip_id" :returns: FloatingIP instance. :rtype: :class:`~openstack.dns.v2.floating_ip.FloatingIP` """ return self._get(_fip.FloatingIP, floating_ip) def update_floating_ip(self, floating_ip, **attrs): """Update floating ip attributes :param floating_ip: The id or an instance of :class:`~openstack.dns.v2.fip.FloatingIP`. :param dict attrs: attributes for update on :class:`~openstack.dns.v2.fip.FloatingIP`. :rtype: :class:`~openstack.dns.v2.fip.FloatingIP` """ return self._update(_fip.FloatingIP, floating_ip, **attrs) def unset_floating_ip(self, floating_ip): """Unset a Floating IP PTR record :param floating_ip: ID for the floatingip associated with the project. :returns: FloatingIP PTR record. :rtype: :class:`~openstack.dns.v2.fip.FloatipgIP` """ # concat `region:floating_ip_id` as id attrs = {'ptrdname': None} return self._update(_fip.FloatingIP, floating_ip, **attrs) # ======== Zone Transfer ======== def zone_transfer_requests(self, **query): """Retrieve a generator of zone transfer requests :param dict query: Optional query parameters to be sent to limit the resources being returned. * `status`: Status of the recordset. 
:returns: A generator of transfer requests (:class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest`) instances """ return self._list(_zone_transfer.ZoneTransferRequest, **query) def get_zone_transfer_request(self, request): """Get a ZoneTransfer Request info :param request: The value can be the ID of a transfer request or a :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest` instance. :returns: Zone transfer request instance. :rtype: :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest` """ return self._get(_zone_transfer.ZoneTransferRequest, request) def create_zone_transfer_request(self, zone, **attrs): """Create a new ZoneTransfer Request from attributes :param zone: The value can be the ID of a zone to be transferred or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest`, comprised of the properties on the ZoneTransferRequest class. :returns: The results of zone transfer request creation. :rtype: :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest` """ zone = self._get_resource(_zone.Zone, zone) return self._create( _zone_transfer.ZoneTransferRequest, base_path='/zones/%(zone_id)s/tasks/transfer_requests', prepend_key=False, zone_id=zone.id, **attrs, ) def update_zone_transfer_request(self, request, **attrs): """Update ZoneTransfer Request attributes :param floating_ip: The id or an instance of :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest`. :param dict attrs: attributes for update on :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest`. 
:rtype: :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest` """ return self._update( _zone_transfer.ZoneTransferRequest, request, **attrs ) def delete_zone_transfer_request(self, request, ignore_missing=True): """Delete a ZoneTransfer Request :param request: The value can be the ID of a zone transfer request or a :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone. :returns: None """ return self._delete( _zone_transfer.ZoneTransferRequest, request, ignore_missing=ignore_missing, ) def zone_transfer_accepts(self, **query): """Retrieve a generator of zone transfer accepts :param dict query: Optional query parameters to be sent to limit the resources being returned. * `status`: Status of the recordset. :returns: A generator of transfer accepts (:class:`~openstack.dns.v2.zone_transfer.ZoneTransferAccept`) instances """ return self._list(_zone_transfer.ZoneTransferAccept, **query) def get_zone_transfer_accept(self, accept): """Get a ZoneTransfer Accept info :param request: The value can be the ID of a transfer accept or a :class:`~openstack.dns.v2.zone_transfer.ZoneTransferAccept` instance. :returns: Zone transfer request instance. :rtype: :class:`~openstack.dns.v2.zone_transfer.ZoneTransferAccept` """ return self._get(_zone_transfer.ZoneTransferAccept, accept) def create_zone_transfer_accept(self, **attrs): """Create a new ZoneTransfer Accept from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.dns.v2.zone_transfer.ZoneTransferAccept`, comprised of the properties on the ZoneTransferAccept class. :returns: The results of zone transfer request creation. 
:rtype: :class:`~openstack.dns.v2.zone_transfer.ZoneTransferAccept` """ return self._create(_zone_transfer.ZoneTransferAccept, **attrs) # ======== Zone Shares ======== def zone_shares(self, zone, **query): """Retrieve a generator of zone shares :param zone: The zone ID or a :class:`~openstack.dns.v2.zone.Zone` instance :param dict query: Optional query parameters to be sent to limit the resources being returned. * `target_project_id`: The target project ID field. :returns: A generator of zone shares :class:`~openstack.dns.v2.zone_share.ZoneShare` instances. """ zone_obj = self._get_resource(_zone.Zone, zone) return self._list(_zone_share.ZoneShare, zone_id=zone_obj.id, **query) def get_zone_share(self, zone, zone_share): """Get a zone share :param zone: The value can be the ID of a zone or a :class:`~openstack.dns.v2.zone.Zone` instance. :param zone_share: The zone_share can be either the ID of the zone share or a :class:`~openstack.dns.v2.zone_share.ZoneShare` instance that the zone share belongs to. :returns: ZoneShare instance. :rtype: :class:`~openstack.dns.v2.zone_share.ZoneShare` """ zone_obj = self._get_resource(_zone.Zone, zone) return self._get( _zone_share.ZoneShare, zone_share, zone_id=zone_obj.id ) def find_zone_share(self, zone, zone_share_id, ignore_missing=True): """Find a single zone share :param zone: The value can be the ID of a zone or a :class:`~openstack.dns.v2.zone.Zone` instance. :param zone_share_id: The zone share ID :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone share does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent zone share. 
:returns: :class:`~openstack.dns.v2.zone_share.ZoneShare` """ zone_obj = self._get_resource(_zone.Zone, zone) return self._find( _zone_share.ZoneShare, zone_share_id, ignore_missing=ignore_missing, zone_id=zone_obj.id, ) def create_zone_share(self, zone, **attrs): """Create a new zone share from attributes :param zone: The zone ID or a :class:`~openstack.dns.v2.zone.Zone` instance :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.dns.v2.zone_share.ZoneShare`, comprised of the properties on the ZoneShare class. :returns: The results of zone share creation :rtype: :class:`~openstack.dns.v2.zone_share.ZoneShare` """ zone_obj = self._get_resource(_zone.Zone, zone) return self._create( _zone_share.ZoneShare, zone_id=zone_obj.id, **attrs ) def delete_zone_share(self, zone, zone_share, ignore_missing=True): """Delete a zone share :param zone: The zone ID or a :class:`~openstack.dns.v2.zone.Zone` instance :param zone_share: The zone_share can be either the ID of the zone share or a :class:`~openstack.dns.v2.zone_share.ZoneShare` instance that the zone share belongs to. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the zone share does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent zone share. 
:returns: ``None`` """ zone_obj = self._get_resource(_zone.Zone, zone) self._delete( _zone_share.ZoneShare, zone_share, ignore_missing=ignore_missing, zone_id=zone_obj.id, ) def _get_cleanup_dependencies(self): # DNS may depend on floating ip return {'dns': {'before': ['network']}} def _service_cleanup( self, dry_run=True, client_status_queue=False, identified_resources=None, filters=None, resource_evaluation_fn=None, skip_resources=None, ): if not self.should_skip_resource_cleanup("zone", skip_resources): # Delete all zones for obj in self.zones(): self._service_cleanup_del_res( self.delete_zone, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not self.should_skip_resource_cleanup( "floating_ip", skip_resources ): # Unset all floatingIPs # NOTE: FloatingIPs are not cleaned when filters are set for obj in self.floating_ips(): self._service_cleanup_del_res( self.unset_floating_ip, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/v2/floating_ip.py0000664000175000017500000000254700000000000022206 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.dns.v2 import _base from openstack import resource class FloatingIP(_base.Resource): """DNS Floating IP Resource""" resources_key = 'floatingips' base_path = '/reverse/floatingips' # capabilities allow_fetch = True allow_commit = True allow_list = True commit_method = "PATCH" #: Properties #: current action in progress on the resource action = resource.Body('action') #: The floatingip address for this PTR record address = resource.Body('address') #: Description for this PTR record description = resource.Body('description') #: Domain name for this PTR record ptrdname = resource.Body('ptrdname') #: status of the resource status = resource.Body('status') #: Time to live for this PTR record ttl = resource.Body('ttl', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/v2/recordset.py0000664000175000017500000000466000000000000021703 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.dns.v2 import _base from openstack import resource class Recordset(_base.Resource): """DNS Recordset Resource""" resources_key = 'recordsets' base_path = '/zones/%(zone_id)s/recordsets' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'name', 'type', 'ttl', 'data', 'status', 'description', 'limit', 'marker', ) #: Properties #: current action in progress on the resource action = resource.Body('action') #: Timestamp when the zone was created created_at = resource.Body('create_at') #: Recordset description description = resource.Body('description') #: Links contains a `self` pertaining to this zone or a `next` pertaining #: to next page links = resource.Body('links', type=dict) #: DNS Name of the recordset name = resource.Body('name') #: ID of the project which the recordset belongs to project_id = resource.Body('project_id') #: DNS record value list records = resource.Body('records', type=list) #: Recordset status #: Valid values include: `PENDING_CREATE`, `ACTIVE`,`PENDING_DELETE`, #: `ERROR` status = resource.Body('status') #: Time to live, default 300, available value 300-2147483647 (seconds) ttl = resource.Body('ttl', type=int) #: DNS type of the recordset #: Valid values include `A`, `AAAA`, `MX`, `CNAME`, `TXT`, `NS`, #: `SSHFP`, `SPF`, `SRV`, `PTR` type = resource.Body('type') #: Timestamp when the zone was last updated updated_at = resource.Body('updated_at') #: The id of the Zone which this recordset belongs to zone_id = resource.URI('zone_id') #: The name of the Zone which this recordset belongs to zone_name = resource.Body('zone_name') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/v2/zone.py0000664000175000017500000000717700000000000020672 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # 
not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns.v2 import _base from openstack import exceptions from openstack import resource from openstack import utils class Zone(_base.Resource): """DNS ZONE Resource""" resources_key = 'zones' base_path = '/zones' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = "PATCH" _query_mapping = resource.QueryParameters( 'name', 'type', 'email', 'status', 'description', 'ttl', 'limit', 'marker', ) #: Properties #: current action in progress on the resource action = resource.Body('action') #: Attributes #: Key:Value pairs of information about this zone, and the pool the user #: would like to place the zone in. This information can be used by the #: scheduler to place zones on the correct pool. 
attributes = resource.Body('attributes', type=dict) #: Timestamp when the zone was created created_at = resource.Body('created_at') #: Zone description #: *Type: str* description = resource.Body('description') #: The administrator email of this zone #: *Type: str* email = resource.Body('email') #: Links contains a `self` pertaining to this zone or a `next` pertaining #: to next page links = resource.Body('links', type=dict) #: The master list for slaver server to fetch DNS masters = resource.Body('masters', type=list) #: Zone name name = resource.Body('name') #: The pool which manages the zone, assigned by system pool_id = resource.Body('pool_id') #: The project id which the zone belongs to project_id = resource.Body('project_id') #: Serial number in the SOA record set in the zone, #: which identifies the change on the primary DNS server #: *Type: int* serial = resource.Body('serial', type=int) #: Zone status #: Valid values include `PENDING_CREATE`, `ACTIVE`, #: `PENDING_DELETE`, `ERROR` status = resource.Body('status') #: SOA TTL time, unit is seconds, default 300, TTL range 300-2147483647 #: *Type: int* ttl = resource.Body('ttl', type=int) #: Zone type, #: Valid values include `PRIMARY`, `SECONDARY` #: *Type: str* type = resource.Body('type') #: Timestamp when the zone was last updated updated_at = resource.Body('updated_at') #: Whether the zone is shared with other projects #: *Type: bool* is_shared = resource.Body('shared') # Headers for DELETE requests #: If true, delete any existing zone shares along with the zone delete_shares = resource.Header('x-designate-delete-shares', type=bool) def _action(self, session, action, body): """Preform actions given the message body.""" url = utils.urljoin(self.base_path, self.id, 'tasks', action) response = session.post(url, json=body) exceptions.raise_from_response(response) return response def abandon(self, session): self._action(session, 'abandon', None) def xfr(self, session): self._action(session, 'xfr', None) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/v2/zone_export.py0000664000175000017500000000651100000000000022262 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns.v2 import _base from openstack import exceptions from openstack import resource class ZoneExport(_base.Resource): """DNS Zone Exports Resource""" resource_key = '' resources_key = 'exports' base_path = '/zones/tasks/export' # capabilities allow_create = True allow_fetch = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters('zone_id', 'message', 'status') #: Properties #: Timestamp when the zone was created created_at = resource.Body('created_at') #: Links contains a `self` pertaining to this zone or a `next` pertaining #: to next page links = resource.Body('links', type=dict) #: Message message = resource.Body('message') #: Returns the total_count of resources matching this filter metadata = resource.Body('metadata', type=list) #: The project id which the zone belongs to project_id = resource.Body('project_id') #: Current status of the zone export status = resource.Body('status') #: Timestamp when the zone was last updated updated_at = resource.Body('updated_at') #: Version of the resource version = resource.Body('version', type=int) #: ID for the zone that was created by this export zone_id = resource.Body('zone_id') def create(self, session, 
prepend_key=True, base_path=None): """Create a remote resource based on this instance. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param prepend_key: A boolean indicating whether the resource_key should be prepended in a resource creation request. Default to True. :param str base_path: Base part of the URI for creating resources, if different from :data:`~openstack.resource.Resource.base_path`. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_create` is not set to ``True``. """ if not self.allow_create: raise exceptions.MethodNotSupported(self, "create") session = self._get_session(session) microversion = self._get_microversion(session, action='create') # Create ZoneExport requires empty body # skip _prepare_request completely, since we need just empty body request = resource._Request(self.base_path, None, None) response = session.post( request.url, json=request.body, headers=request.headers, microversion=microversion, ) self.microversion = microversion self._translate_response(response) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/v2/zone_import.py0000664000175000017500000000662500000000000022261 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.dns.v2 import _base from openstack import exceptions from openstack import resource class ZoneImport(_base.Resource): """DNS Zone Import Resource""" resource_key = '' resources_key = 'imports' base_path = '/zones/tasks/import' # capabilities allow_create = True allow_fetch = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters('zone_id', 'message', 'status') #: Properties #: Timestamp when the zone was created created_at = resource.Body('created_at') #: Links contains a `self` pertaining to this zone or a `next` pertaining #: to next page links = resource.Body('links', type=dict) #: Message message = resource.Body('message') #: Returns the total_count of resources matching this filter metadata = resource.Body('metadata', type=list) #: The project id which the zone belongs to project_id = resource.Body('project_id') #: Current status of the zone import status = resource.Body('status') #: Timestamp when the zone was last updated updated_at = resource.Body('updated_at') #: Version of the resource version = resource.Body('version', type=int) #: ID for the zone that was created by this import zone_id = resource.Body('zone_id') def create(self, session, prepend_key=True, base_path=None): """Create a remote resource based on this instance. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param prepend_key: A boolean indicating whether the resource_key should be prepended in a resource creation request. Default to True. :param str base_path: Base part of the URI for creating resources, if different from :data:`~openstack.resource.Resource.base_path`. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_create` is not set to ``True``. 
""" if not self.allow_create: raise exceptions.MethodNotSupported(self, "create") session = self._get_session(session) microversion = self._get_microversion(session, action='create') # Create ZoneImport requires empty body and 'text/dns' as content-type # skip _prepare_request completely, since we need just empty body request = resource._Request( self.base_path, None, {'content-type': 'text/dns'} ) response = session.post( request.url, json=request.body, headers=request.headers, microversion=microversion, ) self.microversion = microversion self._translate_response(response) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/v2/zone_share.py0000664000175000017500000000307000000000000022040 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns.v2 import _base from openstack import resource class ZoneShare(_base.Resource): """DNS ZONE Share Resource""" resources_key = 'shared_zones' base_path = '/zones/%(zone_id)s/shares' # capabilities allow_create = True allow_delete = True allow_fetch = True allow_list = True _query_mapping = resource.QueryParameters('target_project_id') # Properties #: The ID of the zone being shared. zone_id = resource.URI('zone_id') #: Timestamp when the share was created. created_at = resource.Body('created_at') #: Timestamp when the member was last updated. 
updated_at = resource.Body('updated_at') # FIXME(stephenfin): This conflicts since there is a zone ID in the URI #: The zone ID of the zone being shared. # zone_id = resource.Body('zone_id') #: The project ID that owns the share. project_id = resource.Body('project_id') #: The target project ID that the zone is shared with. target_project_id = resource.Body('target_project_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/v2/zone_transfer.py0000664000175000017500000000475200000000000022572 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns.v2 import _base from openstack import resource class ZoneTransferBase(_base.Resource): """DNS Zone Transfer Request/Accept Base Resource""" _query_mapping = resource.QueryParameters('status') #: Properties #: Timestamp when the resource was created created_at = resource.Body('created_at') #: Key that is used as part of the zone transfer accept process. #: This is only shown to the creator, and must be communicated out of band. 
key = resource.Body('key') #: The project id which the zone belongs to project_id = resource.Body('project_id') #: Current status of the zone import status = resource.Body('status') #: Timestamp when the resource was last updated updated_at = resource.Body('updated_at') #: Version of the resource version = resource.Body('version', type=int) #: ID for the zone that is being exported zone_id = resource.Body('zone_id') class ZoneTransferRequest(ZoneTransferBase): """DNS Zone Transfer Request Resource""" base_path = '/zones/tasks/transfer_requests' resources_key = 'transfer_requests' # capabilities allow_create = True allow_fetch = True allow_delete = True allow_list = True allow_commit = True #: Description description = resource.Body('description') #: A project ID that the request will be limited to. #: No other project will be allowed to accept this request. target_project_id = resource.Body('target_project_id') #: Name for the zone that is being exported zone_name = resource.Body('zone_name') class ZoneTransferAccept(ZoneTransferBase): """DNS Zone Transfer Accept Resource""" base_path = '/zones/tasks/transfer_accepts' resources_key = 'transfer_accepts' # capabilities allow_create = True allow_fetch = True allow_list = True #: Name for the zone that is being exported zone_transfer_request_id = resource.Body('zone_transfer_request_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/dns/version.py0000664000175000017500000000147300000000000021046 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # capabilities allow_list = True # Properties links = resource.Body('links') status = resource.Body('status') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/exceptions.py0000664000175000017500000002056100000000000020755 0ustar00zuulzuul00000000000000# Copyright 2010 Jacob Kaplan-Moss # Copyright 2011 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Exception definitions. 
""" import json import re import typing as ty from requests import exceptions as _rex class SDKException(Exception): """The base exception class for all exceptions this library raises.""" def __init__(self, message=None, extra_data=None): self.message = self.__class__.__name__ if message is None else message self.extra_data = extra_data super().__init__(self.message) class EndpointNotFound(SDKException): """A mismatch occurred between what the client and server expect.""" def __init__(self, message=None): super().__init__(message) class InvalidResponse(SDKException): """The response from the server is not valid for this request.""" def __init__(self, response): super().__init__() self.response = response class InvalidRequest(SDKException): """The request to the server is not valid.""" def __init__(self, message=None): super().__init__(message) class HttpException(SDKException, _rex.HTTPError): """The base exception for all HTTP error responses.""" def __init__( self, message='Error', response=None, http_status=None, details=None, request_id=None, ): # TODO(shade) Remove http_status parameter and the ability for response # to be None once we're not mocking Session everywhere. 
if not message: if response is not None: message = "{name}: {code}".format( name=self.__class__.__name__, code=response.status_code ) else: message = "{name}: Unknown error".format( name=self.__class__.__name__ ) # Call directly rather than via super to control parameters SDKException.__init__(self, message=message) _rex.HTTPError.__init__(self, message, response=response) if response is not None: self.request_id = response.headers.get('x-openstack-request-id') self.status_code = response.status_code else: self.request_id = request_id self.status_code = http_status self.details = details self.url = self.request and self.request.url or None self.method = self.request and self.request.method or None self.source = "Server" if self.status_code is not None and (400 <= self.status_code < 500): self.source = "Client" def __str__(self): # 'Error' is the default value for self.message. If self.message isn't # 'Error', then someone has set a more informative error message # and we should use it. If it is 'Error', then we should construct a # better message from the information we do have. 
if not self.url or self.message == 'Error': return self.message if self.url: remote_error = "{source} Error for url: {url}".format( source=self.source, url=self.url ) if self.details: remote_error += ', ' if self.details: remote_error += str(self.details) return "{message}: {remote_error}".format( message=super().__str__(), remote_error=remote_error, ) class BadRequestException(HttpException): """HTTP 400 Bad Request.""" class NotFoundException(HttpException): """HTTP 404 Not Found.""" class ForbiddenException(HttpException): """HTTP 403 Forbidden Request.""" class ConflictException(HttpException): """HTTP 409 Conflict.""" class PreconditionFailedException(HttpException): """HTTP 412 Precondition Failed.""" class MethodNotSupported(SDKException): """The resource does not support this operation type.""" def __init__(self, resource, method): # This needs to work with both classes and instances. try: name = resource.__name__ except AttributeError: name = resource.__class__.__name__ message = 'The {} method is not supported for {}.{}'.format( method, resource.__module__, name, ) super().__init__(message=message) class DuplicateResource(SDKException): """More than one resource exists with that name.""" class ResourceTimeout(SDKException): """Timeout waiting for resource.""" class ResourceFailure(SDKException): """General resource failure.""" class InvalidResourceQuery(SDKException): """Invalid query params for resource.""" def _extract_message(obj): if isinstance(obj, dict): # Most of services: compute, network if obj.get('message'): return obj['message'] # Ironic starting with Stein elif obj.get('faultstring'): return obj['faultstring'] elif isinstance(obj, str): # Ironic before Stein has double JSON encoding, nobody remembers why. 
try: obj = json.loads(obj) except Exception: pass else: return _extract_message(obj) def raise_from_response(response, error_message=None): """Raise an instance of an HTTPException based on keystoneauth response.""" if response.status_code < 400: return cls: ty.Type[SDKException] if response.status_code == 400: cls = BadRequestException elif response.status_code == 403: cls = ForbiddenException elif response.status_code == 404: cls = NotFoundException elif response.status_code == 409: cls = ConflictException elif response.status_code == 412: cls = PreconditionFailedException else: cls = HttpException details = None content_type = response.headers.get('content-type', '') if response.content and 'application/json' in content_type: # Iterate over the nested objects to retrieve "message" attribute. # TODO(shade) Add exception handling for times when the content type # is lying. try: content = response.json() messages = [_extract_message(obj) for obj in content.values()] if not any(messages): # Exception dict may be the root dict in projects that use WSME messages = [_extract_message(content)] # Join all of the messages together nicely and filter out any # objects that don't have a "message" attr. details = '\n'.join(msg for msg in messages if msg) except Exception: details = response.text elif response.content and 'text/html' in content_type: messages = [] for line in response.text.splitlines(): message = re.sub(r'<.+?>', '', line.strip()) if message not in messages: messages.append(message) # Return joined string separated by colons. 
details = ': '.join(messages) if not details: details = response.reason if response.reason else response.text http_status = response.status_code request_id = response.headers.get('x-openstack-request-id') raise cls( message=error_message, response=response, details=details, http_status=http_status, request_id=request_id, ) class ConfigException(SDKException): """Something went wrong with parsing your OpenStack Config.""" class NotSupported(SDKException): """Request cannot be performed by any supported API version.""" class ValidationException(SDKException): """Validation failed for resource.""" class ServiceDisabledException(ConfigException): """This service is disabled for reasons.""" class ServiceDiscoveryException(SDKException): """The service cannot be discovered.""" # Backwards compatibility OpenStackCloudException = SDKException ResourceNotFound = NotFoundException ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.265323 openstacksdk-4.0.0/openstack/fixture/0000775000175000017500000000000000000000000017704 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/fixture/__init__.py0000664000175000017500000000000000000000000022003 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/fixture/connection.py0000664000175000017500000000763100000000000022424 0ustar00zuulzuul00000000000000# Copyright 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import fixtures from keystoneauth1.fixture import v2 from keystoneauth1.fixture import v3 import os_service_types _service_type_manager = os_service_types.ServiceTypes() _SUBURL_TEMPLATES = { 'public': 'https://example.com/{service_type}', 'internal': 'https://internal.example.com/{service_type}', 'admin': 'https://example.com/{service_type}', } _ENDPOINT_TEMPLATES = { 'public': 'https://{service_type}.example.com', 'internal': 'https://internal.{service_type}.example.com', 'admin': 'https://{service_type}.example.com', } class ConnectionFixture(fixtures.Fixture): _suffixes = { 'baremetal': '/', 'block-storage': '/{project_id}', 'compute': '/v2.1/', 'container-infrastructure-management': '/v1', 'object-store': '/v1/{project_id}', 'orchestration': '/v1/{project_id}', 'volumev2': '/v2/{project_id}', 'volumev3': '/v3/{project_id}', } def __init__(self, suburl=False, project_id=None, *args, **kwargs): super().__init__(*args, **kwargs) self._endpoint_templates = _ENDPOINT_TEMPLATES if suburl: self.use_suburl() self.project_id = project_id or uuid.uuid4().hex.replace('-', '') self.build_tokens() def use_suburl(self): self._endpoint_templates = _SUBURL_TEMPLATES def _get_endpoint_templates(self, service_type, alias=None, v2=False): templates = {} for k, v in self._endpoint_templates.items(): suffix = self._suffixes.get( alias, self._suffixes.get(service_type, '') ) # For a keystone v2 catalog, we want to list the # versioned endpoint in the catalog, because that's # more likely how those were deployed. 
if v2: suffix = '/v2.0' templates[k] = (v + suffix).format( service_type=service_type, project_id=self.project_id, ) return templates def _setUp(self): pass def clear_tokens(self): self.v2_token = v2.Token(tenant_id=self.project_id) self.v3_token = v3.Token(project_id=self.project_id) def build_tokens(self): self.clear_tokens() for service in _service_type_manager.services: service_type = service['service_type'] if service_type == 'ec2-api': continue service_name = service['project'] ets = self._get_endpoint_templates(service_type) v3_svc = self.v3_token.add_service(service_type, name=service_name) v2_svc = self.v2_token.add_service(service_type, name=service_name) v3_svc.add_standard_endpoints(region='RegionOne', **ets) if service_type == 'identity': ets = self._get_endpoint_templates(service_type, v2=True) v2_svc.add_endpoint(region='RegionOne', **ets) for alias in service.get('aliases', []): ets = self._get_endpoint_templates(service_type, alias=alias) v3_svc = self.v3_token.add_service(alias, name=service_name) v2_svc = self.v2_token.add_service(alias, name=service_name) v3_svc.add_standard_endpoints(region='RegionOne', **ets) v2_svc.add_endpoint(region='RegionOne', **ets) def _cleanup(self): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/format.py0000664000175000017500000000212200000000000020055 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class Formatter: @classmethod def deserialize(cls, value): """Return a formatted object representing the value""" raise NotImplementedError class BoolStr(Formatter): @classmethod def deserialize(cls, value): """Convert a boolean string to a boolean""" expr = str(value).lower() if "true" == expr: return True elif "false" == expr: return False else: raise ValueError( "Unable to deserialize boolean string: %s" % value ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.265323 openstacksdk-4.0.0/openstack/identity/0000775000175000017500000000000000000000000020047 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/__init__.py0000664000175000017500000000000000000000000022146 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/identity_service.py0000664000175000017500000000156700000000000024003 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.identity.v2 import _proxy as _proxy_v2 from openstack.identity.v3 import _proxy as _proxy_v3 from openstack import service_description class IdentityService(service_description.ServiceDescription): """The identity service.""" supported_versions = { '2': _proxy_v2.Proxy, '3': _proxy_v3.Proxy, } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.265323 openstacksdk-4.0.0/openstack/identity/v2/0000775000175000017500000000000000000000000020376 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v2/__init__.py0000664000175000017500000000000000000000000022475 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v2/_proxy.py0000664000175000017500000002416100000000000022274 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v2 import extension as _extension from openstack.identity.v2 import role as _role from openstack.identity.v2 import tenant as _tenant from openstack.identity.v2 import user as _user from openstack import proxy class Proxy(proxy.Proxy): def extensions(self): """Retrieve a generator of extensions :returns: A generator of extension instances. 
:rtype: :class:`~openstack.identity.v2.extension.Extension` """ return self._list(_extension.Extension) def get_extension(self, extension): """Get a single extension :param extension: The value can be the ID of an extension or a :class:`~openstack.identity.v2.extension.Extension` instance. :returns: One :class:`~openstack.identity.v2.extension.Extension` :raises: :class:`~openstack.exceptions.NotFoundException` when no extension can be found. """ return self._get(_extension.Extension, extension) def create_role(self, **attrs): """Create a new role from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v2.role.Role`, comprised of the properties on the Role class. :returns: The results of role creation :rtype: :class:`~openstack.identity.v2.role.Role` """ return self._create(_role.Role, **attrs) def delete_role(self, role, ignore_missing=True): """Delete a role :param role: The value can be either the ID of a role or a :class:`~openstack.identity.v2.role.Role` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the role does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent role. :returns: ``None`` """ self._delete(_role.Role, role, ignore_missing=ignore_missing) def find_role(self, name_or_id, ignore_missing=True): """Find a single role :param name_or_id: The name or ID of a role. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.identity.v2.role.Role` or None """ return self._find( _role.Role, name_or_id, ignore_missing=ignore_missing ) def get_role(self, role): """Get a single role :param role: The value can be the ID of a role or a :class:`~openstack.identity.v2.role.Role` instance. :returns: One :class:`~openstack.identity.v2.role.Role` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_role.Role, role) def roles(self, **query): """Retrieve a generator of roles :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of role instances. :rtype: :class:`~openstack.identity.v2.role.Role` """ return self._list(_role.Role, **query) def update_role(self, role, **attrs): """Update a role :param role: Either the ID of a role or a :class:`~openstack.identity.v2.role.Role` instance. :param attrs: The attributes to update on the role represented by ``role``. :returns: The updated role :rtype: :class:`~openstack.identity.v2.role.Role` """ return self._update(_role.Role, role, **attrs) def create_tenant(self, **attrs): """Create a new tenant from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v2.tenant.Tenant`, comprised of the properties on the Tenant class. :returns: The results of tenant creation :rtype: :class:`~openstack.identity.v2.tenant.Tenant` """ return self._create(_tenant.Tenant, **attrs) def delete_tenant(self, tenant, ignore_missing=True): """Delete a tenant :param tenant: The value can be either the ID of a tenant or a :class:`~openstack.identity.v2.tenant.Tenant` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the tenant does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent tenant. 
:returns: ``None`` """ self._delete(_tenant.Tenant, tenant, ignore_missing=ignore_missing) def find_tenant(self, name_or_id, ignore_missing=True): """Find a single tenant :param name_or_id: The name or ID of a tenant. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v2.tenant.Tenant` or None """ return self._find( _tenant.Tenant, name_or_id, ignore_missing=ignore_missing ) def get_tenant(self, tenant): """Get a single tenant :param tenant: The value can be the ID of a tenant or a :class:`~openstack.identity.v2.tenant.Tenant` instance. :returns: One :class:`~openstack.identity.v2.tenant.Tenant` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_tenant.Tenant, tenant) def tenants(self, **query): """Retrieve a generator of tenants :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of tenant instances. :rtype: :class:`~openstack.identity.v2.tenant.Tenant` """ return self._list(_tenant.Tenant, **query) def update_tenant(self, tenant, **attrs): """Update a tenant :param tenant: Either the ID of a tenant or a :class:`~openstack.identity.v2.tenant.Tenant` instance. :param attrs: The attributes to update on the tenant represented by ``tenant``. :returns: The updated tenant :rtype: :class:`~openstack.identity.v2.tenant.Tenant` """ return self._update(_tenant.Tenant, tenant, **attrs) def create_user(self, **attrs): """Create a new user from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v2.user.User`, comprised of the properties on the User class. 
:returns: The results of user creation :rtype: :class:`~openstack.identity.v2.user.User` """ return self._create(_user.User, **attrs) def delete_user(self, user, ignore_missing=True): """Delete a user :param user: The value can be either the ID of a user or a :class:`~openstack.identity.v2.user.User` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the user does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent user. :returns: ``None`` """ self._delete(_user.User, user, ignore_missing=ignore_missing) def find_user(self, name_or_id, ignore_missing=True): """Find a single user :param name_or_id: The name or ID of a user. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v2.user.User` or None """ return self._find( _user.User, name_or_id, ignore_missing=ignore_missing ) def get_user(self, user): """Get a single user :param user: The value can be the ID of a user or a :class:`~openstack.identity.v2.user.User` instance. :returns: One :class:`~openstack.identity.v2.user.User` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_user.User, user) def users(self, **query): """Retrieve a generator of users :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of user instances. :rtype: :class:`~openstack.identity.v2.user.User` """ return self._list(_user.User, **query) def update_user(self, user, **attrs): """Update a user :param user: Either the ID of a user or a :class:`~openstack.identity.v2.user.User` instance. :param attrs: The attributes to update on the user represented by ``user``. 
:returns: The updated user :rtype: :class:`~openstack.identity.v2.user.User` """ return self._update(_user.User, user, **attrs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v2/extension.py0000664000175000017500000000407700000000000022774 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Extension(resource.Resource): resource_key = 'extension' resources_key = 'extensions' base_path = '/extensions' # capabilities allow_list = True allow_fetch = True # Properties #: A unique identifier, which will be used for accessing the extension #: through a dedicated url ``/extensions/*alias*``. The extension #: alias uniquely identifies an extension and is prefixed by a vendor #: identifier. *Type: string* alias = resource.Body('alias', alternate_id=True) #: A description of the extension. *Type: string* description = resource.Body('description') #: Links to the documentation in various format. *Type: string* links = resource.Body('links', type=list, list_type=dict) #: The name of the extension. *Type: string* name = resource.Body('name') #: The second unique identifier of the extension after the alias. #: It is usually a URL which will be used. Example: #: "http://docs.openstack.org/identity/api/ext/s3tokens/v1.0" #: *Type: string* namespace = resource.Body('namespace') #: The last time the extension has been modified (update date). 
updated_at = resource.Body('updated') @classmethod def list(cls, session, paginated=False, base_path=None, **params): if base_path is None: base_path = cls.base_path resp = session.get(base_path, params=params) resp = resp.json() for data in resp[cls.resources_key]['values']: yield cls.existing(**data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v2/role.py0000664000175000017500000000232500000000000021713 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import format from openstack import resource class Role(resource.Resource): resource_key = 'role' resources_key = 'roles' base_path = '/OS-KSADM/roles' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The description of the role. *Type: string* description = resource.Body('description') #: Setting this attribute to ``False`` prevents this role from being #: available in the role list. *Type: bool* is_enabled = resource.Body('enabled', type=format.BoolStr) #: Unique role name. 
*Type: string* name = resource.Body('name') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v2/tenant.py0000664000175000017500000000252400000000000022244 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Tenant(resource.Resource): resource_key = 'tenant' resources_key = 'tenants' base_path = '/tenants' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The description of the tenant. *Type: string* description = resource.Body('description') #: Setting this attribute to ``False`` prevents users from authorizing #: against this tenant. Additionally, all pre-existing tokens authorized #: for the tenant are immediately invalidated. Re-enabling a tenant #: does not re-enable pre-existing tokens. *Type: bool* is_enabled = resource.Body('enabled', type=bool) #: Unique tenant name. *Type: string* name = resource.Body('name') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v2/user.py0000664000175000017500000000247200000000000021733 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class User(resource.Resource): resource_key = 'user' resources_key = 'users' base_path = '/users' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The email of this user. *Type: string* email = resource.Body('email') #: Setting this value to ``False`` prevents the user from authenticating or #: receiving authorization. Additionally, all pre-existing tokens held by #: the user are immediately invalidated. Re-enabling a user does not #: re-enable pre-existing tokens. *Type: bool* is_enabled = resource.Body('enabled', type=bool) #: The name of this user. *Type: string* name = resource.Body('name') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2733269 openstacksdk-4.0.0/openstack/identity/v3/0000775000175000017500000000000000000000000020377 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/__init__.py0000664000175000017500000000000000000000000022476 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/_proxy.py0000664000175000017500000026364400000000000022310 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import openstack.exceptions as exception from openstack.identity.v3 import ( application_credential as _application_credential, ) from openstack.identity.v3 import access_rule as _access_rule from openstack.identity.v3 import credential as _credential from openstack.identity.v3 import domain as _domain from openstack.identity.v3 import domain_config as _domain_config from openstack.identity.v3 import endpoint as _endpoint from openstack.identity.v3 import federation_protocol as _federation_protocol from openstack.identity.v3 import group as _group from openstack.identity.v3 import identity_provider as _identity_provider from openstack.identity.v3 import limit as _limit from openstack.identity.v3 import mapping as _mapping from openstack.identity.v3 import policy as _policy from openstack.identity.v3 import project as _project from openstack.identity.v3 import region as _region from openstack.identity.v3 import registered_limit as _registered_limit from openstack.identity.v3 import role as _role from openstack.identity.v3 import role_assignment as _role_assignment from openstack.identity.v3 import ( role_domain_group_assignment as _role_domain_group_assignment, ) from openstack.identity.v3 import ( role_domain_user_assignment as _role_domain_user_assignment, ) from openstack.identity.v3 import ( role_project_group_assignment as _role_project_group_assignment, ) from openstack.identity.v3 import ( role_project_user_assignment as _role_project_user_assignment, ) from openstack.identity.v3 import ( role_system_group_assignment as _role_system_group_assignment, ) from 
openstack.identity.v3 import ( role_system_user_assignment as _role_system_user_assignment, ) from openstack.identity.v3 import service as _service from openstack.identity.v3 import service_provider as _service_provider from openstack.identity.v3 import system as _system from openstack.identity.v3 import trust as _trust from openstack.identity.v3 import user as _user from openstack import proxy from openstack import resource from openstack import utils class Proxy(proxy.Proxy): _resource_registry = { "application_credential": _application_credential.ApplicationCredential, # noqa: E501 "access_rule": _access_rule.AccessRule, "credential": _credential.Credential, "domain": _domain.Domain, "endpoint": _endpoint.Endpoint, "federation_protocol": _federation_protocol.FederationProtocol, "group": _group.Group, "identity_provider": _identity_provider.IdentityProvider, "limit": _limit.Limit, "mapping": _mapping.Mapping, "policy": _policy.Policy, "project": _project.Project, "region": _region.Region, "registered_limit": _registered_limit.RegisteredLimit, "role": _role.Role, "role_assignment": _role_assignment.RoleAssignment, "role_domain_group_assignment": _role_domain_group_assignment.RoleDomainGroupAssignment, # noqa: E501 "role_domain_user_assignment": _role_domain_user_assignment.RoleDomainUserAssignment, # noqa: E501 "role_project_group_assignment": _role_project_group_assignment.RoleProjectGroupAssignment, # noqa: E501 "role_project_user_assignment": _role_project_user_assignment.RoleProjectUserAssignment, # noqa: E501 "role_system_group_assignment": _role_system_group_assignment.RoleSystemGroupAssignment, # noqa: E501 "role_system_user_assignment": _role_system_user_assignment.RoleSystemUserAssignment, # noqa: E501 "service": _service.Service, "system": _system.System, "trust": _trust.Trust, "user": _user.User, } # ========== Credentials ========== def create_credential(self, **attrs): """Create a new credential from attributes :param dict attrs: Keyword arguments 
which will be used to create a :class:`~openstack.identity.v3.credential.Credential`, comprised of the properties on the Credential class. :returns: The results of credential creation :rtype: :class:`~openstack.identity.v3.credential.Credential` """ return self._create(_credential.Credential, **attrs) def delete_credential(self, credential, ignore_missing=True): """Delete a credential :param credential: The value can be either the ID of a credential or a :class:`~openstack.identity.v3.credential.Credential` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the credential does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent credential. :returns: ``None`` """ self._delete( _credential.Credential, credential, ignore_missing=ignore_missing ) def find_credential(self, name_or_id, ignore_missing=True): """Find a single credential :param name_or_id: The name or ID of a credential. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.credential.Credential` or None """ return self._find( _credential.Credential, name_or_id, ignore_missing=ignore_missing ) def get_credential(self, credential): """Get a single credential :param credential: The value can be the ID of a credential or a :class:`~openstack.identity.v3.credential.Credential` instance. :returns: One :class:`~openstack.identity.v3.credential.Credential` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_credential.Credential, credential) def credentials(self, **query): """Retrieve a generator of credentials :param kwargs query: Optional query parameters to be sent to limit the resources being returned. 
:returns: A generator of credentials instances. :rtype: :class:`~openstack.identity.v3.credential.Credential` """ # TODO(briancurtin): This is paginated but requires base list changes. return self._list(_credential.Credential, **query) def update_credential(self, credential, **attrs): """Update a credential :param credential: Either the ID of a credential or a :class:`~openstack.identity.v3.credential.Credential` instance. :param attrs: The attributes to update on the credential represented by ``credential``. :returns: The updated credential :rtype: :class:`~openstack.identity.v3.credential.Credential` """ return self._update(_credential.Credential, credential, **attrs) # ========== Domains ========== def create_domain(self, **attrs): """Create a new domain from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.domain.Domain`, comprised of the properties on the Domain class. :returns: The results of domain creation :rtype: :class:`~openstack.identity.v3.domain.Domain` """ return self._create(_domain.Domain, **attrs) def delete_domain(self, domain, ignore_missing=True): """Delete a domain :param domain: The value can be either the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the domain does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent domain. :returns: ``None`` """ self._delete(_domain.Domain, domain, ignore_missing=ignore_missing) def find_domain(self, name_or_id, ignore_missing=True): """Find a single domain :param name_or_id: The name or ID of a domain. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.identity.v3.domain.Domain` or None """ return self._find( _domain.Domain, name_or_id, ignore_missing=ignore_missing ) def get_domain(self, domain): """Get a single domain :param domain: The value can be the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :returns: One :class:`~openstack.identity.v3.domain.Domain` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_domain.Domain, domain) def domains(self, **query): """Retrieve a generator of domains :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of domain instances. :rtype: :class:`~openstack.identity.v3.domain.Domain` """ # TODO(briancurtin): This is paginated but requires base list changes. return self._list(_domain.Domain, **query) def update_domain(self, domain, **attrs): """Update a domain :param domain: Either the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param attrs: The attributes to update on the domain represented by ``domain``. :returns: The updated domain :rtype: :class:`~openstack.identity.v3.domain.Domain` """ return self._update(_domain.Domain, domain, **attrs) # ========== Domain configs ========== def create_domain_config(self, domain, **attrs): """Create a new config for a domain from attributes. :param domain: The value can be the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.domain_config.DomainConfig` comprised of the properties on the DomainConfig class. 
:returns: The results of domain config creation :rtype: :class:`~openstack.identity.v3.domain_config.DomainConfig` """ domain_id = resource.Resource._get_id(domain) return self._create( _domain_config.DomainConfig, domain_id=domain_id, **attrs, ) def delete_domain_config(self, domain, ignore_missing=True): """Delete a config for a domain :param domain: The value can be the ID of a domain or a a :class:`~openstack.identity.v3.domain.Domain` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the identity provider does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent config for a domain. :returns: ``None`` """ domain_id = resource.Resource._get_id(domain) self._delete( _domain_config.DomainConfig, domain_id=domain_id, ignore_missing=ignore_missing, ) def get_domain_config(self, domain): """Get a single config for a domain :param domain_id: The value can be the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :returns: One :class:`~openstack.identity.v3.domain_config.DomainConfig` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ domain_id = resource.Resource._get_id(domain) return self._get( _domain_config.DomainConfig, domain_id=domain_id, requires_id=False, ) def update_domain_config(self, domain, **attrs): """Update a config for a domain :param domain_id: The value can be the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param attrs: The attributes to update on the config for a domain represented by ``domain_id``. 
:returns: The updated config for a domain :rtype: :class:`~openstack.identity.v3.domain_config.DomainConfig` """ domain_id = resource.Resource._get_id(domain) return self._update( _domain_config.DomainConfig, domain_id=domain_id, **attrs, ) # ========== Endpoints ========== def create_endpoint(self, **attrs): """Create a new endpoint from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.endpoint.Endpoint`, comprised of the properties on the Endpoint class. :returns: The results of endpoint creation :rtype: :class:`~openstack.identity.v3.endpoint.Endpoint` """ return self._create(_endpoint.Endpoint, **attrs) def delete_endpoint(self, endpoint, ignore_missing=True): """Delete an endpoint :param endpoint: The value can be either the ID of an endpoint or a :class:`~openstack.identity.v3.endpoint.Endpoint` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the endpoint does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent endpoint. :returns: ``None`` """ self._delete( _endpoint.Endpoint, endpoint, ignore_missing=ignore_missing ) def find_endpoint(self, name_or_id, ignore_missing=True): """Find a single endpoint :param name_or_id: The name or ID of a endpoint. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.endpoint.Endpoint` or None """ return self._find( _endpoint.Endpoint, name_or_id, ignore_missing=ignore_missing ) def get_endpoint(self, endpoint): """Get a single endpoint :param endpoint: The value can be the ID of an endpoint or a :class:`~openstack.identity.v3.endpoint.Endpoint` instance. 
:returns: One :class:`~openstack.identity.v3.endpoint.Endpoint` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_endpoint.Endpoint, endpoint) def endpoints(self, **query): """Retrieve a generator of endpoints :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of endpoint instances. :rtype: :class:`~openstack.identity.v3.endpoint.Endpoint` """ # TODO(briancurtin): This is paginated but requires base list changes. return self._list(_endpoint.Endpoint, **query) def update_endpoint(self, endpoint, **attrs): """Update a endpoint :param endpoint: Either the ID of an endpoint or a :class:`~openstack.identity.v3.endpoint.Endpoint` instance. :param attrs: The attributes to update on the endpoint represented by ``endpoint``. :returns: The updated endpoint :rtype: :class:`~openstack.identity.v3.endpoint.Endpoint` """ return self._update(_endpoint.Endpoint, endpoint, **attrs) # ========== Groups ========== def create_group(self, **attrs): """Create a new group from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.group.Group`, comprised of the properties on the Group class. :returns: The results of group creation :rtype: :class:`~openstack.identity.v3.group.Group` """ return self._create(_group.Group, **attrs) def delete_group(self, group, ignore_missing=True): """Delete a group :param group: The value can be either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the group does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent group. 
:returns: ``None`` """ self._delete(_group.Group, group, ignore_missing=ignore_missing) def find_group(self, name_or_id, ignore_missing=True, **query): """Find a single group :param name_or_id: The name or ID of a group. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.group.Group` or None """ return self._find( _group.Group, name_or_id, ignore_missing=ignore_missing, **query, ) def get_group(self, group): """Get a single group :param group: The value can be the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :returns: One :class:`~openstack.identity.v3.group.Group` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_group.Group, group) def groups(self, **query): """Retrieve a generator of groups :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of group instances. :rtype: :class:`~openstack.identity.v3.group.Group` """ # TODO(briancurtin): This is paginated but requires base list changes. return self._list(_group.Group, **query) def update_group(self, group, **attrs): """Update a group :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param attrs: The attributes to update on the group represented by ``group``. :returns: The updated group :rtype: :class:`~openstack.identity.v3.group.Group` """ return self._update(_group.Group, group, **attrs) def add_user_to_group(self, user, group): """Add user to group :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. 
:return: ``None`` """ user = self._get_resource(_user.User, user) group = self._get_resource(_group.Group, group) group.add_user(self, user) def remove_user_from_group(self, user, group): """Remove user to group :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :return: ``None`` """ user = self._get_resource(_user.User, user) group = self._get_resource(_group.Group, group) group.remove_user(self, user) def check_user_in_group(self, user, group): """Check whether user belongsto group :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :return: A boolean representing current relation """ user = self._get_resource(_user.User, user) group = self._get_resource(_group.Group, group) return group.check_user(self, user) def group_users(self, group, **attrs): """List users in a group :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param attrs: Only password_expires_at can be filter for result. :return: List of :class:`~openstack.identity.v3.user.User` """ group = self._get_resource(_group.Group, group) base_path = utils.urljoin(group.base_path, group.id, 'users') users = self._list(_user.User, base_path=base_path, **attrs) return users # ========== Policies ========== def create_policy(self, **attrs): """Create a new policy from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.policy.Policy`, comprised of the properties on the Policy class. 
:returns: The results of policy creation :rtype: :class:`~openstack.identity.v3.policy.Policy` """ return self._create(_policy.Policy, **attrs) def delete_policy(self, policy, ignore_missing=True): """Delete a policy :param policy: The value can be either the ID of a policy or a :class:`~openstack.identity.v3.policy.Policy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the policy does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent policy. :returns: ``None`` """ self._delete(_policy.Policy, policy, ignore_missing=ignore_missing) def find_policy(self, name_or_id, ignore_missing=True): """Find a single policy :param name_or_id: The name or ID of a policy. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.policy.Policy` or None """ return self._find( _policy.Policy, name_or_id, ignore_missing=ignore_missing ) def get_policy(self, policy): """Get a single policy :param policy: The value can be the ID of a policy or a :class:`~openstack.identity.v3.policy.Policy` instance. :returns: One :class:`~openstack.identity.v3.policy.Policy` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_policy.Policy, policy) def policies(self, **query): """Retrieve a generator of policies :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of policy instances. :rtype: :class:`~openstack.identity.v3.policy.Policy` """ # TODO(briancurtin): This is paginated but requires base list changes. 
return self._list(_policy.Policy, **query) def update_policy(self, policy, **attrs): """Update a policy :param policy: Either the ID of a policy or a :class:`~openstack.identity.v3.policy.Policy` instance. :param attrs: The attributes to update on the policy represented by ``policy``. :returns: The updated policy :rtype: :class:`~openstack.identity.v3.policy.Policy` """ return self._update(_policy.Policy, policy, **attrs) # ========== Project ========== def create_project(self, **attrs): """Create a new project from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.project.Project`, comprised of the properties on the Project class. :returns: The results of project creation :rtype: :class:`~openstack.identity.v3.project.Project` """ return self._create(_project.Project, **attrs) def delete_project(self, project, ignore_missing=True): """Delete a project :param project: The value can be either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the project does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent project. :returns: ``None`` """ self._delete(_project.Project, project, ignore_missing=ignore_missing) def find_project(self, name_or_id, ignore_missing=True, **query): """Find a single project :param name_or_id: The name or ID of a project. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.identity.v3.project.Project` or None """ return self._find( _project.Project, name_or_id, ignore_missing=ignore_missing, **query, ) def get_project(self, project): """Get a single project :param project: The value can be the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :returns: One :class:`~openstack.identity.v3.project.Project` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_project.Project, project) def projects(self, **query): """Retrieve a generator of projects :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of project instances. :rtype: :class:`~openstack.identity.v3.project.Project` """ # TODO(briancurtin): This is paginated but requires base list changes. return self._list(_project.Project, **query) def user_projects(self, user, **query): """Retrieve a generator of projects to which the user has authorization to access. :param user: Either the user id or an instance of :class:`~openstack.identity.v3.user.User` :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of project instances. :rtype: :class:`~openstack.identity.v3.project.UserProject` """ user = self._get_resource(_user.User, user) return self._list(_project.UserProject, user_id=user.id, **query) def update_project(self, project, **attrs): """Update a project :param project: Either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :param attrs: The attributes to update on the project represented by ``project``. 
:returns: The updated project :rtype: :class:`~openstack.identity.v3.project.Project` """ return self._update(_project.Project, project, **attrs) # ========== Services ========== def create_service(self, **attrs): """Create a new service from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.service.Service`, comprised of the properties on the Service class. :returns: The results of service creation :rtype: :class:`~openstack.identity.v3.service.Service` """ return self._create(_service.Service, **attrs) def delete_service(self, service, ignore_missing=True): """Delete a service :param service: The value can be either the ID of a service or a :class:`~openstack.identity.v3.service.Service` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the service does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent service. :returns: ``None`` """ self._delete(_service.Service, service, ignore_missing=ignore_missing) def find_service(self, name_or_id, ignore_missing=True): """Find a single service :param name_or_id: The name or ID of a service. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.service.Service` or None """ return self._find( _service.Service, name_or_id, ignore_missing=ignore_missing ) def get_service(self, service): """Get a single service :param service: The value can be the ID of a service or a :class:`~openstack.identity.v3.service.Service` instance. :returns: One :class:`~openstack.identity.v3.service.Service` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get(_service.Service, service) def services(self, **query): """Retrieve a generator of services :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of service instances. :rtype: :class:`~openstack.identity.v3.service.Service` """ # TODO(briancurtin): This is paginated but requires base list changes. return self._list(_service.Service, **query) def update_service(self, service, **attrs): """Update a service :param service: Either the ID of a service or a :class:`~openstack.identity.v3.service.Service` instance. :param attrs: The attributes to update on the service represented by ``service``. :returns: The updated service :rtype: :class:`~openstack.identity.v3.service.Service` """ return self._update(_service.Service, service, **attrs) # ========== Users ========== def create_user(self, **attrs): """Create a new user from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.user.User`, comprised of the properties on the User class. :returns: The results of user creation :rtype: :class:`~openstack.identity.v3.user.User` """ return self._create(_user.User, **attrs) def delete_user(self, user, ignore_missing=True): """Delete a user :param user: The value can be either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the user does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent user. :returns: ``None`` """ self._delete(_user.User, user, ignore_missing=ignore_missing) def find_user(self, name_or_id, ignore_missing=True, **query): """Find a single user :param name_or_id: The name or ID of a user. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. 
When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.user.User` or None """ return self._find( _user.User, name_or_id, ignore_missing=ignore_missing, **query, ) def get_user(self, user): """Get a single user :param user: The value can be the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :returns: One :class:`~openstack.identity.v3.user.User` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_user.User, user) def users(self, **query): """Retrieve a generator of users :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of user instances. :rtype: :class:`~openstack.identity.v3.user.User` """ # TODO(briancurtin): This is paginated but requires base list changes. return self._list(_user.User, **query) def update_user(self, user, **attrs): """Update a user :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param attrs: The attributes to update on the user represented by ``attrs``. :returns: The updated user :rtype: :class:`~openstack.identity.v3.user.User` """ return self._update(_user.User, user, **attrs) # ========== Trusts ========== def create_trust(self, **attrs): """Create a new trust from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.trust.Trust`, comprised of the properties on the Trust class. :returns: The results of trust creation :rtype: :class:`~openstack.identity.v3.trust.Trust` """ return self._create(_trust.Trust, **attrs) def delete_trust(self, trust, ignore_missing=True): """Delete a trust :param trust: The value can be either the ID of a trust or a :class:`~openstack.identity.v3.trust.Trust` instance. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the credential does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent credential. :returns: ``None`` """ self._delete(_trust.Trust, trust, ignore_missing=ignore_missing) def find_trust(self, name_or_id, ignore_missing=True): """Find a single trust :param name_or_id: The name or ID of a trust. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.trust.Trust` or None """ return self._find( _trust.Trust, name_or_id, ignore_missing=ignore_missing ) def get_trust(self, trust): """Get a single trust :param trust: The value can be the ID of a trust or a :class:`~openstack.identity.v3.trust.Trust` instance. :returns: One :class:`~openstack.identity.v3.trust.Trust` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_trust.Trust, trust) def trusts(self, **query): """Retrieve a generator of trusts :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of trust instances. :rtype: :class:`~openstack.identity.v3.trust.Trust` """ # TODO(briancurtin): This is paginated but requires base list changes. return self._list(_trust.Trust, **query) # ========== Regions ========== def create_region(self, **attrs): """Create a new region from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.region.Region`, comprised of the properties on the Region class. :returns: The results of region creation. 
:rtype: :class:`~openstack.identity.v3.region.Region` """ return self._create(_region.Region, **attrs) def delete_region(self, region, ignore_missing=True): """Delete a region :param region: The value can be either the ID of a region or a :class:`~openstack.identity.v3.region.Region` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the region does not exist. When set to ``True``, no exception will be thrown when attempting to delete a nonexistent region. :returns: ``None`` """ self._delete(_region.Region, region, ignore_missing=ignore_missing) def find_region(self, name_or_id, ignore_missing=True): """Find a single region :param name_or_id: The name or ID of a region. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the region does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent region. :returns: One :class:`~openstack.identity.v3.region.Region` or None """ return self._find( _region.Region, name_or_id, ignore_missing=ignore_missing ) def get_region(self, region): """Get a single region :param region: The value can be the ID of a region or a :class:`~openstack.identity.v3.region.Region` instance. :returns: One :class:`~openstack.identity.v3.region.Region` :raises: :class:`~openstack.exceptions.NotFoundException` when no matching region can be found. """ return self._get(_region.Region, region) def regions(self, **query): """Retrieve a generator of regions :param kwargs query: Optional query parameters to be sent to limit the regions being returned. :returns: A generator of region instances. :rtype: :class:`~openstack.identity.v3.region.Region` """ # TODO(briancurtin): This is paginated but requires base list changes. 
return self._list(_region.Region, **query) def update_region(self, region, **attrs): """Update a region :param region: Either the ID of a region or a :class:`~openstack.identity.v3.region.Region` instance. :param attrs: The attributes to update on the region represented by ``region``. :returns: The updated region. :rtype: :class:`~openstack.identity.v3.region.Region` """ return self._update(_region.Region, region, **attrs) # ========== Roles ========== def create_role(self, **attrs): """Create a new role from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.role.Role`, comprised of the properties on the Role class. :returns: The results of role creation. :rtype: :class:`~openstack.identity.v3.role.Role` """ return self._create(_role.Role, **attrs) def delete_role(self, role, ignore_missing=True): """Delete a role :param role: The value can be either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the role does not exist. When set to ``True``, no exception will be thrown when attempting to delete a nonexistent role. :returns: ``None`` """ self._delete(_role.Role, role, ignore_missing=ignore_missing) def find_role(self, name_or_id, ignore_missing=True, **query): """Find a single role :param name_or_id: The name or ID of a role. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the role does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent role. :returns: One :class:`~openstack.identity.v3.role.Role` or None """ return self._find( _role.Role, name_or_id, ignore_missing=ignore_missing, **query, ) def get_role(self, role): """Get a single role :param role: The value can be the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. 
:returns: One :class:`~openstack.identity.v3.role.Role` :raises: :class:`~openstack.exceptions.NotFoundException` when no matching role can be found. """ return self._get(_role.Role, role) def roles(self, **query): """Retrieve a generator of roles :param kwargs query: Optional query parameters to be sent to limit the resources being returned. The options are: domain_id, name. :return: A generator of role instances. :rtype: :class:`~openstack.identity.v3.role.Role` """ return self._list(_role.Role, **query) def update_role(self, role, **attrs): """Update a role :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :param dict kwargs: The attributes to update on the role represented by ``value``. Only name can be updated :returns: The updated role. :rtype: :class:`~openstack.identity.v3.role.Role` """ return self._update(_role.Role, role, **attrs) # ========== Role assignments ========== def role_assignments_filter( self, domain=None, project=None, system=None, group=None, user=None ): """Retrieve a generator of roles assigned to user/group :param domain: Either the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param project: Either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :param system: Either the system name or a :class:`~openstack.identity.v3.system.System` instance. :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :return: A generator of role instances. 
:rtype: :class:`~openstack.identity.v3.role.Role` """ if domain and project and system: raise exception.InvalidRequest( 'Only one of domain, project, or system can be specified' ) if domain is None and project is None and system is None: raise exception.InvalidRequest( 'Either domain, project, or system should be specified' ) if group and user: raise exception.InvalidRequest( 'Only one of group or user can be specified' ) if group is None and user is None: raise exception.InvalidRequest( 'Either group or user should be specified' ) if domain: domain_id = resource.Resource._get_id(domain) if group: group_id = resource.Resource._get_id(group) return self._list( _role_domain_group_assignment.RoleDomainGroupAssignment, domain_id=domain_id, group_id=group_id, ) else: user_id = resource.Resource._get_id(user) return self._list( _role_domain_user_assignment.RoleDomainUserAssignment, domain_id=domain_id, user_id=user_id, ) elif project: project_id = resource.Resource._get_id(project) if group: group_id = resource.Resource._get_id(group) return self._list( _role_project_group_assignment.RoleProjectGroupAssignment, project_id=project_id, group_id=group_id, ) else: user_id = resource.Resource._get_id(user) return self._list( _role_project_user_assignment.RoleProjectUserAssignment, project_id=project_id, user_id=user_id, ) else: system_id = resource.Resource._get_id(system) if group: group_id = resource.Resource._get_id(group) return self._list( _role_system_group_assignment.RoleSystemGroupAssignment, system_id=system_id, group_id=group_id, ) else: user_id = resource.Resource._get_id(user) return self._list( _role_system_user_assignment.RoleSystemUserAssignment, system_id=system_id, user_id=user_id, ) def role_assignments(self, **query): """Retrieve a generator of role assignments :param kwargs query: Optional query parameters to be sent to limit the resources being returned. 
The options are: group_id, role_id, scope_domain_id, scope_project_id, inherited_to, user_id, include_names, include_subtree. :return: :class:`~openstack.identity.v3.role_assignment.RoleAssignment` """ return self._list(_role_assignment.RoleAssignment, **query) def assign_domain_role_to_user(self, domain, user, role): """Assign role to user on a domain :param domain: Either the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :return: ``None`` """ domain = self._get_resource(_domain.Domain, domain) user = self._get_resource(_user.User, user) role = self._get_resource(_role.Role, role) domain.assign_role_to_user(self, user, role) def unassign_domain_role_from_user(self, domain, user, role): """Unassign role from user on a domain :param domain: Either the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :return: ``None`` """ domain = self._get_resource(_domain.Domain, domain) user = self._get_resource(_user.User, user) role = self._get_resource(_role.Role, role) domain.unassign_role_from_user(self, user, role) def validate_user_has_domain_role(self, domain, user, role): """Validates that a user has a role on a domain :param domain: Either the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. 
:returns: True if user has role in domain """ domain = self._get_resource(_domain.Domain, domain) user = self._get_resource(_user.User, user) role = self._get_resource(_role.Role, role) return domain.validate_user_has_role(self, user, role) def assign_domain_role_to_group(self, domain, group, role): """Assign role to group on a domain :param domain: Either the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :return: ``None`` """ domain = self._get_resource(_domain.Domain, domain) group = self._get_resource(_group.Group, group) role = self._get_resource(_role.Role, role) domain.assign_role_to_group(self, group, role) def unassign_domain_role_from_group(self, domain, group, role): """Unassign role from group on a domain :param domain: Either the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :return: ``None`` """ domain = self._get_resource(_domain.Domain, domain) group = self._get_resource(_group.Group, group) role = self._get_resource(_role.Role, role) domain.unassign_role_from_group(self, group, role) def validate_group_has_domain_role(self, domain, group, role): """Validates that a group has a role on a domain :param domain: Either the ID of a domain or a :class:`~openstack.identity.v3.domain.Domain` instance. :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. 
:returns: True if group has role on domain """ domain = self._get_resource(_domain.Domain, domain) group = self._get_resource(_group.Group, group) role = self._get_resource(_role.Role, role) return domain.validate_group_has_role(self, group, role) def assign_project_role_to_user(self, project, user, role): """Assign role to user on a project :param project: Either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :return: ``None`` """ project = self._get_resource(_project.Project, project) user = self._get_resource(_user.User, user) role = self._get_resource(_role.Role, role) project.assign_role_to_user(self, user, role) def unassign_project_role_from_user(self, project, user, role): """Unassign role from user on a project :param project: Either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :return: ``None`` """ project = self._get_resource(_project.Project, project) user = self._get_resource(_user.User, user) role = self._get_resource(_role.Role, role) project.unassign_role_from_user(self, user, role) def validate_user_has_project_role(self, project, user, role): """Validates that a user has a role on a project :param project: Either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. 
:returns: True if user has role in project """ project = self._get_resource(_project.Project, project) user = self._get_resource(_user.User, user) role = self._get_resource(_role.Role, role) return project.validate_user_has_role(self, user, role) def assign_project_role_to_group(self, project, group, role): """Assign role to group on a project :param project: Either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :return: ``None`` """ project = self._get_resource(_project.Project, project) group = self._get_resource(_group.Group, group) role = self._get_resource(_role.Role, role) project.assign_role_to_group(self, group, role) def unassign_project_role_from_group(self, project, group, role): """Unassign role from group on a project :param project: Either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :return: ``None`` """ project = self._get_resource(_project.Project, project) group = self._get_resource(_group.Group, group) role = self._get_resource(_role.Role, role) project.unassign_role_from_group(self, group, role) def validate_group_has_project_role(self, project, group, role): """Validates that a group has a role on a project :param project: Either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. 
:returns: True if group has role in project """ project = self._get_resource(_project.Project, project) group = self._get_resource(_group.Group, group) role = self._get_resource(_role.Role, role) return project.validate_group_has_role(self, group, role) def assign_system_role_to_user(self, user, role, system): """Assign a role to user on a system :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :param system: The system name :return: ``None`` """ user = self._get_resource(_user.User, user) role = self._get_resource(_role.Role, role) system = self._get_resource(_system.System, system) system.assign_role_to_user(self, user, role) def unassign_system_role_from_user(self, user, role, system): """Unassign a role from user on a system :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :param system: The system name :return: ``None`` """ user = self._get_resource(_user.User, user) role = self._get_resource(_role.Role, role) system = self._get_resource(_system.System, system) system.unassign_role_from_user(self, user, role) def validate_user_has_system_role(self, user, role, system): """Validates that a user has a role on a system :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. 
:param system: The system name :returns: True if user has role in system """ user = self._get_resource(_user.User, user) role = self._get_resource(_role.Role, role) system = self._get_resource(_system.System, system) return system.validate_user_has_role(self, user, role) def assign_system_role_to_group(self, group, role, system): """Assign a role to group on a system :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :param system: The system name :return: ``None`` """ group = self._get_resource(_group.Group, group) role = self._get_resource(_role.Role, role) system = self._get_resource(_system.System, system) system.assign_role_to_group(self, group, role) def unassign_system_role_from_group(self, group, role, system): """Unassign a role from group on a system :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. :param system: The system name :return: ``None`` """ group = self._get_resource(_group.Group, group) role = self._get_resource(_role.Role, role) system = self._get_resource(_system.System, system) system.unassign_role_from_group(self, group, role) def validate_group_has_system_role(self, group, role, system): """Validates that a group has a role on a system :param group: Either the ID of a group or a :class:`~openstack.identity.v3.group.Group` instance. :param role: Either the ID of a role or a :class:`~openstack.identity.v3.role.Role` instance. 
:param system: The system name :returns: True if group has role on system """ group = self._get_resource(_group.Group, group) role = self._get_resource(_role.Role, role) system = self._get_resource(_system.System, system) return system.validate_group_has_role(self, group, role) # ========== Registered limits ========== def registered_limits(self, **query): """Retrieve a generator of registered_limits :param kwargs query: Optional query parameters to be sent to limit the registered_limits being returned. :returns: A generator of registered_limits instances. :rtype: :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` """ return self._list(_registered_limit.RegisteredLimit, **query) def get_registered_limit(self, registered_limit): """Get a single registered_limit :param registered_limit: The value can be the ID of a registered_limit or a :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` instance. :returns: One :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_registered_limit.RegisteredLimit, registered_limit) def create_registered_limit(self, **attrs): """Create a new registered_limit from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.registered_limit.RegisteredLimit`, comprised of the properties on the RegisteredLimit class. :returns: The results of registered_limit creation. :rtype: :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` """ return self._create(_registered_limit.RegisteredLimit, **attrs) def update_registered_limit(self, registered_limit, **attrs): """Update a registered_limit :param registered_limit: Either the ID of a registered_limit. or a :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` instance. :param dict kwargs: The attributes to update on the registered_limit represented by ``value``. 
:returns: The updated registered_limit. :rtype: :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` """ return self._update( _registered_limit.RegisteredLimit, registered_limit, **attrs ) def delete_registered_limit(self, registered_limit, ignore_missing=True): """Delete a registered_limit :param registered_limit: The value can be either the ID of a registered_limit or a :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the registered_limit does not exist. When set to ``True``, no exception will be thrown when attempting to delete a nonexistent registered_limit. :returns: ``None`` """ self._delete( _registered_limit.RegisteredLimit, registered_limit, ignore_missing=ignore_missing, ) # ========== Limits ========== def limits(self, **query): """Retrieve a generator of limits :param kwargs query: Optional query parameters to be sent to limit the limits being returned. :returns: A generator of limits instances. :rtype: :class:`~openstack.identity.v3.limit.Limit` """ return self._list(_limit.Limit, **query) def get_limit(self, limit): """Get a single limit :param limit: The value can be the ID of a limit or a :class:`~openstack.identity.v3.limit.Limit` instance. :returns: One :class:`~openstack.identity.v3.limit.Limit` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_limit.Limit, limit) def create_limit(self, **attrs): """Create a new limit from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.limit.Limit`, comprised of the properties on the Limit class. :returns: The results of limit creation. :rtype: :class:`~openstack.identity.v3.limit.Limit` """ return self._create(_limit.Limit, **attrs) def update_limit(self, limit, **attrs): """Update a limit :param limit: Either the ID of a limit. 
or a :class:`~openstack.identity.v3.limit.Limit` instance. :param dict kwargs: The attributes to update on the limit represented by ``value``. :returns: The updated limit. :rtype: :class:`~openstack.identity.v3.limit.Limit` """ return self._update(_limit.Limit, limit, **attrs) def delete_limit(self, limit, ignore_missing=True): """Delete a limit :param limit: The value can be either the ID of a limit or a :class:`~openstack.identity.v3.limit.Limit` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the limit does not exist. When set to ``True``, no exception will be thrown when attempting to delete a nonexistent limit. :returns: ``None`` """ self._delete(_limit.Limit, limit, ignore_missing=ignore_missing) # ========== Application credentials ========== def application_credentials(self, user, **query): """Retrieve a generator of application credentials :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of application credentials instances. :rtype: :class:`~openstack.identity.v3.application_credential.ApplicationCredential` """ user = self._get_resource(_user.User, user) return self._list( _application_credential.ApplicationCredential, user_id=user.id, **query, ) def get_application_credential(self, user, application_credential): """Get a single application credential :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param application_credential: The value can be the ID of a application credential or a :class:`~openstack.identity.v3.application_credential.ApplicationCredential` instance. :returns: One :class:`~openstack.identity.v3.application_credential.ApplicationCredential` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" user = self._get_resource(_user.User, user) return self._get( _application_credential.ApplicationCredential, application_credential, user_id=user.id, ) def create_application_credential(self, user, name, **attrs): """Create a new application credential from attributes :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param name: The name of the application credential which is unique to the user. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.application_credential.ApplicationCredential`, comprised of the properties on the ApplicationCredential class. :returns: The results of application credential creation. :rtype: :class:`~openstack.identity.v3.application_credential.ApplicationCredential` """ user = self._get_resource(_user.User, user) return self._create( _application_credential.ApplicationCredential, name=name, user_id=user.id, **attrs, ) def find_application_credential( self, user, name_or_id, ignore_missing=True, **query, ): """Find a single application credential :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param name_or_id: The name or ID of an application credential. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.identity.v3.application_credential.ApplicationCredential` or None """ user = self._get_resource(_user.User, user) return self._find( _application_credential.ApplicationCredential, user_id=user.id, name_or_id=name_or_id, ignore_missing=ignore_missing, **query, ) def delete_application_credential( self, user, application_credential, ignore_missing=True ): """Delete an application credential :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param application credential: The value can be either the ID of an application credential or a :class:`~openstack.identity.v3.application_credential.ApplicationCredential` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the application credential does not exist. When set to ``True``, no exception will be thrown when attempting to delete a nonexistent application credential. :returns: ``None`` """ user = self._get_resource(_user.User, user) self._delete( _application_credential.ApplicationCredential, application_credential, user_id=user.id, ignore_missing=ignore_missing, ) # ========== Federation protocols ========== def create_federation_protocol(self, idp_id, **attrs): """Create a new federation protocol from attributes :param idp_id: The ID of the identity provider or a :class:`~openstack.identity.v3.identity_provider.IdentityProvider` representing the identity provider the protocol is to be attached to. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.federation_protocol.FederationProtocol`, comprised of the properties on the FederationProtocol class. 
:returns: The results of federation protocol creation :rtype: :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` """ idp_cls = _identity_provider.IdentityProvider if isinstance(idp_id, idp_cls): idp_id = idp_id.id return self._create( _federation_protocol.FederationProtocol, idp_id=idp_id, **attrs ) def delete_federation_protocol( self, idp_id, protocol, ignore_missing=True ): """Delete a federation protocol :param idp_id: The ID of the identity provider or a :class:`~openstack.identity.v3.identity_provider.IdentityProvider` representing the identity provider the protocol is attached to. Can be None if protocol is a :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` instance. :param protocol: The ID of a federation protocol or a :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the federation protocol does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent federation protocol. :returns: ``None`` """ cls = _federation_protocol.FederationProtocol if idp_id is None and isinstance(protocol, cls): idp_id = protocol.idp_id idp_cls = _identity_provider.IdentityProvider if isinstance(idp_id, idp_cls): idp_id = idp_id.id self._delete( cls, protocol, ignore_missing=ignore_missing, idp_id=idp_id ) def find_federation_protocol(self, idp_id, protocol, ignore_missing=True): """Find a single federation protocol :param idp_id: The ID of the identity provider or a :class:`~openstack.identity.v3.identity_provider.IdentityProvider` representing the identity provider the protocol is attached to. :param protocol: The name or ID of a federation protocol. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. 
When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One federation protocol or None :rtype: :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` """ idp_cls = _identity_provider.IdentityProvider if isinstance(idp_id, idp_cls): idp_id = idp_id.id return self._find( _federation_protocol.FederationProtocol, protocol, ignore_missing=ignore_missing, idp_id=idp_id, ) def get_federation_protocol(self, idp_id, protocol): """Get a single federation protocol :param idp_id: The ID of the identity provider or a :class:`~openstack.identity.v3.identity_provider.IdentityProvider` representing the identity provider the protocol is attached to. Can be None if protocol is a :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` :param protocol: The value can be the ID of a federation protocol or a :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` instance. :returns: One federation protocol :rtype: :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ cls = _federation_protocol.FederationProtocol if idp_id is None and isinstance(protocol, cls): idp_id = protocol.idp_id idp_cls = _identity_provider.IdentityProvider if isinstance(idp_id, idp_cls): idp_id = idp_id.id return self._get(cls, protocol, idp_id=idp_id) def federation_protocols(self, idp_id, **query): """Retrieve a generator of federation protocols :param idp_id: The ID of the identity provider or a :class:`~openstack.identity.v3.identity_provider.IdentityProvider` representing the identity provider the protocol is attached to. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of federation protocol instances. 
:rtype: :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` """ idp_cls = _identity_provider.IdentityProvider if isinstance(idp_id, idp_cls): idp_id = idp_id.id return self._list( _federation_protocol.FederationProtocol, idp_id=idp_id, **query ) def update_federation_protocol(self, idp_id, protocol, **attrs): """Update a federation protocol :param idp_id: The ID of the identity provider or a :class:`~openstack.identity.v3.identity_provider.IdentityProvider` representing the identity provider the protocol is attached to. Can be None if protocol is a :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` :param protocol: Either the ID of a federation protocol or a :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` instance. :param attrs: The attributes to update on the federation protocol represented by ``protocol``. :returns: The updated federation protocol :rtype: :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` """ cls = _federation_protocol.FederationProtocol if (idp_id is None) and (isinstance(protocol, cls)): idp_id = protocol.idp_id idp_cls = _identity_provider.IdentityProvider if isinstance(idp_id, idp_cls): idp_id = idp_id.id return self._update(cls, protocol, idp_id=idp_id, **attrs) # ========== Mappings ========== def create_mapping(self, **attrs): """Create a new mapping from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.mapping.Mapping`, comprised of the properties on the Mapping class. :returns: The results of mapping creation :rtype: :class:`~openstack.identity.v3.mapping.Mapping` """ return self._create(_mapping.Mapping, **attrs) def delete_mapping(self, mapping, ignore_missing=True): """Delete a mapping :param mapping: The ID of a mapping or a :class:`~openstack.identity.v3.mapping.Mapping` instance. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the mapping does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent mapping. :returns: ``None`` """ self._delete(_mapping.Mapping, mapping, ignore_missing=ignore_missing) def find_mapping(self, name_or_id, ignore_missing=True): """Find a single mapping :param name_or_id: The name or ID of a mapping. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.mapping.Mapping` or None """ return self._find( _mapping.Mapping, name_or_id, ignore_missing=ignore_missing ) def get_mapping(self, mapping): """Get a single mapping :param mapping: The value can be the ID of a mapping or a :class:`~openstack.identity.v3.mapping.Mapping` instance. :returns: One :class:`~openstack.identity.v3.mapping.Mapping` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_mapping.Mapping, mapping) def mappings(self, **query): """Retrieve a generator of mappings :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of mapping instances. :rtype: :class:`~openstack.identity.v3.mapping.Mapping` """ return self._list(_mapping.Mapping, **query) def update_mapping(self, mapping, **attrs): """Update a mapping :param mapping: Either the ID of a mapping or a :class:`~openstack.identity.v3.mapping.Mapping` instance. :param attrs: The attributes to update on the mapping represented by ``mapping``. 
:returns: The updated mapping :rtype: :class:`~openstack.identity.v3.mapping.Mapping` """ return self._update(_mapping.Mapping, mapping, **attrs) # ========== Identity providers ========== def create_identity_provider(self, **attrs): """Create a new identity provider from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.identity_provider.IdentityProvider` comprised of the properties on the IdentityProvider class. :returns: The results of identity provider creation :rtype: :class:`~openstack.identity.v3.identity_provider.IdentityProvider` """ return self._create(_identity_provider.IdentityProvider, **attrs) def delete_identity_provider(self, identity_provider, ignore_missing=True): """Delete an identity provider :param mapping: The ID of an identity provoder or a :class:`~openstack.identity.v3.identity_provider.IdentityProvider` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the identity provider does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent identity provider. :returns: ``None`` """ self._delete( _identity_provider.IdentityProvider, identity_provider, ignore_missing=ignore_missing, ) def find_identity_provider(self, name_or_id, ignore_missing=True): """Find a single identity provider :param name_or_id: The name or ID of an identity provider :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: The details of an identity provider or None. 
:rtype: :class:`~openstack.identity.v3.identity_provider.IdentityProvider` """ return self._find( _identity_provider.IdentityProvider, name_or_id, ignore_missing=ignore_missing, ) def get_identity_provider(self, identity_provider): """Get a single mapping :param mapping: The value can be the ID of an identity provider or a :class:`~openstack.identity.v3.identity_provider.IdentityProvider` instance. :returns: The details of an identity provider. :rtype: :class:`~openstack.identity.v3.identity_provider.IdentityProvider` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _identity_provider.IdentityProvider, identity_provider ) def identity_providers(self, **query): """Retrieve a generator of identity providers :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of identity provider instances. :rtype: :class:`~openstack.identity.v3.identity_provider.IdentityProvider` """ return self._list(_identity_provider.IdentityProvider, **query) def update_identity_provider(self, identity_provider, **attrs): """Update a mapping :param mapping: Either the ID of an identity provider or a :class:`~openstack.identity.v3.identity_provider.IdentityProvider` instance. :param attrs: The attributes to update on the identity_provider represented by ``identity_provider``. :returns: The updated identity provider. :rtype: :class:`~openstack.identity.v3.identity_provider.IdentityProvider` """ return self._update( _identity_provider.IdentityProvider, identity_provider, **attrs ) # ========== Access rules ========== def access_rules(self, user, **query): """Retrieve a generator of access rules :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of access rules instances. 
:rtype: :class:`~openstack.identity.v3.access_rule.AccessRule` """ user = self._get_resource(_user.User, user) return self._list(_access_rule.AccessRule, user_id=user.id, **query) def get_access_rule(self, user, access_rule): """Get a single access rule :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param access rule: The value can be the ID of an access rule or a :class:`~.access_rule.AccessRule` instance. :returns: One :class:`~.access_rule.AccessRule` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ user = self._get_resource(_user.User, user) return self._get(_access_rule.AccessRule, access_rule, user_id=user.id) def delete_access_rule(self, user, access_rule, ignore_missing=True): """Delete an access rule :param user: Either the ID of a user or a :class:`~openstack.identity.v3.user.User` instance. :param access rule: The value can be either the ID of an access rule or a :class:`~.access_rule.AccessRule` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the access rule does not exist. When set to ``True``, no exception will be thrown when attempting to delete a nonexistent access rule. :returns: ``None`` """ user = self._get_resource(_user.User, user) self._delete( _access_rule.AccessRule, access_rule, user_id=user.id, ignore_missing=ignore_missing, ) # ========== Service providers ========== def create_service_provider(self, **attrs): """Create a new service provider from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.identity.v3.service_provider.ServiceProvider`, comprised of the properties on the ServiceProvider class. 
:returns: The results of service provider creation :rtype: :class:`~openstack.identity.v3.service_provider.ServiceProvider` """ return self._create(_service_provider.ServiceProvider, **attrs) def delete_service_provider(self, service_provider, ignore_missing=True): """Delete a service provider :param service_provider: The ID of a service provider or a :class:`~openstack.identity.v3.service_provider.ServiceProvider` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the service provider does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent service provider. :returns: ``None`` """ self._delete( _service_provider.ServiceProvider, service_provider, ignore_missing=ignore_missing, ) def find_service_provider(self, name_or_id, ignore_missing=True): """Find a single service provider :param name_or_id: The name or ID of a service provider :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: The details of an service provider or None. :rtype: :class:`~openstack.identity.v3.service_provider.ServiceProvider` """ return self._find( _service_provider.ServiceProvider, name_or_id, ignore_missing=ignore_missing, ) def get_service_provider(self, service_provider): """Get a single service provider :param service_provider: The value can be the ID of a service provider or a :class:`~openstack.identity.v3.server_provider.ServiceProvider` instance. :returns: The details of an service provider. :rtype: :class:`~openstack.identity.v3.service_provider.ServiceProvider` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get(_service_provider.ServiceProvider, service_provider) def service_providers(self, **query): """Retrieve a generator of service providers :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of service provider instances. :rtype: :class:`~openstack.identity.v3.service_provider.ServiceProvider` """ return self._list(_service_provider.ServiceProvider, **query) def update_service_provider(self, service_provider, **attrs): """Update a service provider :param service_provider: Either the ID of an service provider or a :class:`~openstack.identity.v3.service_provider.ServiceProvider` instance. :param attrs: The attributes to update on the service provider represented by ``service_provider``. :returns: The updated service provider. :rtype: :class:`~openstack.identity.v3.service_provider.ServiceProvider` """ return self._update( _service_provider.ServiceProvider, service_provider, **attrs ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/access_rule.py0000664000175000017500000000255000000000000023243 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class AccessRule(resource.Resource): resource_key = 'access_rule' resources_key = 'access_rules' base_path = '/users/%(user_id)s/access_rules' # capabilities allow_fetch = True allow_delete = True allow_list = True # Properties #: The links for the access rule resource. links = resource.Body('links') #: Method that application credential is permitted to use. # *Type: string* method = resource.Body('method') #: Path that the application credential is permitted to access. # *Type: string* path = resource.Body('path') #: Service type identifier that application credential had access. # *Type: string* service = resource.Body('service') #: User ID using access rule. *Type: string* user_id = resource.URI('user_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/application_credential.py0000664000175000017500000000376300000000000025457 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ApplicationCredential(resource.Resource): resource_key = 'application_credential' resources_key = 'application_credentials' base_path = '/users/%(user_id)s/application_credentials' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: User ID using application credential. 
*Type: string* user_id = resource.URI('user_id') #: User object using application credential. *Type: string* user = resource.Body('user') #: The links for the application credential resource. links = resource.Body('links') #: name of the user. *Type: string* name = resource.Body('name') #: secret that application credential will be created with, if any. # *Type: string* secret = resource.Body('secret') #: description of application credential's purpose. *Type: string* description = resource.Body('description') #: expire time of application credential. *Type: string* expires_at = resource.Body('expires_at') #: roles of the user. *Type: list* roles = resource.Body('roles') #: restricts the application credential. *Type: boolean* unrestricted = resource.Body('unrestricted', type=bool) #: ID of project. *Type: string* project_id = resource.Body('project_id') #: access rules for application credential. *Type: list* access_rules = resource.Body('access_rules') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/credential.py0000664000175000017500000000317400000000000023070 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Credential(resource.Resource): resource_key = 'credential' resources_key = 'credentials' base_path = '/credentials' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'type', 'user_id', ) # Properties #: Arbitrary blob of the credential data, to be parsed according to the #: ``type``. *Type: string* blob = resource.Body('blob') #: References a project ID which limits the scope the credential applies #: to. This attribute is **mandatory** if the credential type is ``ec2``. #: *Type: string* project_id = resource.Body('project_id') #: Representing the credential type, such as ``ec2`` or ``cert``. #: A specific implementation may determine the list of supported types. #: *Type: string* type = resource.Body('type') #: References the user ID which owns the credential. *Type: string* user_id = resource.Body('user_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/domain.py0000664000175000017500000000757600000000000022237 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource from openstack import utils class Domain(resource.Resource): resource_key = 'domain' resources_key = 'domains' base_path = '/domains' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'name', is_enabled='enabled', ) # Properties #: The description of this domain. *Type: string* description = resource.Body('description') #: Setting this attribute to ``False`` prevents users from authorizing #: against this domain or any projects owned by this domain, and prevents #: users owned by this domain from authenticating or receiving any other #: authorization. Additionally, all pre-existing tokens applicable #: to the above entities are immediately invalidated. #: Re-enabling a domain does not re-enable pre-existing tokens. #: *Type: bool* is_enabled = resource.Body('enabled', type=bool) #: The globally unique name of this domain. *Type: string* name = resource.Body('name') #: The links related to the domain resource. 
links = resource.Body('links') def assign_role_to_user(self, session, user, role): """Assign role to user on domain""" url = utils.urljoin( self.base_path, self.id, 'users', user.id, 'roles', role.id ) resp = session.put( url, ) if resp.status_code == 204: return True return False def validate_user_has_role(self, session, user, role): """Validates that a user has a role on a domain""" url = utils.urljoin( self.base_path, self.id, 'users', user.id, 'roles', role.id ) resp = session.head( url, ) if resp.status_code == 204: return True return False def unassign_role_from_user(self, session, user, role): """Unassigns a role from a user on a domain""" url = utils.urljoin( self.base_path, self.id, 'users', user.id, 'roles', role.id ) resp = session.delete( url, ) if resp.status_code == 204: return True return False def assign_role_to_group(self, session, group, role): """Assign role to group on domain""" url = utils.urljoin( self.base_path, self.id, 'groups', group.id, 'roles', role.id ) resp = session.put( url, ) if resp.status_code == 204: return True return False def validate_group_has_role(self, session, group, role): """Validates that a group has a role on a domain""" url = utils.urljoin( self.base_path, self.id, 'groups', group.id, 'roles', role.id ) resp = session.head( url, ) if resp.status_code == 204: return True return False def unassign_role_from_group(self, session, group, role): """Unassigns a role from a group on a domain""" url = utils.urljoin( self.base_path, self.id, 'groups', group.id, 'roles', role.id ) resp = session.delete( url, ) if resp.status_code == 204: return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/domain_config.py0000664000175000017500000000270100000000000023545 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class DomainConfigLDAP(resource.Resource): #: The base distinguished name (DN) of LDAP. user_tree_dn = resource.Body('user_tree_dn') #: The LDAP URL. url = resource.Body('url') class DomainConfigDriver(resource.Resource): #: The Identity backend driver. driver = resource.Body('driver') class DomainConfig(resource.Resource): resource_key = 'config' base_path = '/domains/%(domain_id)s/config' requires_id = False create_requires_id = False commit_method = 'PATCH' create_method = 'PUT' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True #: The domain ID. domain_id = resource.URI('domain_id') #: An identity object. identity = resource.Body('identity', type=DomainConfigDriver) #: The config object. ldap = resource.Body('ldap', type=DomainConfigLDAP) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/endpoint.py0000664000175000017500000000423300000000000022573 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Endpoint(resource.Resource): resource_key = 'endpoint' resources_key = 'endpoints' base_path = '/endpoints' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'interface', 'region_id', 'service_id', ) # Properties #: Describes the interface of the endpoint according to one of the #: following values: #: #: - `public`: intended for consumption by end users, generally on a #: publicly available network interface #: - `internal`: not intended for consumption by end users, generally on an #: unmetered internal network interface #: - `admin`: intended only for consumption by those needing administrative #: access to the service, generally on a secure network interface #: #: *Type: string* interface = resource.Body('interface') #: Setting this value to ``False`` prevents the endpoint from appearing #: in the service catalog. *Type: bool* is_enabled = resource.Body('enabled', type=bool) #: The links for the region resource. links = resource.Body('links') #: Represents the containing region ID of the service endpoint. #: *New in v3.2* *Type: string* region_id = resource.Body('region_id') #: References the service ID to which the endpoint belongs. *Type: string* service_id = resource.Body('service_id') #: Fully qualified URL of the service endpoint. *Type: string* url = resource.Body('url') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/federation_protocol.py0000664000175000017500000000255600000000000025022 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class FederationProtocol(resource.Resource): resource_key = 'protocol' resources_key = 'protocols' base_path = '/OS-FEDERATION/identity_providers/%(idp_id)s/protocols' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True create_exclude_id_from_body = True create_method = 'PUT' commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'id', ) # Properties #: name of the protocol (read only) *Type: string* name = resource.Body('id') #: The ID of the identity provider the protocol is attached to. # *Type: string* idp_id = resource.URI('idp_id') #: The definition of the protocol # *Type: dict* mapping_id = resource.Body('mapping_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/group.py0000664000175000017500000000474000000000000022112 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import resource from openstack import utils class Group(resource.Resource): resource_key = 'group' resources_key = 'groups' base_path = '/groups' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'domain_id', 'name', ) # Properties #: The description of this group. *Type: string* description = resource.Body('description') #: References the domain ID which owns the group; if a domain ID is not #: specified by the client, the Identity service implementation will #: default it to the domain ID to which the client's token is scoped. #: *Type: string* domain_id = resource.Body('domain_id') #: Unique group name, within the owning domain. *Type: string* name = resource.Body('name') def add_user(self, session, user): """Add user to the group""" url = utils.urljoin(self.base_path, self.id, 'users', user.id) resp = session.put( url, ) exceptions.raise_from_response(resp) def remove_user(self, session, user): """Remove user from the group""" url = utils.urljoin(self.base_path, self.id, 'users', user.id) resp = session.delete( url, ) exceptions.raise_from_response(resp) def check_user(self, session, user): """Check whether user belongs to group""" url = utils.urljoin(self.base_path, self.id, 'users', user.id) resp = session.head( url, ) if resp.status_code == 404: # If we recieve 404 - treat this as False, # rather then returning exception return False exceptions.raise_from_response(resp) if resp.status_code == 204: return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/identity_provider.py0000664000175000017500000000323600000000000024520 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with 
the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class IdentityProvider(resource.Resource): resource_key = 'identity_provider' resources_key = 'identity_providers' base_path = '/OS-FEDERATION/identity_providers' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True create_method = 'PUT' create_exclude_id_from_body = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'id', is_enabled='enabled', ) # Properties #: The id of a domain associated with this identity provider. # *Type: string* domain_id = resource.Body('domain_id') #: A description of this identity provider. *Type: string* description = resource.Body('description') #: If the identity provider is currently enabled. *Type: bool* is_enabled = resource.Body('enabled', type=bool) #: Remote IDs associated with the identity provider. *Type: list* remote_ids = resource.Body('remote_ids', type=list) #: The identifier of the identity provider (read only). *Type: string* name = resource.Body('id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/limit.py0000664000175000017500000000412100000000000022065 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Limit(resource.Resource): resource_key = 'limit' resources_key = 'limits' base_path = '/limits' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' commit_jsonpatch = True _query_mapping = resource.QueryParameters( 'service_id', 'region_id', 'resource_name', 'project_id' ) # Properties #: User-facing description of the registered_limit. *Type: string* description = resource.Body('description') #: The links for the registered_limit resource. links = resource.Body('links') #: ID of service. *Type: string* service_id = resource.Body('service_id') #: ID of region, if any. *Type: string* region_id = resource.Body('region_id') #: The resource name. *Type: string* resource_name = resource.Body('resource_name') #: The resource limit value. *Type: int* resource_limit = resource.Body('resource_limit') #: ID of project. *Type: string* project_id = resource.Body('project_id') def _prepare_request_body(self, patch, prepend_key): body = self._body.dirty if prepend_key and self.resource_key is not None: if patch: body = {self.resource_key: body} else: # Keystone support bunch create for unified limit. So the # request body for creating limit is a list instead of dict. 
body = {self.resources_key: [body]} return body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/mapping.py0000664000175000017500000000217700000000000022413 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Mapping(resource.Resource): resource_key = 'mapping' resources_key = 'mappings' base_path = '/OS-FEDERATION/mappings' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True create_method = 'PUT' commit_method = 'PATCH' _query_mapping = resource.QueryParameters() # Properties #: The rules of this mapping. *Type: list* rules = resource.Body('rules', type=list) #: The identifier of the mapping. *Type: string* name = resource.Body('id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/policy.py0000664000175000017500000000246300000000000022255 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Policy(resource.Resource): resource_key = 'policy' resources_key = 'policies' base_path = '/policies' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' # Properties #: The policy rule set itself, as a serialized blob. *Type: string* blob = resource.Body('blob') #: The links for the policy resource. links = resource.Body('links') #: The ID for the project. project_id = resource.Body('project_id') #: The MIME Media Type of the serialized policy blob. *Type: string* type = resource.Body('type') #: The ID of the user who owns the policy user_id = resource.Body('user_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/project.py0000664000175000017500000001142500000000000022422 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.common import tag from openstack import resource from openstack import utils class Project(resource.Resource, tag.TagMixin): resource_key = 'project' resources_key = 'projects' base_path = '/projects' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _allow_unknown_attrs_in_body = True _query_mapping = resource.QueryParameters( 'domain_id', 'is_domain', 'name', 'parent_id', is_enabled='enabled', **tag.TagMixin._tag_query_parameters ) # Properties #: The description of the project. *Type: string* description = resource.Body('description') #: References the domain ID which owns the project; if a domain ID is not #: specified by the client, the Identity service implementation will #: default it to the domain ID to which the client's token is scoped. #: *Type: string* domain_id = resource.Body('domain_id') #: Indicates whether the project also acts as a domain. If set to True, #: the project acts as both a project and a domain. Default is False. #: New in version 3.6 is_domain = resource.Body('is_domain', type=bool) #: Setting this attribute to ``False`` prevents users from authorizing #: against this project. Additionally, all pre-existing tokens authorized #: for the project are immediately invalidated. Re-enabling a project #: does not re-enable pre-existing tokens. *Type: bool* is_enabled = resource.Body('enabled', type=bool) #: The resource options for the project. Available resource options are #: immutable. options = resource.Body('options', type=dict) #: The ID of the parent of the project. 
#: New in version 3.4 parent_id = resource.Body('parent_id') def assign_role_to_user(self, session, user, role): """Assign role to user on project""" url = utils.urljoin( self.base_path, self.id, 'users', user.id, 'roles', role.id ) resp = session.put( url, ) if resp.status_code == 204: return True return False def validate_user_has_role(self, session, user, role): """Validates that a user has a role on a project""" url = utils.urljoin( self.base_path, self.id, 'users', user.id, 'roles', role.id ) resp = session.head( url, ) if resp.status_code == 204: return True return False def unassign_role_from_user(self, session, user, role): """Unassigns a role from a user on a project""" url = utils.urljoin( self.base_path, self.id, 'users', user.id, 'roles', role.id ) resp = session.delete( url, ) if resp.status_code == 204: return True return False def assign_role_to_group(self, session, group, role): """Assign role to group on project""" url = utils.urljoin( self.base_path, self.id, 'groups', group.id, 'roles', role.id ) resp = session.put( url, ) if resp.status_code == 204: return True return False def validate_group_has_role(self, session, group, role): """Validates that a group has a role on a project""" url = utils.urljoin( self.base_path, self.id, 'groups', group.id, 'roles', role.id ) resp = session.head( url, ) if resp.status_code == 204: return True return False def unassign_role_from_group(self, session, group, role): """Unassigns a role from a group on a project""" url = utils.urljoin( self.base_path, self.id, 'groups', group.id, 'roles', role.id ) resp = session.delete( url, ) if resp.status_code == 204: return True return False class UserProject(Project): resource_key = 'project' resources_key = 'projects' base_path = '/users/%(user_id)s/projects' # capabilities allow_create = False allow_fetch = False allow_commit = False allow_delete = False allow_list = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/region.py0000664000175000017500000000234700000000000022242 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Region(resource.Resource): resource_key = 'region' resources_key = 'regions' base_path = '/regions' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'parent_region_id', ) # Properties #: User-facing description of the region. *Type: string* description = resource.Body('description') #: The links for the region resource. links = resource.Body('links') #: ID of parent region, if any. *Type: string* parent_region_id = resource.Body('parent_region_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/registered_limit.py0000664000175000017500000000406600000000000024312 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class RegisteredLimit(resource.Resource): resource_key = 'registered_limit' resources_key = 'registered_limits' base_path = '/registered_limits' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' commit_jsonpatch = True _query_mapping = resource.QueryParameters( 'service_id', 'region_id', 'resource_name' ) # Properties #: User-facing description of the registered_limit. *Type: string* description = resource.Body('description') #: The links for the registered_limit resource. links = resource.Body('links') #: ID of service. *Type: string* service_id = resource.Body('service_id') #: ID of region, if any. *Type: string* region_id = resource.Body('region_id') #: The resource name. *Type: string* resource_name = resource.Body('resource_name') #: The default limit value. *Type: int* default_limit = resource.Body('default_limit') def _prepare_request_body(self, patch, prepend_key): body = self._body.dirty if prepend_key and self.resource_key is not None: if patch: body = {self.resource_key: body} else: # Keystone support bunch create for unified limit. So the # request body for creating registered_limit is a list instead # of dict. body = {self.resources_key: [body]} return body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/role.py0000664000175000017500000000246600000000000021722 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Role(resource.Resource): resource_key = 'role' resources_key = 'roles' base_path = '/roles' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters('name', 'domain_id') # Properties #: Unique role name, within the owning domain. *Type: string* name = resource.Body('name') #: User-facing description of the role. *Type: string* description = resource.Body('description') #: References the domain ID which owns the role. *Type: string* domain_id = resource.Body('domain_id') #: The links for the service resource. links = resource.Body('links') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/role_assignment.py0000664000175000017500000000343000000000000024142 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class RoleAssignment(resource.Resource): resource_key = 'role_assignment' resources_key = 'role_assignments' base_path = '/role_assignments' # capabilities allow_list = True _query_mapping = resource.QueryParameters( 'group_id', 'role_id', 'scope_domain_id', 'scope_project_id', 'user_id', 'effective', 'include_names', 'include_subtree', role_id='role.id', user_id='user.id', group_id='group.id', scope_project_id='scope.project.id', scope_domain_id='scope.domain.id', scope_system='scope.system', inherited_to='scope.OS-INHERIT:inherited_to', ) # Properties #: The links for the service resource. links = resource.Body('links') #: The role (dictionary contains only id) *Type: dict* role = resource.Body('role', type=dict) #: The scope (either domain or project; dictionary contains only id) *Type: dict* scope = resource.Body('scope', type=dict) #: The user (dictionary contains only id) *Type: dict* user = resource.Body('user', type=dict) #: The group (dictionary contains only id) *Type: dict* group = resource.Body('group', type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/role_domain_group_assignment.py0000664000175000017500000000223300000000000026705 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class RoleDomainGroupAssignment(resource.Resource): resource_key = 'role' resources_key = 'roles' base_path = '/domains/%(domain_id)s/groups/%(group_id)s/roles' # capabilities allow_list = True # Properties #: name of the role *Type: string* name = resource.Body('name') #: The links for the service resource. links = resource.Body('links') #: The ID of the domain to list assignment from. *Type: string* domain_id = resource.URI('domain_id') #: The ID of the group to list assignment from. *Type: string* group_id = resource.URI('group_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/role_domain_user_assignment.py0000664000175000017500000000222500000000000026530 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class RoleDomainUserAssignment(resource.Resource): resource_key = 'role' resources_key = 'roles' base_path = '/domains/%(domain_id)s/users/%(user_id)s/roles' # capabilities allow_list = True # Properties #: name of the role *Type: string* name = resource.Body('name') #: The links for the service resource. links = resource.Body('links') #: The ID of the domain to list assignment from. *Type: string* domain_id = resource.URI('domain_id') #: The ID of the user to list assignment from. 
*Type: string* user_id = resource.URI('user_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/role_project_group_assignment.py0000664000175000017500000000224100000000000027103 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class RoleProjectGroupAssignment(resource.Resource): resource_key = 'role' resources_key = 'roles' base_path = '/projects/%(project_id)s/groups/%(group_id)s/roles' # capabilities allow_list = True # Properties #: name of the role *Type: string* name = resource.Body('name') #: The links for the service resource. links = resource.Body('links') #: The ID of the project to list assignment from. *Type: string* project_id = resource.URI('project_id') #: The ID of the group to list assignment from. *Type: string* group_id = resource.URI('group_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/role_project_user_assignment.py0000664000175000017500000000223300000000000026726 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class RoleProjectUserAssignment(resource.Resource): resource_key = 'role' resources_key = 'roles' base_path = '/projects/%(project_id)s/users/%(user_id)s/roles' # capabilities allow_list = True # Properties #: name of the role *Type: string* name = resource.Body('name') #: The links for the service resource. links = resource.Body('links') #: The ID of the project to list assignment from. *Type: string* project_id = resource.URI('project_id') #: The ID of the user to list assignment from. *Type: string* user_id = resource.URI('user_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/role_system_group_assignment.py0000664000175000017500000000177000000000000026767 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class RoleSystemGroupAssignment(resource.Resource): resource_key = 'role' resources_key = 'roles' base_path = '/system/groups/%(group_id)s/roles' # capabilities allow_list = True # Properties #: The ID of the group to list assignment from. *Type: string* group_id = resource.URI('group_id') #: The name of the system to list assignment from. *Type: string* system_id = resource.URI('system_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/role_system_user_assignment.py0000664000175000017500000000176200000000000026612 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class RoleSystemUserAssignment(resource.Resource): resource_key = 'role' resources_key = 'roles' base_path = '/system/users/%(user_id)s/roles' # capabilities allow_list = True # Properties #: The name of the system to list assignment from. *Type: string* system_id = resource.URI('system_id') #: The ID of the user to list assignment from. 
*Type: string* user_id = resource.URI('user_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/service.py0000664000175000017500000000340600000000000022414 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Service(resource.Resource): resource_key = 'service' resources_key = 'services' base_path = '/services' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'name', 'type', ) # Properties #: User-facing description of the service. *Type: string* description = resource.Body('description') #: Setting this value to ``False`` prevents the service and #: its endpoints from appearing in the service catalog. *Type: bool* is_enabled = resource.Body('enabled', type=bool) #: The links for the service resource. links = resource.Body('links') #: User-facing name of the service. *Type: string* name = resource.Body('name') #: Describes the API implemented by the service. The following values are #: recognized within the OpenStack ecosystem: ``compute``, ``image``, #: ``ec2``, ``identity``, ``volume``, ``network``. To support non-core and #: future projects, the value should not be validated against this list. 
#: *Type: string* type = resource.Body('type') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/service_provider.py0000664000175000017500000000315000000000000024322 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ServiceProvider(resource.Resource): resource_key = 'service_provider' resources_key = 'service_providers' base_path = '/OS-FEDERATION/service_providers' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True create_method = 'PUT' create_exclude_id_from_body = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'id', is_enabled='enabled', ) # Properties #: The URL to authenticate against. auth_url = resource.Body('auth_url') #: A description of this service provider. description = resource.Body('description') #: If the service provider is currently enabled. is_enabled = resource.Body('enabled', type=bool) #: The identifier of the service provider. name = resource.Body('id') #: The prefix of the RelayState SAML attribute. relay_state_prefix = resource.Body('relay_state_prefix') #: The service provider's URL. 
sp_url = resource.Body('sp_url') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/system.py0000664000175000017500000000532300000000000022300 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource from openstack import utils class System(resource.Resource): resource_key = 'system' base_path = '/system' # capabilities def assign_role_to_user(self, session, user, role): """Assign role to user on system""" url = utils.urljoin(self.base_path, 'users', user.id, 'roles', role.id) resp = session.put( url, ) if resp.status_code == 204: return True return False def validate_user_has_role(self, session, user, role): """Validates that a user has a role on a system""" url = utils.urljoin(self.base_path, 'users', user.id, 'roles', role.id) resp = session.head( url, ) if resp.status_code == 204: return True return False def unassign_role_from_user(self, session, user, role): """Unassigns a role from a user on a system""" url = utils.urljoin(self.base_path, 'users', user.id, 'roles', role.id) resp = session.delete( url, ) if resp.status_code == 204: return True return False def assign_role_to_group(self, session, group, role): """Assign role to group on system""" url = utils.urljoin( self.base_path, 'groups', group.id, 'roles', role.id ) resp = session.put( url, ) if resp.status_code == 204: return True return False def 
validate_group_has_role(self, session, group, role): """Validates that a group has a role on a system""" url = utils.urljoin( self.base_path, 'groups', group.id, 'roles', role.id ) resp = session.head( url, ) if resp.status_code == 204: return True return False def unassign_role_from_group(self, session, group, role): """Unassigns a role from a group on a system""" url = utils.urljoin( self.base_path, 'groups', group.id, 'roles', role.id ) resp = session.delete( url, ) if resp.status_code == 204: return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/trust.py0000664000175000017500000000640200000000000022134 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Trust(resource.Resource): resource_key = 'trust' resources_key = 'trusts' base_path = '/OS-TRUST/trusts' # capabilities allow_create = True allow_fetch = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'trustor_user_id', 'trustee_user_id' ) # Properties #: A boolean indicating whether the trust can be issued by the trustee as #: a regulart trust. Default is ``False``. allow_redelegation = resource.Body('allow_redelegation', type=bool) #: Specifies the expiration time of the trust. A trust may be revoked #: ahead of expiration. If the value represents a time in the past, #: the trust is deactivated. 
expires_at = resource.Body('expires_at') #: If ``impersonation`` is set to true, then the ``user`` attribute #: of tokens that are generated based on the trust will represent #: that of the trustor rather than the trustee, thus allowing the trustee #: to impersonate the trustor. #: If ``impersonation`` is set to ``False``, then the token's ``user`` #: attribute will represent that of the trustee. *Type: bool* is_impersonation = resource.Body('impersonation', type=bool) #: Links for the trust resource. links = resource.Body('links') #: ID of the project upon which the trustor is #: delegating authorization. *Type: string* project_id = resource.Body('project_id') #: A role links object that includes 'next', 'previous', and self links #: for roles. role_links = resource.Body('role_links') #: Specifies the subset of the trustor's roles on the ``project_id`` #: to be granted to the trustee when the token in consumed. The #: trustor must already be granted these roles in the project referenced #: by the ``project_id`` attribute. *Type: list* roles = resource.Body('roles') #: Returned with redelegated trust provides information about the #: predecessor in the trust chain. redelegated_trust_id = resource.Body('redelegated_trust_id') #: Redelegation count redelegation_count = resource.Body('redelegation_count') #: How many times the trust can be used to obtain a token. The value is #: decreased each time a token is issued through the trust. Once it #: reaches zero, no further tokens will be isued through the trust. remaining_uses = resource.Body('remaining_uses') #: Represents the user ID who is capable of consuming the trust. #: *Type: string* trustee_user_id = resource.Body('trustee_user_id') #: Represents the user ID who created the trust, and who's authorization is #: being delegated. 
*Type: string* trustor_user_id = resource.Body('trustor_user_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/v3/user.py0000664000175000017500000000576100000000000021740 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class User(resource.Resource): resource_key = 'user' resources_key = 'users' base_path = '/users' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' _query_mapping = resource.QueryParameters( 'domain_id', 'name', 'password_expires_at', is_enabled='enabled', ) # Properties #: References the user's default project ID against which to authorize, #: if the API user does not explicitly specify one when creating a token. #: Setting this attribute does not grant any actual authorization on the #: project, and is merely provided for the user's convenience. #: Therefore, the referenced project does not need to exist within the #: user's domain. #: #: *New in version 3.1* If the user does not have authorization to #: their default project, the default project will be ignored at token #: creation. *Type: string* default_project_id = resource.Body('default_project_id') #: The description of this user. 
*Type: string* description = resource.Body('description') #: References the domain ID which owns the user; if a domain ID is not #: specified by the client, the Identity service implementation will #: default it to the domain ID to which the client's token is scoped. #: *Type: string* domain_id = resource.Body('domain_id') #: The email of this user. *Type: string* email = resource.Body('email') #: Setting this value to ``False`` prevents the user from authenticating or #: receiving authorization. Additionally, all pre-existing tokens held by #: the user are immediately invalidated. Re-enabling a user does not #: re-enable pre-existing tokens. *Type: bool* is_enabled = resource.Body('enabled', type=bool) #: The links for the user resource. links = resource.Body('links') #: Unique user name, within the owning domain. *Type: string* name = resource.Body('name') #: The default form of credential used during authentication. #: *Type: string* password = resource.Body('password') #: The date and time when the password expires. The time zone is UTC. #: A None value means the password never expires. #: This is a response object attribute, not valid for requests. #: *New in version 3.7* password_expires_at = resource.Body('password_expires_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/identity/version.py0000664000175000017500000000227000000000000022107 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # capabilities allow_list = True # Properties media_types = resource.Body('media-types') status = resource.Body('status') updated = resource.Body('updated') @classmethod def list(cls, session, paginated=False, base_path=None, **params): if base_path is None: base_path = cls.base_path resp = session.get(base_path, params=params) resp = resp.json() for data in resp[cls.resources_key]['values']: yield cls.existing(**data) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2733269 openstacksdk-4.0.0/openstack/image/0000775000175000017500000000000000000000000017300 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/__init__.py0000664000175000017500000000000000000000000021377 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/_download.py0000664000175000017500000000676200000000000021633 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import io from openstack import exceptions from openstack import resource from openstack import utils def _verify_checksum(md5, checksum): if checksum: digest = md5.hexdigest() if digest != checksum: raise exceptions.InvalidResponse( f"checksum mismatch: {checksum} != {digest}" ) class DownloadMixin: id: resource.Body base_path: str def fetch( self, session, requires_id=True, base_path=None, error_message=None, skip_cache=False, *, resource_response_key=None, microversion=None, **params, ): ... def download( self, session, stream=False, output=None, chunk_size=1024 * 1024, ): """Download the data contained in an image""" # TODO(briancurtin): This method should probably offload the get # operation into another thread or something of that nature. url = utils.urljoin(self.base_path, self.id, 'file') resp = session.get(url, stream=stream) # See the following bug report for details on why the checksum # code may sometimes depend on a second GET call. # https://storyboard.openstack.org/#!/story/1619675 checksum = resp.headers.get("Content-MD5") if checksum is None: # If we don't receive the Content-MD5 header with the download, # make an additional call to get the image details and look at # the checksum attribute. details = self.fetch(session) checksum = details.checksum md5 = utils.md5(usedforsecurity=False) if output: try: if isinstance(output, io.IOBase): for chunk in resp.iter_content(chunk_size=chunk_size): output.write(chunk) md5.update(chunk) else: with open(output, 'wb') as fd: for chunk in resp.iter_content(chunk_size=chunk_size): fd.write(chunk) md5.update(chunk) _verify_checksum(md5, checksum) return resp except Exception as e: raise exceptions.SDKException( "Unable to download image: %s" % e ) # if we are returning the repsonse object, ensure that it # has the content-md5 header so that the caller doesn't # need to jump through the same hoops through which we # just jumped. 
if stream: resp.headers['content-md5'] = checksum return resp if checksum is not None: _verify_checksum( utils.md5(resp.content, usedforsecurity=False), checksum ) else: session.log.warning( "Unable to verify the integrity of image %s", (self.id) ) return resp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/image_service.py0000664000175000017500000000155300000000000022460 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v1 import _proxy as _proxy_v1 from openstack.image.v2 import _proxy as _proxy_v2 from openstack import service_description class ImageService(service_description.ServiceDescription): """The image service.""" supported_versions = { '1': _proxy_v1.Proxy, '2': _proxy_v2.Proxy, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/image_signer.py0000664000175000017500000000507400000000000022311 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.asymmetric import utils from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives import serialization from openstack import exceptions from openstack.image.iterable_chunked_file import IterableChunkedFile HASH_METHODS = { 'SHA-224': hashes.SHA224(), 'SHA-256': hashes.SHA256(), 'SHA-384': hashes.SHA384(), 'SHA-512': hashes.SHA512(), } class ImageSigner: """Image file signature generator. Generates signatures for files using a specified private key file. 
""" def __init__(self, hash_method='SHA-256', padding_method='RSA-PSS'): padding_types = { 'RSA-PSS': padding.PSS( mgf=padding.MGF1(HASH_METHODS[hash_method]), salt_length=padding.PSS.MAX_LENGTH, ) } # informational attributes self.hash_method = hash_method self.padding_method = padding_method # runtime objects self.private_key = None self.hash = HASH_METHODS[hash_method] self.hasher = hashes.Hash(self.hash, default_backend()) self.padding = padding_types[padding_method] def load_private_key(self, file_path, password=None): with open(file_path, 'rb') as key_file: self.private_key = serialization.load_pem_private_key( key_file.read(), password=password, backend=default_backend() ) def generate_signature(self, file_obj): if not self.private_key: raise exceptions.SDKException("private_key not set") file_obj.seek(0) chunked_file = IterableChunkedFile(file_obj) for chunk in chunked_file: self.hasher.update(chunk) file_obj.seek(0) digest = self.hasher.finalize() signature = self.private_key.sign( digest, self.padding, utils.Prehashed(self.hash) ) return signature ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/iterable_chunked_file.py0000664000175000017500000000251300000000000024142 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # class IterableChunkedFile: """File object chunk iterator using yield. 
Represents a local file as an iterable object by splitting the file into chunks. Avoids the file from being completely loaded into memory. """ def __init__(self, file_object, chunk_size=1024 * 1024 * 128, close=False): self.close_after_read = close self.file_object = file_object self.chunk_size = chunk_size def __iter__(self): try: while True: data = self.file_object.read(self.chunk_size) if not data: break yield data finally: if self.close_after_read: self.file_object.close() def __len__(self): return len(self.file_object) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2773287 openstacksdk-4.0.0/openstack/image/v1/0000775000175000017500000000000000000000000017626 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v1/__init__.py0000664000175000017500000000000000000000000021725 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v1/_proxy.py0000664000175000017500000004265200000000000021531 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import warnings from openstack import exceptions as exc from openstack.image.v1 import image as _image from openstack import proxy from openstack import utils from openstack import warnings as os_warnings def _get_name_and_filename(name, image_format): # See if name points to an existing file if os.path.exists(name): # Neat. Easy enough return os.path.splitext(os.path.basename(name))[0], name # Try appending the disk format name_with_ext = '.'.join((name, image_format)) if os.path.exists(name_with_ext): return os.path.basename(name), name_with_ext return name, None class Proxy(proxy.Proxy): retriable_status_codes = [503] _IMAGE_MD5_KEY = 'owner_specified.openstack.md5' _IMAGE_SHA256_KEY = 'owner_specified.openstack.sha256' _IMAGE_OBJECT_KEY = 'owner_specified.openstack.object' # NOTE(shade) shade keys were owner_specified.shade.md5 - we need to add # those to freshness checks so that a shade->sdk transition # doesn't result in a re-upload _SHADE_IMAGE_MD5_KEY = 'owner_specified.shade.md5' _SHADE_IMAGE_SHA256_KEY = 'owner_specified.shade.sha256' _SHADE_IMAGE_OBJECT_KEY = 'owner_specified.shade.object' # ====== IMAGES ====== def create_image( self, name, filename=None, container=None, md5=None, sha256=None, disk_format=None, container_format=None, disable_vendor_agent=True, allow_duplicates=False, meta=None, data=None, validate_checksum=False, tags=None, **kwargs, ): """Create an image and optionally upload data. Create a new image. If ``filename`` or ``data`` are provided, it will also upload data to this image. :param str name: Name of the image to create. If it is a path name of an image, the name will be constructed from the extensionless basename of the path. :param str filename: The path to the file to upload, if needed. (optional, defaults to None) :param data: Image data (string or file-like object). 
It is mutually exclusive with filename :param str container: Name of the container in swift where images should be uploaded for import if the cloud requires such a thing. (optional, defaults to 'images') :param str md5: md5 sum of the image file. If not given, an md5 will be calculated. :param str sha256: sha256 sum of the image file. If not given, an md5 will be calculated. :param str disk_format: The disk format the image is in. (optional, defaults to the os-client-config config value for this cloud) :param str container_format: The container format the image is in. (optional, defaults to the os-client-config config value for this cloud) :param list tags: List of tags for this image. Each tag is a string of at most 255 chars. :param bool disable_vendor_agent: Whether or not to append metadata flags to the image to inform the cloud in question to not expect a vendor agent to be runing. (optional, defaults to True) :param allow_duplicates: If true, skips checks that enforce unique image name. (optional, defaults to False) :param meta: A dict of key/value pairs to use for metadata that bypasses automatic type conversion. :param bool validate_checksum: If true and cloud returns checksum, compares return value with the one calculated or passed into this call. If value does not match - raises exception. Default is 'false' Additional kwargs will be passed to the image creation as additional metadata for the image and will have all values converted to string except for min_disk, min_ram, size and virtual_size which will be converted to int. If you are sure you have all of your data types correct or have an advanced need to be explicit, use meta. If you are just a normal consumer, using kwargs is likely the right choice. If a value is in meta and kwargs, meta wins. 
:returns: The results of image creation :rtype: :class:`~openstack.image.v1.image.Image` :raises: SDKException if there are problems uploading """ # these were previously provided for API (method) compatibility; that # was a bad idea if ( 'use_import' in kwargs or 'stores' in kwargs or 'all_stores' in kwargs or 'all_stores_must_succeed' in kwargs ): raise exc.InvalidRequest( "Glance v1 does not support stores or image import" ) # silently ignore these; they were never supported and were only given # for API (method) compatibility kwargs.pop('wait') kwargs.pop('timeout') if container is None: container = self._connection._OBJECT_AUTOCREATE_CONTAINER if not meta: meta = {} if not disk_format: disk_format = self._connection.config.config['image_format'] if not container_format: # https://docs.openstack.org/image-guide/image-formats.html container_format = 'bare' if data and filename: raise exc.SDKException( 'Passing filename and data simultaneously is not supported' ) # If there is no filename, see if name is actually the filename if not filename and not data: name, filename = _get_name_and_filename( name, self._connection.config.config['image_format'], ) if validate_checksum and data and not isinstance(data, bytes): raise exc.SDKException( 'Validating checksum is not possible when data is not a ' 'direct binary object' ) if not (md5 or sha256) and validate_checksum: if filename: md5, sha256 = utils._get_file_hashes(filename) elif data and isinstance(data, bytes): md5, sha256 = utils._calculate_data_hashes(data) if allow_duplicates: current_image = None else: current_image = self.find_image(name) if current_image: # NOTE(pas-ha) 'properties' may be absent or be None props = current_image.get('properties') or {} md5_key = props.get( self._IMAGE_MD5_KEY, props.get(self._SHADE_IMAGE_MD5_KEY, ''), ) sha256_key = props.get( self._IMAGE_SHA256_KEY, props.get(self._SHADE_IMAGE_SHA256_KEY, ''), ) up_to_date = utils._hashes_up_to_date( md5=md5, sha256=sha256, md5_key=md5_key, 
sha256_key=sha256_key, ) if up_to_date: self.log.debug( "image %(name)s exists and is up to date", {'name': name}, ) return current_image else: self.log.debug( "image %(name)s exists, but contains different " "checksums. Updating.", {'name': name}, ) if disable_vendor_agent: kwargs.update( self._connection.config.config['disable_vendor_agent'] ) # If a user used the v1 calling format, they will have # passed a dict called properties along properties = kwargs.pop('properties', {}) properties[self._IMAGE_MD5_KEY] = md5 or '' properties[self._IMAGE_SHA256_KEY] = sha256 or '' properties[self._IMAGE_OBJECT_KEY] = '/'.join([container, name]) kwargs.update(properties) image_kwargs = {'properties': kwargs} if disk_format: image_kwargs['disk_format'] = disk_format if container_format: image_kwargs['container_format'] = container_format if tags: image_kwargs['tags'] = tags if filename or data: image = self._upload_image( name, filename=filename, data=data, meta=meta, validate_checksum=validate_checksum, **image_kwargs, ) else: image = self._create(_image.Image, name=name, **kwargs) return image def upload_image(self, **attrs): """Upload a new image from attributes .. warning: This method is deprecated - and also doesn't work very well. Please stop using it immediately and switch to `create_image`. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.image.v1.image.Image`, comprised of the properties on the Image class. :returns: The results of image creation :rtype: :class:`~openstack.image.v1.image.Image` """ warnings.warn( "upload_image is deprecated. 
Use create_image instead.", os_warnings.OpenStackDeprecationWarning, ) return self._create(_image.Image, **attrs) def _upload_image( self, name, filename, data, meta, **image_kwargs, ): if filename and not data: image_data = open(filename, 'rb') else: image_data = data image_kwargs['properties'].update(meta) image_kwargs['name'] = name # TODO(mordred) Convert this to use image Resource image = self._connection._get_and_munchify( 'image', self.post('/images', json=image_kwargs) ) checksum = image_kwargs['properties'].get(self._IMAGE_MD5_KEY, '') try: # Let us all take a brief moment to be grateful that this # is not actually how OpenStack APIs work anymore headers = { 'x-glance-registry-purge-props': 'false', } if checksum: headers['x-image-meta-checksum'] = checksum image = self._connection._get_and_munchify( 'image', self.put( f'/images/{image.id}', headers=headers, data=image_data, ), ) except exc.HttpException: self.log.debug("Deleting failed upload of image %s", name) try: self.delete(f'/images/{image.id}') except exc.HttpException: # We're just trying to clean up - if it doesn't work - shrug self.log.warning( "Failed deleting image after we failed uploading it.", exc_info=True, ) raise return image def _existing_image(self, **kwargs): return _image.Image.existing(connection=self._connection, **kwargs) def delete_image(self, image, ignore_missing=True): """Delete an image :param image: The value can be either the ID of an image or a :class:`~openstack.image.v1.image.Image` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the image does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent image. :returns: ``None`` """ self._delete(_image.Image, image, ignore_missing=ignore_missing) def find_image(self, name_or_id, ignore_missing=True): """Find a single image :param name_or_id: The name or ID of a image. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.image.v1.image.Image` or None """ return self._find( _image.Image, name_or_id, ignore_missing=ignore_missing ) def get_image(self, image): """Get a single image :param image: The value can be the ID of an image or a :class:`~openstack.image.v1.image.Image` instance. :returns: One :class:`~openstack.image.v1.image.Image` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_image.Image, image) def images(self, **query): """Return a generator of images :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of image objects :rtype: :class:`~openstack.image.v1.image.Image` """ return self._list(_image.Image, base_path='/images/detail', **query) def update_image(self, image, **attrs): """Update a image :param image: Either the ID of a image or a :class:`~openstack.image.v1.image.Image` instance. :param attrs: The attributes to update on the image represented by ``image``. :returns: The updated image :rtype: :class:`~openstack.image.v1.image.Image` """ return self._update(_image.Image, image, **attrs) def download_image( self, image, stream=False, output=None, chunk_size=1024 * 1024, ): """Download an image This will download an image to memory when ``stream=False``, or allow streaming downloads using an iterator when ``stream=True``. For examples of working with streamed responses, see :ref:`download_image-stream-true`. :param image: The value can be either the ID of an image or a :class:`~openstack.image.v2.image.Image` instance. 
:param bool stream: When ``True``, return a :class:`requests.Response` instance allowing you to iterate over the response data stream instead of storing its entire contents in memory. See :meth:`requests.Response.iter_content` for more details. *NOTE*: If you do not consume the entirety of the response you must explicitly call :meth:`requests.Response.close` or otherwise risk inefficiencies with the ``requests`` library's handling of connections. When ``False``, return the entire contents of the response. :param output: Either a file object or a path to store data into. :param int chunk_size: size in bytes to read from the wire and buffer at one time. Defaults to 1024 * 1024 = 1 MiB :returns: When output is not given - the bytes comprising the given Image when stream is False, otherwise a :class:`requests.Response` instance. When output is given - a :class:`~openstack.image.v2.image.Image` instance. """ image = self._get_resource(_image.Image, image) return image.download( self, stream=stream, output=output, chunk_size=chunk_size, ) def _update_image_properties(self, image, meta, properties): properties.update(meta) img_props = {} for k, v in iter(properties.items()): if image.properties.get(k, None) != v: img_props[f'x-image-meta-{k}'] = v if not img_props: return False self.put(f'/images/{image.id}', headers=img_props) return True def update_image_properties( self, image=None, meta=None, **kwargs, ): """ Update the properties of an existing image. :param image: Name or id of an image or an Image object. :param meta: A dict of key/value pairs to use for metadata that bypasses automatic type conversion. Additional kwargs will be passed to the image creation as additional metadata for the image and will have all values converted to string except for min_disk, min_ram, size and virtual_size which will be converted to int. 
""" if isinstance(image, str): image = self._connection.get_image(image) if not meta: meta = {} img_props = {} for k, v in iter(kwargs.items()): if v and k in ['ramdisk', 'kernel']: v = self._connection.get_image_id(v) k = f'{k}_id' img_props[k] = v return self._update_image_properties(image, meta, img_props) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v1/image.py0000664000175000017500000001330100000000000021260 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack.image import _download from openstack import resource class Image(resource.Resource, _download.DownloadMixin): resource_key = 'image' resources_key = 'images' base_path = '/images' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Store all unknown attributes under 'properties' in the object. # Remotely they would be still in the resource root _store_unknown_attrs_as_properties = True _query_mapping = resource.QueryParameters( 'name', 'container_format', 'disk_format', 'status', 'size_min', 'size_max', ) #: Hash of the image data used. The Image service uses this value #: for verification. checksum = resource.Body('checksum') #: The container format refers to whether the VM image is in a file #: format that also contains metadata about the actual VM. 
#: Container formats include OVF and Amazon AMI. In addition, #: a VM image might not have a container format - instead, #: the image is just a blob of unstructured data. container_format = resource.Body('container_format') #: A URL to copy an image from copy_from = resource.Body('copy_from') #: The timestamp when this image was created. created_at = resource.Body('created_at') #: Valid values are: aki, ari, ami, raw, iso, vhd, vdi, qcow2, or vmdk. #: The disk format of a VM image is the format of the underlying #: disk image. Virtual appliance vendors have different formats for #: laying out the information contained in a VM disk image. disk_format = resource.Body('disk_format') #: Defines whether the image can be deleted. #: *Type: bool* is_protected = resource.Body('protected', type=bool) #: ``True`` if this is a public image. #: *Type: bool* is_public = resource.Body('is_public', type=bool) #: A location for the image identified by a URI location = resource.Body('location') #: The minimum disk size in GB that is required to boot the image. min_disk = resource.Body('min_disk') #: The minimum amount of RAM in MB that is required to boot the image. min_ram = resource.Body('min_ram') #: Name for the image. Note that the name of an image is not unique #: to a Glance node. The API cannot expect users to know the names #: of images owned by others. name = resource.Body('name') #: The ID of the owner, or project, of the image. owner = resource.Body('owner', alias='owner_id') #: The ID of the owner, or project, of the image. (backwards compat) owner_id = resource.Body('owner', alias='owner') #: Properties, if any, that are associated with the image. properties = resource.Body('properties') #: The size of the image data, in bytes. size = resource.Body('size') #: The image status. status = resource.Body('status') #: The timestamp when this image was last updated. 
updated_at = resource.Body('updated_at') @classmethod def find(cls, session, name_or_id, ignore_missing=True, **params): """Find a resource by its name or id. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param name_or_id: This resource's identifier, if needed by the request. The default is ``None``. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict params: Any additional parameters to be passed into underlying methods, such as to :meth:`~openstack.resource.Resource.existing` in order to pass on URI parameters. :return: The :class:`Resource` object matching the given name or id or None if nothing matches. :raises: :class:`openstack.exceptions.DuplicateResource` if more than one resource is found for this request. :raises: :class:`openstack.exceptions.NotFoundException` if nothing is found and ignore_missing is ``False``. """ session = cls._get_session(session) # Try to short-circuit by looking directly for a matching ID. 
try: match = cls.existing( id=name_or_id, connection=session._get_connection(), **params, ) return match.fetch(session, **params) except exceptions.NotFoundException: pass params['name'] = name_or_id data = cls.list(session, base_path='/images/detail', **params) result = cls._get_one_match(name_or_id, data) if result is not None: return result if ignore_missing: return None raise exceptions.NotFoundException( f"No {cls.__name__} found for {name_or_id}" ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2813306 openstacksdk-4.0.0/openstack/image/v2/0000775000175000017500000000000000000000000017627 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/__init__.py0000664000175000017500000000000000000000000021726 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/_proxy.py0000664000175000017500000022300000000000000021516 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import time import typing as ty import warnings from openstack import exceptions from openstack.image.v2 import cache as _cache from openstack.image.v2 import image as _image from openstack.image.v2 import member as _member from openstack.image.v2 import metadef_namespace as _metadef_namespace from openstack.image.v2 import metadef_object as _metadef_object from openstack.image.v2 import metadef_property as _metadef_property from openstack.image.v2 import metadef_resource_type as _metadef_resource_type from openstack.image.v2 import metadef_schema as _metadef_schema from openstack.image.v2 import schema as _schema from openstack.image.v2 import service_info as _si from openstack.image.v2 import task as _task from openstack import proxy from openstack import resource from openstack import utils from openstack import warnings as os_warnings # Rackspace returns this for intermittent import errors _IMAGE_ERROR_396 = "Image cannot be imported. Error code: '396'" _INT_PROPERTIES = ('min_disk', 'min_ram', 'size', 'virtual_size') _RAW_PROPERTIES = ('is_protected', 'protected', 'tags') def _get_name_and_filename(name, image_format): # See if name points to an existing file if os.path.exists(name) and os.path.isfile(name): # Neat. 
Easy enough return os.path.splitext(os.path.basename(name))[0], name # Try appending the disk format name_with_ext = '.'.join((name, image_format)) if os.path.exists(name_with_ext) and os.path.isfile(name): return os.path.basename(name), name_with_ext return name, None class Proxy(proxy.Proxy): _resource_registry = { "cache": _cache.Cache, "image": _image.Image, "image_member": _member.Member, "metadef_namespace": _metadef_namespace.MetadefNamespace, "metadef_resource_type": _metadef_resource_type.MetadefResourceType, "metadef_resource_type_association": _metadef_resource_type.MetadefResourceTypeAssociation, # noqa "schema": _schema.Schema, "info_import": _si.Import, "info_store": _si.Store, "task": _task.Task, } retriable_status_codes = [503] _IMAGE_MD5_KEY = 'owner_specified.openstack.md5' _IMAGE_SHA256_KEY = 'owner_specified.openstack.sha256' _IMAGE_OBJECT_KEY = 'owner_specified.openstack.object' # NOTE(shade) shade keys were owner_specified.shade.md5 - we need to add # those to freshness checks so that a shade->sdk transition # doesn't result in a re-upload _SHADE_IMAGE_MD5_KEY = 'owner_specified.shade.md5' _SHADE_IMAGE_SHA256_KEY = 'owner_specified.shade.sha256' _SHADE_IMAGE_OBJECT_KEY = 'owner_specified.shade.object' # ====== CACHE MANAGEMENT====== def get_image_cache(self): return self._get(_cache.Cache, requires_id=False) def cache_delete_image(self, image, ignore_missing=True): """Delete an image from cache. :param image: The value can be either the name of an image or a :class:`~openstack.image.v2.image.Image` instance. :param bool ignore_missing: When set to ``False``, :class:`~openstack.exceptions.NotFoundException` will be raised when the metadef namespace does not exist. 
:returns: ``None`` """ return self._delete(_cache.Cache, image, ignore_missing=ignore_missing) def queue_image(self, image_id): """Queue image(s) for caching.""" cache = self._get_resource(_cache.Cache, None) return cache.queue(self, image_id) def clear_cache(self, target='both'): """Clear all images from cache, queue or both :param target: Specify which target you want to clear One of: ``both``(default), ``cache``, ``queue``. """ cache = self._get_resource(_cache.Cache, None) return cache.clear(self, target) # ====== IMAGES ====== def create_image( self, name, *, filename=None, data=None, container=None, md5=None, sha256=None, disk_format=None, container_format=None, tags=None, disable_vendor_agent=True, allow_duplicates=False, meta=None, wait=False, timeout=3600, validate_checksum=False, use_import=False, stores=None, all_stores=None, all_stores_must_succeed=None, **kwargs, ): """Create an image and optionally upload data Create a new image. If ``filename`` or ``data`` are provided, it will also upload data to this image. Note that uploading image data is actually quite a complicated procedure. There are three ways to upload an image: * Image upload * Image import * Image tasks If the image tasks API is enabled, this must be used. However, this API is deprecated since the Image service's Mitaka (12.0.0) release and is now admin-only. Assuming this API is not enabled, you may choose between image upload or image import. Image import is more powerful and allows you to upload data from multiple sources including other glance instances. It should be preferred on all services that support it. :param str name: Name of the image to create. If it is a pathname of an image, the name will be constructed from the extensionless basename of the path. :param str filename: The path to the file to upload, if needed. (optional, defaults to None) :param data: Image data (string or file-like object). 
It is mutually exclusive with filename :param str container: Name of the container in swift where images should be uploaded for import if the cloud requires such a thing. (optional, defaults to 'images') :param str md5: md5 sum of the image file. If not given, an md5 will be calculated. :param str sha256: sha256 sum of the image file. If not given, an md5 will be calculated. :param str disk_format: The disk format the image is in. (optional, defaults to the os-client-config config value for this cloud) :param str container_format: The container format the image is in. (optional, defaults to the os-client-config config value for this cloud) :param list tags: List of tags for this image. Each tag is a string of at most 255 chars. :param bool disable_vendor_agent: Whether or not to append metadata flags to the image to inform the cloud in question to not expect a vendor agent to be runing. (optional, defaults to True) :param allow_duplicates: If true, skips checks that enforce unique image name. (optional, defaults to False) :param meta: A dict of key/value pairs to use for metadata that bypasses automatic type conversion. :param bool wait: If true, waits for image to be created. Defaults to true - however, be aware that one of the upload methods is always synchronous. :param timeout: Seconds to wait for image creation. None is forever. :param bool validate_checksum: If true and cloud returns checksum, compares return value with the one calculated or passed into this call. If value does not match - raises exception. Default is 'false' :param bool use_import: Use the 'glance-direct' method of the interoperable image import mechanism to import the image. This defaults to false because it is harder on the target cloud so should only be used when needed, such as when the user needs the cloud to transform image format. If the cloud has disabled direct uploads, this will default to true. If you wish to use other import methods, use the ``import_image`` method instead. 
:param stores: List of stores to be used when enabled_backends is activated in glance. List values can be the id of a store or a :class:`~openstack.image.v2.service_info.Store` instance. Implies ``use_import`` equals ``True``. :param all_stores: Upload to all available stores. Mutually exclusive with ``store`` and ``stores``. Implies ``use_import`` equals ``True``. :param all_stores_must_succeed: When set to True, if an error occurs during the upload in at least one store, the worfklow fails, the data is deleted from stores where copying is done (not staging), and the state of the image is unchanged. When set to False, the workflow will fail (data deleted from stores, …) only if the import fails on all stores specified by the user. In case of a partial success, the locations added to the image will be the stores where the data has been correctly uploaded. Default is True. Implies ``use_import`` equals ``True``. Additional kwargs will be passed to the image creation as additional metadata for the image and will have all values converted to string except for min_disk, min_ram, size and virtual_size which will be converted to int. If you are sure you have all of your data types correct or have an advanced need to be explicit, use meta. If you are just a normal consumer, using kwargs is likely the right choice. If a value is in meta and kwargs, meta wins. 
:returns: The results of image creation :rtype: :class:`~openstack.image.v2.image.Image` :raises: SDKException if there are problems uploading """ if filename and data: raise exceptions.SDKException( 'filename and data are mutually exclusive' ) if container is None: container = self._connection._OBJECT_AUTOCREATE_CONTAINER if not meta: meta = {} if not disk_format: disk_format = self._connection.config.config['image_format'] if not container_format: # https://docs.openstack.org/image-guide/image-formats.html container_format = 'bare' # If there is no filename, see if name is actually the filename if not filename and not data: name, filename = _get_name_and_filename( name, self._connection.config.config['image_format'], ) if validate_checksum and data and not isinstance(data, bytes): raise exceptions.SDKException( 'Validating checksum is not possible when data is not a ' 'direct binary object' ) if not (md5 or sha256) and validate_checksum: if filename: md5, sha256 = utils._get_file_hashes(filename) elif data and isinstance(data, bytes): md5, sha256 = utils._calculate_data_hashes(data) if allow_duplicates: current_image = None else: current_image = self.find_image(name) if current_image: # NOTE(pas-ha) 'properties' may be absent or be None props = current_image.get('properties') or {} md5_key = props.get( self._IMAGE_MD5_KEY, props.get(self._SHADE_IMAGE_MD5_KEY, ''), ) sha256_key = props.get( self._IMAGE_SHA256_KEY, props.get(self._SHADE_IMAGE_SHA256_KEY, ''), ) up_to_date = utils._hashes_up_to_date( md5=md5, sha256=sha256, md5_key=md5_key, sha256_key=sha256_key, ) if up_to_date: self.log.debug( "image %(name)s exists and is up to date", {'name': name}, ) return current_image else: self.log.debug( "image %(name)s exists, but contains different " "checksums. 
Updating.", {'name': name}, ) if disable_vendor_agent: kwargs.update( self._connection.config.config['disable_vendor_agent'] ) # If a user used the v1 calling format, they will have # passed a dict called properties along properties = kwargs.pop('properties', {}) properties[self._IMAGE_MD5_KEY] = md5 or '' properties[self._IMAGE_SHA256_KEY] = sha256 or '' properties[self._IMAGE_OBJECT_KEY] = '/'.join([container, name]) kwargs.update(properties) image_kwargs = {'properties': kwargs} if disk_format: image_kwargs['disk_format'] = disk_format if container_format: image_kwargs['container_format'] = container_format if tags: image_kwargs['tags'] = tags if filename or data: image = self._upload_image( name, filename=filename, data=data, meta=meta, wait=wait, timeout=timeout, validate_checksum=validate_checksum, use_import=use_import, stores=stores, all_stores=all_stores, all_stores_must_succeed=all_stores_must_succeed, **image_kwargs, ) else: properties = image_kwargs.pop('properties', {}) image_kwargs.update(self._make_v2_image_params(meta, properties)) image_kwargs['name'] = name image = self._create(_image.Image, **image_kwargs) return image def import_image( self, image, method='glance-direct', *, uri=None, remote_region=None, remote_image_id=None, remote_service_interface=None, store=None, stores=None, all_stores=None, all_stores_must_succeed=None, ): """Import data to an existing image Interoperable image import process are introduced in the Image API v2.6. It mainly allow image importing from an external url and let Image Service download it by itself without sending binary data at image creation. :param image: The value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. :param method: Method to use for importing the image. Not all deployments support all methods. One of: ``glance-direct`` (default), ``web-download``, ``glance-download``, or ``copy-image``. Use of ``glance-direct`` requires the image be first staged. 
:param uri: Required only if using the ``web-download`` import method. This url is where the data is made available to the Image service. :param remote_region: The remote glance region to download the image from when using glance-download. :param remote_image_id: The ID of the image to import from the remote glance when using glance-download. :param remote_service_interface: The remote glance service interface to use when using glance-download. :param store: Used when enabled_backends is activated in glance. The value can be the id of a store or a. :class:`~openstack.image.v2.service_info.Store` instance. :param stores: List of stores to be used when enabled_backends is activated in glance. List values can be the id of a store or a :class:`~openstack.image.v2.service_info.Store` instance. :param all_stores: Upload to all available stores. Mutually exclusive with ``store`` and ``stores``. :param all_stores_must_succeed: When set to True, if an error occurs during the upload in at least one store, the worfklow fails, the data is deleted from stores where copying is done (not staging), and the state of the image is unchanged. When set to False, the workflow will fail (data deleted from stores, …) only if the import fails on all stores specified by the user. In case of a partial success, the locations added to the image will be the stores where the data has been correctly uploaded. Default is True. :returns: The raw response from the request. 
""" image = self._get_resource(_image.Image, image) if all_stores and (store or stores): raise exceptions.InvalidRequest( "all_stores is mutually exclusive with store and stores" ) if store is not None: if stores: raise exceptions.InvalidRequest( "store and stores are mutually exclusive" ) store = self._get_resource(_si.Store, store) stores = stores or [] new_stores = [] for s in stores: new_stores.append(self._get_resource(_si.Store, s)) stores = new_stores # as for the standard image upload function, container_format and # disk_format are required for using image import process if not all([image.container_format, image.disk_format]): raise exceptions.InvalidRequest( "Both container_format and disk_format are required for " "importing an image" ) return image.import_image( self, method=method, uri=uri, remote_region=remote_region, remote_image_id=remote_image_id, remote_service_interface=remote_service_interface, store=store, stores=stores, all_stores=all_stores, all_stores_must_succeed=all_stores_must_succeed, ) def stage_image(self, image, *, filename=None, data=None): """Stage binary image data :param image: The value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. :param filename: Optional name of the file to read data from. :param data: Optional data to be uploaded as an image. :returns: The results of image creation :rtype: :class:`~openstack.image.v2.image.Image` """ if filename and data: raise exceptions.SDKException( 'filename and data are mutually exclusive' ) image = self._get_resource(_image.Image, image) if 'queued' != image.status: raise exceptions.SDKException( 'Image stage is only possible for images in the queued state. 
' 'Current state is {status}'.format(status=image.status) ) if filename: image.data = open(filename, 'rb') elif data: image.data = data image.stage(self) # Stage does not return content, but updates the object image.fetch(self) return image def upload_image( self, container_format=None, disk_format=None, data=None, **attrs, ): """Create and upload a new image from attributes .. warning: This method is deprecated - and also doesn't work very well. Please stop using it immediately and switch to `create_image`. :param container_format: Format of the container. A valid value is ami, ari, aki, bare, ovf, ova, or docker. :param disk_format: The format of the disk. A valid value is ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, or iso. :param data: The data to be uploaded as an image. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.image.v2.image.Image`, comprised of the properties on the Image class. :returns: The results of image creation :rtype: :class:`~openstack.image.v2.image.Image` """ warnings.warn( "upload_image is deprecated. Use create_image instead.", os_warnings.OpenStackDeprecationWarning, ) # container_format and disk_format are required to be set # on the image by the time upload_image is called, but they're not # required by the _create call. Enforce them here so that we don't # need to handle a failure in _create, as upload_image will # return a 400 with a message about disk_format and container_format # not being set. if not all([container_format, disk_format]): raise exceptions.InvalidRequest( "Both container_format and disk_format are required" ) img = self._create( _image.Image, disk_format=disk_format, container_format=container_format, **attrs, ) # TODO(briancurtin): Perhaps we should run img.upload_image # in a background thread and just return what is called by # self._create, especially because the upload_image call doesn't # return anything anyway. 
Otherwise this blocks while uploading # significant amounts of image data. img.data = data img.upload(self) return img def _upload_image( self, name, *, filename=None, data=None, meta=None, wait=False, timeout=None, validate_checksum=True, use_import=False, stores=None, all_stores=None, all_stores_must_succeed=None, **kwargs, ): # We can never have nice things. Glance v1 took "is_public" as a # boolean. Glance v2 takes "visibility". If the user gives us # is_public, we know what they mean. If they give us visibility, they # know that they mean. if 'is_public' in kwargs['properties']: is_public = kwargs['properties'].pop('is_public') if is_public: kwargs['visibility'] = 'public' else: kwargs['visibility'] = 'private' try: # This makes me want to die inside if self._connection.image_api_use_tasks: if use_import: raise exceptions.SDKException( "The Glance Task API and Import API are mutually " "exclusive. Either disable image_api_use_tasks in " "config, or do not request using import" ) return self._upload_image_task( name, filename, data=data, meta=meta, wait=wait, timeout=timeout, **kwargs, ) else: return self._upload_image_put( name, filename, data=data, meta=meta, validate_checksum=validate_checksum, use_import=use_import, stores=stores, all_stores=all_stores, all_stores_must_succeed=all_stores_must_succeed, **kwargs, ) except exceptions.SDKException: self.log.debug("Image creation failed", exc_info=True) raise except Exception as e: raise exceptions.SDKException(f"Image creation failed: {str(e)}") def _make_v2_image_params(self, meta, properties): ret: ty.Dict = {} for k, v in iter(properties.items()): if k in _INT_PROPERTIES: ret[k] = int(v) elif k in _RAW_PROPERTIES: ret[k] = v else: if v is None: ret[k] = None else: ret[k] = str(v) ret.update(meta) return ret def _upload_image_put( self, name, filename, data, meta, validate_checksum, use_import=False, stores=None, all_stores=None, all_stores_must_succeed=None, **image_kwargs, ): # use of any of these imply 
use_import=True if stores or all_stores or all_stores_must_succeed: use_import = True if filename and not data: image_data = open(filename, 'rb') else: image_data = data properties = image_kwargs.pop('properties', {}) image_kwargs.update(self._make_v2_image_params(meta, properties)) image_kwargs['name'] = name image = self._create(_image.Image, **image_kwargs) image.data = image_data supports_import = ( image.image_import_methods and 'glance-direct' in image.image_import_methods ) if use_import and not supports_import: raise exceptions.SDKException( "Importing image was requested but the cloud does not " "support the image import method." ) try: if not use_import: response = image.upload(self) exceptions.raise_from_response(response) if use_import: image.stage(self) image.import_image(self) # image_kwargs are flat here md5 = image_kwargs.get(self._IMAGE_MD5_KEY) sha256 = image_kwargs.get(self._IMAGE_SHA256_KEY) if validate_checksum and (md5 or sha256): # Verify that the hash computed remotely matches the local # value data = image.fetch(self) checksum = data.get('checksum') if checksum: valid = checksum == md5 or checksum == sha256 if not valid: raise Exception('Image checksum verification failed') except Exception: self.log.debug("Deleting failed upload of image %s", name) self.delete_image(image.id) raise return image def _upload_image_task( self, name, filename, data, wait, timeout, meta, **image_kwargs, ): if not self._connection.has_service('object-store'): raise exceptions.SDKException( "The cloud {cloud} is configured to use tasks for image " "upload, but no object-store service is available. 
" "Aborting.".format(cloud=self._connection.config.name) ) properties = image_kwargs.get('properties', {}) md5 = properties[self._IMAGE_MD5_KEY] sha256 = properties[self._IMAGE_SHA256_KEY] container = properties[self._IMAGE_OBJECT_KEY].split('/', 1)[0] image_kwargs.pop('disk_format', None) image_kwargs.pop('container_format', None) self._connection.create_container(container) self._connection.create_object( container, name, filename, md5=md5, sha256=sha256, data=data, metadata={self._connection._OBJECT_AUTOCREATE_KEY: 'true'}, **{ 'content-type': 'application/octet-stream', 'x-delete-after': str(24 * 60 * 60), }, ) # TODO(mordred): Can we do something similar to what nodepool does # using glance properties to not delete then upload but instead make a # new "good" image and then mark the old one as "bad" task_args = { 'type': 'import', 'input': { 'import_from': f'{container}/{name}', 'image_properties': {'name': name}, }, } glance_task = self.create_task(**task_args) if wait: start = time.time() try: glance_task = self.wait_for_task( task=glance_task, status='success', wait=timeout ) image_id = glance_task.result['image_id'] image = self.get_image(image_id) # NOTE(gtema): Since we might move unknown attributes of # the image under properties - merge current with update # properties not to end up removing "existing" properties props = image.properties.copy() props.update(image_kwargs.pop('properties', {})) image_kwargs['properties'] = props image = self.update_image(image, **image_kwargs) self.log.debug( "Image Task %s imported %s in %s", glance_task.id, image_id, (time.time() - start), ) except exceptions.ResourceFailure as e: glance_task = self.get_task(glance_task) raise exceptions.SDKException( "Image creation failed: {message}".format( message=e.message ), extra_data=glance_task, ) finally: # Clean up after ourselves. The object we created is not # needed after the import is done. 
self._connection.delete_object(container, name) return image else: return glance_task def _existing_image(self, **kwargs): return _image.Image.existing(connection=self._connection, **kwargs) def download_image( self, image, *, stream=False, output=None, chunk_size=1024 * 1024, ): """Download an image This will download an image to memory when ``stream=False``, or allow streaming downloads using an iterator when ``stream=True``. For examples of working with streamed responses, see :ref:`download_image-stream-true`. :param image: The value can be either the ID of an image or a :class:`~openstack.image.v2.image.Image` instance. :param bool stream: When ``True``, return a :class:`requests.Response` instance allowing you to iterate over the response data stream instead of storing its entire contents in memory. See :meth:`requests.Response.iter_content` for more details. *NOTE*: If you do not consume the entirety of the response you must explicitly call :meth:`requests.Response.close` or otherwise risk inefficiencies with the ``requests`` library's handling of connections. When ``False``, return the entire contents of the response. :param output: Either a file object or a path to store data into. :param int chunk_size: size in bytes to read from the wire and buffer at one time. Defaults to 1024 * 1024 = 1 MiB :returns: When output is not given - the bytes comprising the given Image when stream is False, otherwise a :class:`requests.Response` instance. When output is given - a :class:`~openstack.image.v2.image.Image` instance. """ image = self._get_resource(_image.Image, image) return image.download( self, stream=stream, output=output, chunk_size=chunk_size, ) def delete_image(self, image, *, store=None, ignore_missing=True): """Delete an image :param image: The value can be either the ID of an image or a :class:`~openstack.image.v2.image.Image` instance. 
:param store: The value can be either the ID of a store or a :class:`~openstack.image.v2.service_info.Store` instance that the image is associated with. If specified, the image will only be deleted from the specified store. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the image does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent image. :returns: ``None`` """ if store: store = self._get_resource(_si.Store, store) store.delete_image(self, image, ignore_missing=ignore_missing) else: self._delete(_image.Image, image, ignore_missing=ignore_missing) def find_image(self, name_or_id, ignore_missing=True): """Find a single image :param name_or_id: The name or ID of a image. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.image.v2.image.Image` or None """ return self._find( _image.Image, name_or_id, ignore_missing=ignore_missing, ) def get_image(self, image): """Get a single image :param image: The value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. :returns: One :class:`~openstack.image.v2.image.Image` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_image.Image, image) def images(self, **query): """Return a generator of images :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of image objects :rtype: :class:`~openstack.image.v2.image.Image` """ return self._list(_image.Image, **query) def update_image(self, image, **attrs): """Update a image :param image: Either the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. 
:param attrs: The attributes to update on the image represented by ``image``. :returns: The updated image :rtype: :class:`~openstack.image.v2.image.Image` """ return self._update(_image.Image, image, **attrs) def deactivate_image(self, image): """Deactivate an image :param image: Either the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. :returns: None """ image = self._get_resource(_image.Image, image) image.deactivate(self) def reactivate_image(self, image): """Reactivate an image :param image: Either the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. :returns: None """ image = self._get_resource(_image.Image, image) image.reactivate(self) def update_image_properties( self, image=None, meta=None, **kwargs, ): """Update the properties of an existing image :param image: The value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. :param meta: A dict of key/value pairs to use for metadata that bypasses automatic type conversion. Additional kwargs will be passed to the image creation as additional metadata for the image and will have all values converted to string except for min_disk, min_ram, size and virtual_size which will be converted to int. """ image = self._get_resource(_image.Image, image) if not meta: meta = {} properties = {} for k, v in iter(kwargs.items()): if v and k in ['ramdisk', 'kernel']: v = self._connection.get_image_id(v) k = f'{k}_id' properties[k] = v img_props = image.properties.copy() for k, v in iter(self._make_v2_image_params(meta, properties).items()): if image.get(k, None) != v: img_props[k] = v if not img_props: return False self.update_image(image, **img_props) return True def add_tag(self, image, tag): """Add a tag to an image :param image: The value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance that the member will be created for. 
:param str tag: The tag to be added :returns: None """ image = self._get_resource(_image.Image, image) image.add_tag(self, tag) def remove_tag(self, image, tag): """Remove a tag to an image :param image: The value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance that the member will be created for. :param str tag: The tag to be removed :returns: None """ image = self._get_resource(_image.Image, image) image.remove_tag(self, tag) # ====== IMAGE MEMBERS ====== def add_member(self, image, **attrs): """Create a new member from attributes :param image: The value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance that the member will be created for. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.image.v2.member.Member`, comprised of the properties on the Member class. See `Image Sharing Reference `__ for details. :returns: The results of member creation :rtype: :class:`~openstack.image.v2.member.Member` """ image_id = resource.Resource._get_id(image) return self._create(_member.Member, image_id=image_id, **attrs) def remove_member(self, member, image=None, ignore_missing=True): """Delete a member :param member: The value can be either the ID of a member or a :class:`~openstack.image.v2.member.Member` instance. :param image: The value can be either the ID of an image or a :class:`~openstack.image.v2.image.Image` instance that the member is part of. This is required if ``member`` is an ID. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the member does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent member. 
:returns: ``None`` """ image_id = resource.Resource._get_id(image) member_id = resource.Resource._get_id(member) self._delete( _member.Member, member_id=member_id, image_id=image_id, ignore_missing=ignore_missing, ) def find_member(self, name_or_id, image, ignore_missing=True): """Find a single member :param name_or_id: The name or ID of a member. :param image: This is the image that the member belongs to, the value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.image.v2.member.Member` or None """ image_id = resource.Resource._get_id(image) return self._find( _member.Member, name_or_id, image_id=image_id, ignore_missing=ignore_missing, ) def get_member(self, member, image): """Get a single member on an image :param member: The value can be the ID of a member or a :class:`~openstack.image.v2.member.Member` instance. :param image: This is the image that the member belongs to. The value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. :returns: One :class:`~openstack.image.v2.member.Member` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ member_id = resource.Resource._get_id(member) image_id = resource.Resource._get_id(image) return self._get( _member.Member, member_id=member_id, image_id=image_id ) def members(self, image, **query): """Return a generator of members :param image: This is the image that the member belongs to, the value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. 
:returns: A generator of member objects :rtype: :class:`~openstack.image.v2.member.Member` """ image_id = resource.Resource._get_id(image) return self._list(_member.Member, image_id=image_id) def update_member(self, member, image, **attrs): """Update the member of an image :param member: Either the ID of a member or a :class:`~openstack.image.v2.member.Member` instance. :param image: This is the image that the member belongs to. The value can be the ID of a image or a :class:`~openstack.image.v2.image.Image` instance. :param attrs: The attributes to update on the member represented by ``member``. See `Image Sharing Reference `__ for details. :returns: The updated member :rtype: :class:`~openstack.image.v2.member.Member` """ member_id = resource.Resource._get_id(member) image_id = resource.Resource._get_id(image) return self._update( _member.Member, member_id=member_id, image_id=image_id, **attrs, ) # ====== METADEF NAMESPACES ====== def create_metadef_namespace(self, **attrs): """Create a new metadef namespace from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` comprised of the properties on the MetadefNamespace class. :returns: The results of metadef namespace creation :rtype: :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` """ return self._create(_metadef_namespace.MetadefNamespace, **attrs) def delete_metadef_namespace(self, metadef_namespace, ignore_missing=True): """Delete a metadef namespace :param metadef_namespace: The value can be either the name of a metadef namespace or a :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance. :param bool ignore_missing: When set to ``False``, :class:`~openstack.exceptions.NotFoundException` will be raised when the metadef namespace does not exist. 
:returns: ``None`` """ self._delete( _metadef_namespace.MetadefNamespace, metadef_namespace, ignore_missing=ignore_missing, ) # NOTE(stephenfin): There is no 'find_metadef_namespace' since namespaces # are identified by the namespace name, not an arbitrary UUID, meaning # 'find_metadef_namespace' would be identical to 'get_metadef_namespace' def get_metadef_namespace(self, metadef_namespace): """Get a single metadef namespace :param metadef_namespace: Either the name of a metadef namespace or an :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance. :returns: One :class:`~~openstack.image.v2.metadef_namespace.MetadefNamespace` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _metadef_namespace.MetadefNamespace, metadef_namespace, ) def metadef_namespaces(self, **query): """Return a generator of metadef namespaces :returns: A generator object of metadef namespaces :rtype: :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._list(_metadef_namespace.MetadefNamespace, **query) def update_metadef_namespace(self, metadef_namespace, **attrs): """Update a server :param metadef_namespace: Either the name of a metadef namespace or an :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance. :param attrs: The attributes to update on the metadef namespace represented by ``metadef_namespace``. :returns: The updated metadef namespace :rtype: :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` """ # rather annoyingly, Glance insists on us providing the 'namespace' # argument, even if we're not changing it... 
if 'namespace' not in attrs: attrs['namespace'] = resource.Resource._get_id(metadef_namespace) return self._update( _metadef_namespace.MetadefNamespace, metadef_namespace, **attrs, ) # ====== METADEF OBJECT ====== def create_metadef_object(self, namespace, **attrs): """Create a new object from namespace :param namespace: The value can be either the name of a metadef namespace or a :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.image.v2.metadef_object.MetadefObject`, comprised of the properties on the Metadef object class. :returns: A metadef namespace :rtype: :class:`~openstack.image.v2.metadef_object.MetadefObject` """ namespace_name = resource.Resource._get_id(namespace) return self._create( _metadef_object.MetadefObject, namespace_name=namespace_name, **attrs, ) def get_metadef_object(self, metadef_object, namespace): """Get a single metadef object :param metadef_object: The value can be the ID of a metadef_object or a :class:`~openstack.image.v2.metadef_object.MetadefObject` instance. :param namespace: The value can be either the name of a metadef namespace or a :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance. :returns: One :class:`~openstack.image.v2.metadef_object.MetadefObject` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ object_name = resource.Resource._get_id(metadef_object) namespace_name = resource.Resource._get_id(namespace) return self._get( _metadef_object.MetadefObject, namespace_name=namespace_name, name=object_name, ) def metadef_objects(self, namespace): """Get metadef object list of the namespace :param namespace: The value can be either the name of a metadef namespace or a :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance. 
:returns: One :class:`~openstack.image.v2.metadef_object.MetadefObject` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ namespace_name = resource.Resource._get_id(namespace) return self._list( _metadef_object.MetadefObject, namespace_name=namespace_name, ) def update_metadef_object(self, metadef_object, namespace, **attrs): """Update a single metadef object :param metadef_object: The value can be the ID of a metadef_object or a :class:`~openstack.image.v2.metadef_object.MetadefObject` instance. :param namespace: The value can be either the name of a metadef namespace or a :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance. :param dict attrs: Keyword arguments which will be used to update a :class:`~openstack.image.v2.metadef_object.MetadefObject` :returns: One :class:`~openstack.image.v2.metadef_object.MetadefObject` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ namespace_name = resource.Resource._get_id(namespace) metadef_object = resource.Resource._get_id(metadef_object) return self._update( _metadef_object.MetadefObject, metadef_object, namespace_name=namespace_name, **attrs, ) def delete_metadef_object(self, metadef_object, namespace, **attrs): """Removes a single metadef object :param metadef_object: The value can be the ID of a metadef_object or a :class:`~openstack.image.v2.metadef_object.MetadefObject` instance. :param namespace: The value can be either the name of a metadef namespace or a :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance. :param dict attrs: Keyword arguments which will be used to update a :class:`~openstack.image.v2.metadef_object.MetadefObject` :returns: ``None`` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" namespace_name = resource.Resource._get_id(namespace) return self._delete( _metadef_object.MetadefObject, metadef_object, namespace_name=namespace_name, **attrs, ) def delete_all_metadef_objects(self, namespace): """Delete all objects :param namespace: The value can be either the name of a metadef namespace or a :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance. :returns: ``None`` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ namespace = self._get_resource( _metadef_namespace.MetadefNamespace, namespace ) return namespace.delete_all_objects(self) # ====== METADEF RESOURCE TYPES ====== def metadef_resource_types(self, **query): """Return a generator of metadef resource types :return: A generator object of metadef resource types :rtype: :class:`~openstack.image.v2.metadef_resource_type.MetadefResourceType` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._list(_metadef_resource_type.MetadefResourceType, **query) # ====== METADEF RESOURCE TYPES ASSOCIATION====== def create_metadef_resource_type_association( self, metadef_namespace, **attrs, ): """Creates a resource type association between a namespace and the resource type specified in the body of the request. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.image.v2.metadef_resource_type.MetadefResourceTypeAssociation` comprised of the properties on the MetadefResourceTypeAssociation class. 
:returns: The results of metadef resource type association creation :rtype: :class:`~openstack.image.v2.metadef_resource_type.MetadefResourceTypeAssociation` """ namespace_name = resource.Resource._get_id(metadef_namespace) return self._create( _metadef_resource_type.MetadefResourceTypeAssociation, namespace_name=namespace_name, **attrs, ) def delete_metadef_resource_type_association( self, metadef_resource_type, metadef_namespace, ignore_missing=True, ): """Removes a resource type association in a namespace. :param metadef_resource_type: The value can be either the name of a metadef resource type association or an :class:`~openstack.image.v2.metadef_resource_type.MetadefResourceTypeAssociation` instance. :param metadef_namespace: The value can be either the name of metadef namespace or an :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance :param bool ignore_missing: When set to ``False``, :class:`~openstack.exceptions.NotFoundException` will be raised when the metadef resource type association does not exist. :returns: ``None`` """ namespace_name = resource.Resource._get_id(metadef_namespace) self._delete( _metadef_resource_type.MetadefResourceTypeAssociation, metadef_resource_type, namespace_name=namespace_name, ignore_missing=ignore_missing, ) def metadef_resource_type_associations(self, metadef_namespace, **query): """Return a generator of metadef resource type associations :param metadef_namespace: The value can be either the name of metadef namespace or an :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance :return: A generator object of metadef resource type associations :rtype: :class:`~openstack.image.v2.metadef_resource_type.MetadefResourceTypeAssociation` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" namespace_name = resource.Resource._get_id(metadef_namespace) return self._list( _metadef_resource_type.MetadefResourceTypeAssociation, namespace_name=namespace_name, **query, ) # ====== METADEF PROPERTY ====== def create_metadef_property(self, metadef_namespace, **attrs): """Create a metadef property :param metadef_namespace: The value can be either the name of metadef namespace or an :class:`~openstack.image.v2.metadef_property.MetadefNamespace` instance :param attrs: The attributes to create on the metadef property represented by ``metadef_property``. :returns: The created metadef property :rtype: :class:`~openstack.image.v2.metadef_property.MetadefProperty` """ namespace_name = resource.Resource._get_id(metadef_namespace) return self._create( _metadef_property.MetadefProperty, namespace_name=namespace_name, **attrs, ) def update_metadef_property( self, metadef_property, metadef_namespace, **attrs ): """Update a metadef property :param metadef_property: The value can be either the name of metadef property or an :class:`~openstack.image.v2.metadef_property.MetadefProperty` instance. :param metadef_namespace: The value can be either the name of metadef namespace or an :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance :param attrs: The attributes to update on the metadef property represented by ``metadef_property``. 
:returns: The updated metadef property :rtype: :class:`~openstack.image.v2.metadef_property.MetadefProperty` """ namespace_name = resource.Resource._get_id(metadef_namespace) metadef_property = resource.Resource._get_id(metadef_property) return self._update( _metadef_property.MetadefProperty, metadef_property, namespace_name=namespace_name, **attrs, ) def delete_metadef_property( self, metadef_property, metadef_namespace, ignore_missing=True ): """Delete a metadef property :param metadef_property: The value can be either the name of metadef property or an :class:`~openstack.image.v2.metadef_property.MetadefProperty` instance :param metadef_namespace: The value can be either the name of metadef namespace or an :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the instance does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent instance. :returns: ``None`` """ namespace_name = resource.Resource._get_id(metadef_namespace) metadef_property = resource.Resource._get_id(metadef_property) return self._delete( _metadef_property.MetadefProperty, metadef_property, namespace_name=namespace_name, ignore_missing=ignore_missing, ) def metadef_properties(self, metadef_namespace, **query): """Return a generator of metadef properties :param metadef_namespace: The value can be either the name of metadef namespace or an :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance :param kwargs query: Optional query parameters to be sent to limit the resources being returned. 
:returns: A generator of property objects """ namespace_name = resource.Resource._get_id(metadef_namespace) return self._list( _metadef_property.MetadefProperty, requires_id=False, namespace_name=namespace_name, **query, ) def get_metadef_property( self, metadef_property, metadef_namespace, **query ): """Get a single metadef property :param metadef_property: The value can be either the name of metadef property or an :class:`~openstack.image.v2.metadef_property.MetadefProperty` instance. :param metadef_namespace: The value can be either the name of metadef namespace or an :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance :returns: One :class:`~~openstack.image.v2.metadef_property.MetadefProperty` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ namespace_name = resource.Resource._get_id(metadef_namespace) return self._get( _metadef_property.MetadefProperty, metadef_property, namespace_name=namespace_name, **query, ) def delete_all_metadef_properties(self, metadef_namespace): """Delete all metadata definitions property inside a specific namespace. :param metadef_namespace: The value can be either the name of a metadef namespace or a :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` instance. :returns: ``None`` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ namespace = self._get_resource( _metadef_namespace.MetadefNamespace, metadef_namespace ) return namespace.delete_all_properties(self) # ====== SCHEMAS ====== def get_images_schema(self): """Get images schema :returns: One :class:`~openstack.image.v2.schema.Schema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get( _schema.Schema, requires_id=False, base_path='/schemas/images', ) def get_image_schema(self): """Get single image schema :returns: One :class:`~openstack.image.v2.schema.Schema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _schema.Schema, requires_id=False, base_path='/schemas/image', ) def get_members_schema(self): """Get image members schema :returns: One :class:`~openstack.image.v2.schema.Schema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _schema.Schema, requires_id=False, base_path='/schemas/members', ) def get_member_schema(self): """Get image member schema :returns: One :class:`~openstack.image.v2.schema.Schema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _schema.Schema, requires_id=False, base_path='/schemas/member', ) def get_tasks_schema(self): """Get image tasks schema :returns: One :class:`~openstack.image.v2.schema.Schema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _schema.Schema, requires_id=False, base_path='/schemas/tasks', ) def get_task_schema(self): """Get image task schema :returns: One :class:`~openstack.image.v2.schema.Schema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _schema.Schema, requires_id=False, base_path='/schemas/task', ) def get_metadef_namespace_schema(self): """Get metadata definition namespace schema :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get( _metadef_schema.MetadefSchema, requires_id=False, base_path='/schemas/metadefs/namespace', ) def get_metadef_namespaces_schema(self): """Get metadata definition namespaces schema :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _metadef_schema.MetadefSchema, requires_id=False, base_path='/schemas/metadefs/namespaces', ) def get_metadef_resource_type_schema(self): """Get metadata definition resource type association schema :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _metadef_schema.MetadefSchema, requires_id=False, base_path='/schemas/metadefs/resource_type', ) def get_metadef_resource_types_schema(self): """Get metadata definition resource type associations schema :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _metadef_schema.MetadefSchema, requires_id=False, base_path='/schemas/metadefs/resource_types', ) def get_metadef_object_schema(self): """Get metadata definition object schema :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _metadef_schema.MetadefSchema, requires_id=False, base_path='/schemas/metadefs/object', ) def get_metadef_objects_schema(self): """Get metadata definition objects schema :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get( _metadef_schema.MetadefSchema, requires_id=False, base_path='/schemas/metadefs/objects', ) def get_metadef_property_schema(self): """Get metadata definition property schema :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _metadef_schema.MetadefSchema, requires_id=False, base_path='/schemas/metadefs/property', ) def get_metadef_properties_schema(self): """Get metadata definition properties schema :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _metadef_schema.MetadefSchema, requires_id=False, base_path='/schemas/metadefs/properties', ) def get_metadef_tag_schema(self): """Get metadata definition tag schema :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _metadef_schema.MetadefSchema, requires_id=False, base_path='/schemas/metadefs/tag', ) def get_metadef_tags_schema(self): """Get metadata definition tags schema :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _metadef_schema.MetadefSchema, requires_id=False, base_path='/schemas/metadefs/tags', ) # ====== TASKS ====== def tasks(self, **query): """Return a generator of tasks :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of task objects :rtype: :class:`~openstack.image.v2.task.Task` """ return self._list(_task.Task, **query) def get_task(self, task): """Get task details :param task: The value can be the ID of a task or a :class:`~openstack.image.v2.task.Task` instance. 
:returns: One :class:`~openstack.image.v2.task.Task` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_task.Task, task) def create_task(self, **attrs): """Create a new task from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.image.v2.task.Task`, comprised of the properties on the Task class. :returns: The results of task creation :rtype: :class:`~openstack.image.v2.task.Task` """ return self._create(_task.Task, **attrs) def wait_for_task( self, task, status='success', failures=None, interval=2, wait=120, ): """Wait for a task to be in a particular status. :param task: The resource to wait on to reach the specified status. The resource must have a ``status`` attribute. :type resource: A :class:`~openstack.resource.Resource` object. :param status: Desired status. :param failures: Statuses that would be interpreted as failures. :type failures: :py:class:`list` :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to the desired status failed to occur in specified seconds. :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource has transited to one of the failure statuses. :raises: :class:`~AttributeError` if the resource does not have a ``status`` attribute. 
""" if failures is None: failures = ['failure'] else: failures = [f.lower() for f in failures] if task.status.lower() == status.lower(): return task name = f"{task.__class__.__name__}:{task.id}" msg = "Timeout waiting for {name} to transition to {status}".format( name=name, status=status ) for count in utils.iterate_timeout( timeout=wait, message=msg, wait=interval ): task = task.fetch(self) if not task: raise exceptions.ResourceFailure( "{name} went away while waiting for {status}".format( name=name, status=status ) ) new_status = task.status normalized_status = new_status.lower() if normalized_status == status.lower(): return task elif normalized_status in failures: if task.message == _IMAGE_ERROR_396: task_args = {'input': task.input, 'type': task.type} task = self.create_task(**task_args) self.log.debug('Got error 396. Recreating task %s' % task) else: raise exceptions.ResourceFailure( "{name} transitioned to failure state {status}".format( name=name, status=new_status ) ) self.log.debug( 'Still waiting for resource %s to reach state %s, ' 'current state is %s', name, status, new_status, ) # ====== STORES ====== def stores(self, details=False, **query): """Return a generator of supported image stores :returns: A generator of store objects :rtype: :class:`~openstack.image.v2.service_info.Store` """ if details: query['base_path'] = utils.urljoin(_si.Store.base_path, 'detail') return self._list(_si.Store, **query) # ====== IMPORTS ====== def get_import_info(self): """Get a info about image constraints :returns: One :class:`~openstack.image.v2.service_info.Import` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_si.Import, requires_id=False) # ====== UTILS ====== def wait_for_delete(self, res, interval=2, wait=120): """Wait for a resource to be deleted. :param res: The resource to wait on to be deleted. :type resource: A :class:`~openstack.resource.Resource` object. 
:param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to delete failed to occur in the specified seconds. """ return resource.wait_for_delete(self, res, interval, wait) def _get_cleanup_dependencies(self): return {'image': {'before': ['identity']}} def _service_cleanup( self, dry_run=True, client_status_queue=None, identified_resources=None, filters=None, resource_evaluation_fn=None, skip_resources=None, ): if self.should_skip_resource_cleanup("image", skip_resources): return project_id = self.get_project_id() # Note that images cannot be deleted when they are still being used for obj in self.images(owner=project_id): self._service_cleanup_del_res( self.delete_image, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/cache.py0000664000175000017500000000502600000000000021247 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import resource from openstack import utils class CachedImage(resource.Resource): image_id = resource.Body('image_id') hits = resource.Body('hits') last_accessed = resource.Body('last_accessed') last_modified = resource.Body('last_modified') size = resource.Body('size') class Cache(resource.Resource): base_path = '/cache' allow_fetch = True allow_delete = True allow_create = True _max_microversion = '2.14' cached_images = resource.Body( 'cached_images', type=list, list_type=CachedImage, ) queued_images = resource.Body('queued_images', type=list) def queue(self, session, image, *, microversion=None): """Queue an image into cache. :param session: The session to use for making this request :param image: The image to be queued into cache. :returns: The server response """ if microversion is None: microversion = self._get_microversion(session, action='commit') image_id = resource.Resource._get_id(image) url = utils.urljoin(self.base_path, image_id) response = session.put(url, microversion=microversion) exceptions.raise_from_response(response) return response def clear(self, session, target='both'): """Clears the cache. :param session: The session to use for making this request :param target: Specify which target you want to clear One of: ``both``(default), ``cache``, ``queue``. :returns: The server response """ headers = {} if target in ('cache', 'queue'): headers = {'x-image-cache-clear-target': target} elif target != "both": raise exceptions.InvalidRequest( 'Target must be "cache", "queue" or "both".' 
) response = session.delete(self.base_path, headers=headers) exceptions.raise_from_response(response) return response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/image.py0000664000175000017500000004225000000000000021266 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import typing as ty from openstack.common import tag from openstack import exceptions from openstack.image import _download from openstack import resource from openstack import utils class Image(resource.Resource, tag.TagMixin, _download.DownloadMixin): resources_key = 'images' base_path = '/images' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True commit_method = 'PATCH' commit_jsonpatch = True # Store all unknown attributes under 'properties' in the object. # Remotely they would be still in the resource root _store_unknown_attrs_as_properties = True _query_mapping = resource.QueryParameters( "id", "name", "visibility", "member_status", "owner", "status", "size_min", "size_max", "protected", "is_hidden", "sort_key", "sort_dir", "sort", "tag", "created_at", "updated_at", is_hidden="os_hidden", ) # NOTE: Do not add "self" support here. If you've used Python before, # you know that self, while not being a reserved word, has special # meaning. 
You can't call a class initializer with the self name # as the first argument and then additionally in kwargs, as we # do when we're constructing instances from the JSON body. # Resource.list explicitly pops off any "self" keys from bodies so # that we don't end up getting the following: # TypeError: __init__() got multiple values for argument 'self' # The image data (bytes or a file-like object) data = None # Properties #: Hash of the image data used. The Image service uses this value #: for verification. checksum = resource.Body('checksum') #: The container format refers to whether the VM image is in a file #: format that also contains metadata about the actual VM. #: Container formats include OVF and Amazon AMI. In addition, #: a VM image might not have a container format - instead, #: the image is just a blob of unstructured data. container_format = resource.Body('container_format') #: The date and time when the image was created. created_at = resource.Body('created_at') #: Valid values are: aki, ari, ami, raw, iso, vhd, vdi, qcow2, or vmdk. #: The disk format of a VM image is the format of the underlying #: disk image. Virtual appliance vendors have different formats #: for laying out the information contained in a VM disk image. disk_format = resource.Body('disk_format') #: This field controls whether an image is displayed in the default #: image-list response is_hidden = resource.Body('os_hidden', type=bool) #: Defines whether the image can be deleted. #: *Type: bool* is_protected = resource.Body('protected', type=bool) #: The algorithm used to compute a secure hash of the image data #: for this image hash_algo = resource.Body('os_hash_algo') #: The hexdigest of the secure hash of the image data computed using #: the algorithm whose name is the value of the os_hash_algo property. hash_value = resource.Body('os_hash_value') #: The minimum disk size in GB that is required to boot the image. 
min_disk = resource.Body('min_disk') #: The minimum amount of RAM in MB that is required to boot the image. min_ram = resource.Body('min_ram') #: The name of the image. name = resource.Body('name') #: The ID of the owner, or project, of the image. owner = resource.Body('owner', alias='owner_id') #: The ID of the owner, or project, of the image. (backwards compat) owner_id = resource.Body('owner', alias='owner') # TODO(mordred) This is not how this works in v2. I mean, it's how it # should work, but it's not. We need to fix properties. They work right # in shade, so we can draw some logic from there. #: Properties, if any, that are associated with the image. properties = resource.Body('properties') #: The size of the image data, in bytes. size = resource.Body('size', type=int) #: When present, Glance will attempt to store the disk image data in the #: backing store indicated by the value of the header. When not present, #: Glance will store the disk image data in the backing store that is #: marked default. Valid values are: file, s3, rbd, swift, cinder, #: gridfs, sheepdog, or vsphere. store = resource.Body('store') #: The image status. status = resource.Body('status') #: The date and time when the image was updated. updated_at = resource.Body('updated_at') #: The virtual size of the image. virtual_size = resource.Body('virtual_size') #: The image visibility. visibility = resource.Body('visibility') #: The URL for the virtual machine image file. file = resource.Body('file') #: A list of URLs to access the image file in external store. #: This list appears if the show_multiple_locations option is set #: to true in the Image service's configuration file. locations = resource.Body('locations') #: The URL to access the image file kept in external store. It appears #: when you set the show_image_direct_url option to true in the #: Image service's configuration file. direct_url = resource.Body('direct_url') #: The URL to access the image file kept in external store. 
url = resource.Body('url') #: The location metadata. metadata = resource.Body('metadata', type=dict) # Additional Image Properties # https://docs.openstack.org/glance/latest/user/common-image-properties.html # http://docs.openstack.org/cli-reference/glance-property-keys.html #: The CPU architecture that must be supported by the hypervisor. architecture = resource.Body("architecture") #: The hypervisor type. Note that qemu is used for both QEMU and #: KVM hypervisor types. hypervisor_type = resource.Body("hypervisor_type") #: Optional property allows created servers to have a different bandwidth #: cap than that defined in the network they are attached to. instance_type_rxtx_factor = resource.Body( "instance_type_rxtx_factor", type=float, ) # For snapshot images, this is the UUID of the server used to #: create this image. instance_uuid = resource.Body('instance_uuid') #: Specifies whether the image needs a config drive. #: `mandatory` or `optional` (default if property is not used). needs_config_drive = resource.Body('img_config_drive') #: The ID of an image stored in the Image service that should be used #: as the kernel when booting an AMI-style image. kernel_id = resource.Body('kernel_id') #: The common name of the operating system distribution in lowercase os_distro = resource.Body('os_distro') #: The operating system version as specified by the distributor. os_version = resource.Body('os_version') #: Secure Boot is a security standard. When the instance starts, #: Secure Boot first examines software such as firmware and OS by #: their signature and only allows them to run if the signatures are valid. needs_secure_boot = resource.Body('os_secure_boot') #: Time for graceful shutdown os_shutdown_timeout = resource.Body('os_shutdown_timeout', type=int) #: The ID of image stored in the Image service that should be used as #: the ramdisk when booting an AMI-style image. ramdisk_id = resource.Body('ramdisk_id') #: The virtual machine mode. 
This represents the host/guest ABI #: (application binary interface) used for the virtual machine. vm_mode = resource.Body('vm_mode') #: The preferred number of sockets to expose to the guest. hw_cpu_sockets = resource.Body('hw_cpu_sockets', type=int) #: The preferred number of cores to expose to the guest. hw_cpu_cores = resource.Body('hw_cpu_cores', type=int) #: The preferred number of threads to expose to the guest. hw_cpu_threads = resource.Body('hw_cpu_threads', type=int) #: Specifies the type of disk controller to attach disk devices to. #: One of scsi, virtio, uml, xen, ide, or usb. hw_disk_bus = resource.Body('hw_disk_bus') #: Used to pin the virtual CPUs (vCPUs) of instances to the #: host's physical CPU cores (pCPUs). hw_cpu_policy = resource.Body('hw_cpu_policy') #: Defines how hardware CPU threads in a simultaneous #: multithreading-based (SMT) architecture be used. hw_cpu_thread_policy = resource.Body('hw_cpu_thread_policy') #: Adds a random-number generator device to the image's instances. hw_rng_model = resource.Body('hw_rng_model') #: For libvirt: Enables booting an ARM system using the specified #: machine type. #: For Hyper-V: Specifies whether the Hyper-V instance will be a #: generation 1 or generation 2 VM. hw_machine_type = resource.Body('hw_machine_type') #: Enables the use of VirtIO SCSI (virtio-scsi) to provide block device #: access for compute instances; by default, instances use VirtIO Block #: (virtio-blk). hw_scsi_model = resource.Body('hw_scsi_model') #: Specifies the count of serial ports that should be provided. hw_serial_port_count = resource.Body('hw_serial_port_count', type=int) #: The video image driver used. hw_video_model = resource.Body('hw_video_model') #: Maximum RAM for the video image. hw_video_ram = resource.Body('hw_video_ram', type=int) #: Enables a virtual hardware watchdog device that carries out the #: specified action if the server hangs. 
hw_watchdog_action = resource.Body('hw_watchdog_action') #: The kernel command line to be used by the libvirt driver, instead #: of the default. os_command_line = resource.Body('os_command_line') #: Specifies the model of virtual network interface device to use. hw_vif_model = resource.Body('hw_vif_model') #: If true, this enables the virtio-net multiqueue feature. #: In this case, the driver sets the number of queues equal to the #: number of guest vCPUs. This makes the network performance scale #: across a number of vCPUs. is_hw_vif_multiqueue_enabled = resource.Body( 'hw_vif_multiqueue_enabled', type=bool, ) #: If true, enables the BIOS bootmenu. is_hw_boot_menu_enabled = resource.Body('hw_boot_menu', type=bool) #: The virtual SCSI or IDE controller used by the hypervisor. vmware_adaptertype = resource.Body('vmware_adaptertype') #: A VMware GuestID which describes the operating system installed #: in the image. vmware_ostype = resource.Body('vmware_ostype') #: If true, the root partition on the disk is automatically resized #: before the instance boots. has_auto_disk_config = resource.Body('auto_disk_config') #: The operating system installed on the image. os_type = resource.Body('os_type') #: The operating system admin username. os_admin_user = resource.Body('os_admin_user') #: A string boolean, which if "true", QEMU guest agent will be exposed #: to the instance. hw_qemu_guest_agent = resource.Body('hw_qemu_guest_agent', type=str) #: If true, require quiesce on snapshot via QEMU guest agent. os_require_quiesce = resource.Body('os_require_quiesce', type=bool) #: The URL for the schema describing a virtual machine image. schema = resource.Body('schema') def _action(self, session, action): """Call an action on an image ID.""" url = utils.urljoin(self.base_path, self.id, 'actions', action) return session.post(url) def deactivate(self, session): """Deactivate an image Note: Only administrative users can view image locations for deactivated images. 
""" self._action(session, "deactivate") def reactivate(self, session): """Reactivate an image Note: The image must exist in order to be reactivated. """ self._action(session, "reactivate") def upload(self, session, *, data=None): """Upload data into an existing image :param session: The session to use for making this request :param data: Optional data to be uploaded. If not provided, the `~Image.data` attribute will be used :returns: The server response """ if data: self.data = data url = utils.urljoin(self.base_path, self.id, 'file') return session.put( url, data=self.data, headers={"Content-Type": "application/octet-stream", "Accept": ""}, ) def stage(self, session, *, data=None): """Stage binary image data into an existing image :param session: The session to use for making this request :param data: Optional data to be uploaded. If not provided, the `~Image.data` attribute will be used :returns: The server response """ if data: self.data = data url = utils.urljoin(self.base_path, self.id, 'stage') response = session.put( url, data=self.data, headers={"Content-Type": "application/octet-stream", "Accept": ""}, ) self._translate_response(response, has_body=False) return self def import_image( self, session, method='glance-direct', *, uri=None, remote_region=None, remote_image_id=None, remote_service_interface=None, store=None, stores=None, all_stores=None, all_stores_must_succeed=None, ): """Import Image via interoperable image import process""" if all_stores and (store or stores): raise exceptions.InvalidRequest( 'all_stores is mutually exclusive with store and stores' ) if store and stores: raise exceptions.InvalidRequest( 'store and stores are mutually exclusive. stores should be ' 'preferred.' 
) if store: stores = [store] else: stores = stores or [] url = utils.urljoin(self.base_path, self.id, 'import') data: ty.Dict[str, ty.Any] = {'method': {'name': method}} if uri: if method != 'web-download': raise exceptions.InvalidRequest( 'URI is only supported with method: "web-download"' ) data['method']['uri'] = uri if remote_region and remote_image_id: if remote_service_interface: data['method'][ 'glance_service_interface' ] = remote_service_interface data['method']['glance_region'] = remote_region data['method']['glance_image_id'] = remote_image_id if all_stores is not None: data['all_stores'] = all_stores if all_stores_must_succeed is not None: data['all_stores_must_succeed'] = all_stores_must_succeed if stores: data['stores'] = [s.id for s in stores] headers = {} # Backward compat if store is not None: headers = {'X-Image-Meta-Store': store.id} return session.post(url, json=data, headers=headers) def _consume_header_attrs(self, attrs): self.image_import_methods = [] _image_import_methods = attrs.pop('OpenStack-image-import-methods', '') if _image_import_methods: self.image_import_methods = _image_import_methods.split(',') return super()._consume_header_attrs(attrs) def _prepare_request( self, requires_id=None, prepend_key=False, patch=False, base_path=None, **kwargs, ): request = super()._prepare_request( requires_id=requires_id, prepend_key=prepend_key, patch=patch, base_path=base_path, ) if patch: headers = { 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'Accept': '', } request.headers.update(headers) return request @classmethod def find(cls, session, name_or_id, ignore_missing=True, **params): # Do a regular search first (ignoring missing) result = super().find(session, name_or_id, True, **params) if result: return result else: # Search also in hidden images params['is_hidden'] = True data = cls.list(session, **params) result = cls._get_one_match(name_or_id, data) if result is not None: return result if ignore_missing: return None raise 
exceptions.NotFoundException( f"No {cls.__name__} found for {name_or_id}" ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/member.py0000664000175000017500000000315700000000000021456 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Member(resource.Resource): resources_key = 'members' base_path = '/images/%(image_id)s/members' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # See https://bugs.launchpad.net/glance/+bug/1526991 for member/member_id # 'member' is documented incorrectly as being deprecated but it's the # only thing that works. 'member_id' is not accepted. #: The ID of the image member. An image member is a tenant #: with whom the image is shared. member_id = resource.Body('member', alternate_id=True) #: The date and time when the member was created. created_at = resource.Body('created_at') #: Image ID stored through the image API. Typically a UUID. image_id = resource.URI('image_id') #: The status of the image. status = resource.Body('status') #: The URL for schema of the member. schema = resource.Body('schema') #: The date and time when the member was updated. 
updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/metadef_namespace.py0000664000175000017500000000630400000000000023625 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class MetadefNamespace(resource.Resource): resources_key = 'namespaces' base_path = '/metadefs/namespaces' allow_create = True allow_fetch = True allow_commit = True allow_list = True allow_delete = True _query_mapping = resource.QueryParameters( "limit", "marker", "resource_types", "sort_dir", "sort_key", "visibility", ) created_at = resource.Body('created_at') description = resource.Body('description') display_name = resource.Body('display_name') is_protected = resource.Body('protected', type=bool) namespace = resource.Body('namespace', alternate_id=True) owner = resource.Body('owner') resource_type_associations = resource.Body( 'resource_type_associations', type=list, list_type=dict, ) updated_at = resource.Body('updated_at') visibility = resource.Body('visibility') def _commit( self, session, request, method, microversion, has_body=True, retry_on_conflict=None, ): # Rather annoyingly, Glance insists on us providing the 'namespace' # argument, even if we're not changing it. 
We need to add this here # since it won't be included if Resource.commit thinks its unchanged # TODO(stephenfin): Eventually we could indicate attributes that are # required in the body on update, like the 'requires_id' and # 'create_requires_id' do for the ID in the URL request.body['namespace'] = self.namespace return super()._commit( session, request, method, microversion, has_body=True, retry_on_conflict=None, ) def _delete_all(self, session, url): response = session.delete(url) exceptions.raise_from_response(response) self._translate_response(response, has_body=False) return self def delete_all_properties(self, session): """Delete all properties in a namespace. :param session: The session to use for making this request :returns: The server response """ url = utils.urljoin(self.base_path, self.id, 'properties') return self._delete_all(session, url) def delete_all_objects(self, session): """Delete all objects in a namespace. :param session: The session to use for making this request :returns: The server response """ url = utils.urljoin(self.base_path, self.id, 'objects') return self._delete_all(session, url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/metadef_object.py0000664000175000017500000000245200000000000023137 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class MetadefObject(resource.Resource): resources_key = 'objects' base_path = '/metadefs/namespaces/%(namespace_name)s/objects' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( "visibility", "resource_types", "sort_key", "sort_dir", ) created_at = resource.Body('created_at') description = resource.Body('description') name = resource.Body('name', alternate_id=True) namespace_name = resource.URI('namespace_name') properties = resource.Body('properties') required = resource.Body('required') updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/metadef_property.py0000664000175000017500000001544300000000000023561 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource class MetadefProperty(resource.Resource): base_path = '/metadefs/namespaces/%(namespace_name)s/properties' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True #: An identifier (a name) for the namespace. namespace_name = resource.URI('namespace_name') #: The name of the property name = resource.Body('name', alternate_id=True) #: The property type. type = resource.Body('type') #: The title of the property. 
title = resource.Body('title') #: Detailed description of the property. description = resource.Body('description') #: A list of operator operators = resource.Body('operators', type=list) #: Default property description. default = resource.Body('default') #: Indicates whether this is a read-only property. is_readonly = resource.Body('readonly', type=bool) #: Minimum allowed numerical value. minimum = resource.Body('minimum', type=int) #: Maximum allowed numerical value. maximum = resource.Body('maximum', type=int) #: Enumerated list of property values. enum = resource.Body('enum', type=list) #: A regular expression #: (`ECMA 262 `_) #: that a string value must match. pattern = resource.Body('pattern') #: Minimum allowed string length. min_length = resource.Body('minLength', type=int, minimum=0, default=0) #: Maximum allowed string length. max_length = resource.Body('maxLength', type=int, minimum=0) # FIXME(stephenfin): This is causing conflicts due to the 'dict.items' # method. Perhaps we need to rename it? #: Schema for the items in an array. items = resource.Body('items', type=dict) # type: ignore #: Indicates whether all values in the array must be distinct. require_unique_items = resource.Body( 'uniqueItems', type=bool, default=False ) #: Minimum length of an array. min_items = resource.Body('minItems', type=int, minimum=0, default=0) #: Maximum length of an array. max_items = resource.Body('maxItems', type=int, minimum=0) #: Describes extra items, if you use tuple typing. If the value of #: ``items`` is an array (tuple typing) and the instance is longer than #: the list of schemas in ``items``, the additional items are described by #: the schema in this property. If this value is ``false``, the instance #: cannot be longer than the list of schemas in ``items``. If this value #: is ``true``, that is equivalent to the empty schema (anything goes). 
allow_additional_items = resource.Body('additionalItems', type=bool) # TODO(stephenfin): It would be nicer if we could do this in Resource # itself since the logic is also found elsewhere (e.g. # openstack.identity.v2.extension.Extension) but that code is a bit of a # rat's nest right now and needs a spring clean @classmethod def list( cls, session, paginated=True, base_path=None, allow_unknown_params=False, *, microversion=None, **params, ): """This method is a generator which yields resource objects. A re-implementation of :meth:`~openstack.resource.Resource.list` that handles glance's single, unpaginated list implementation. Refer to :meth:`~openstack.resource.Resource.list` for full documentation including parameter, exception and return type documentation. """ session = cls._get_session(session) if microversion is None: microversion = cls._get_microversion(session, action='list') if base_path is None: base_path = cls.base_path # There is no server-side filtering, only client-side client_filters = {} # Gather query parameters which are not supported by the server for k, v in params.items(): if ( # Known attr hasattr(cls, k) # Is real attr property and isinstance(getattr(cls, k), resource.Body) # not included in the query_params and k not in cls._query_mapping._mapping.keys() ): client_filters[k] = v uri = base_path % params uri_params = {} for k, v in params.items(): # We need to gather URI parts to set them on the resource later if hasattr(cls, k) and isinstance(getattr(cls, k), resource.URI): uri_params[k] = v def _dict_filter(f, d): """Dict param based filtering""" if not d: return False for key in f.keys(): if isinstance(f[key], dict): if not _dict_filter(f[key], d.get(key, None)): return False elif d.get(key, None) != f[key]: return False return True response = session.get( uri, headers={"Accept": "application/json"}, params={}, microversion=microversion, ) exceptions.raise_from_response(response) data = response.json() for name, property_data in 
data['properties'].items(): property = { 'name': name, **property_data, **uri_params, } value = cls.existing( microversion=microversion, connection=session._get_connection(), **property, ) filters_matched = True # Iterate over client filters and return only if matching for key in client_filters.keys(): if isinstance(client_filters[key], dict): if not _dict_filter( client_filters[key], value.get(key, None), ): filters_matched = False break elif value.get(key, None) != client_filters[key]: filters_matched = False break if filters_matched: yield value return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/metadef_resource_type.py0000664000175000017500000000444200000000000024562 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class MetadefResourceType(resource.Resource): resources_key = 'resource_types' base_path = '/metadefs/resource_types' # capabilities allow_list = True #: The name of metadata definition resource type name = resource.Body('name', alternate_id=True) #: The date and time when the resource type was created. created_at = resource.Body('created_at') #: The date and time when the resource type was updated. 
updated_at = resource.Body('updated_at') class MetadefResourceTypeAssociation(resource.Resource): resources_key = 'resource_type_associations' base_path = '/metadefs/namespaces/%(namespace_name)s/resource_types' # capabilities allow_create = True allow_delete = True allow_list = True #: The name of the namespace whose details you want to see. namespace_name = resource.URI('namespace_name') #: The name of metadata definition resource type name = resource.Body('name', alternate_id=True) #: The date and time when the resource type was created. created_at = resource.Body('created_at') #: The date and time when the resource type was updated. updated_at = resource.Body('updated_at') #: Prefix for any properties in the namespace that you want to apply #: to the resource type. If you specify a prefix, you must append #: a prefix separator, such as the colon (:) character. prefix = resource.Body('prefix') #: Some resource types allow more than one key and value pair #: for each instance. For example, the Image service allows #: both user and image metadata on volumes. The properties_target parameter #: enables a namespace target to remove the ambiguity properties_target = resource.Body('properties_target') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/metadef_schema.py0000664000175000017500000000212700000000000023130 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class MetadefSchema(resource.Resource): base_path = '/schemas/metadefs' # capabilities allow_fetch = True #: A boolean value that indicates allows users to add custom properties. additional_properties = resource.Body('additionalProperties', type=bool) #: A set of definitions. definitions = resource.Body('definitions', type=dict) #: A list of required resources. required = resource.Body('required', type=list) #: Schema properties. properties = resource.Body('properties', type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/schema.py0000664000175000017500000000154600000000000021447 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Schema(resource.Resource): base_path = '/schemas' # capabilities allow_fetch = True #: Additional properties additional_properties = resource.Body('additionalProperties', type=dict) #: Schema properties properties = resource.Body('properties', type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/service_info.py0000664000175000017500000000412600000000000022657 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class Import(resource.Resource): base_path = '/info/import' # capabilities allow_fetch = True #: import methods import_methods = resource.Body('import-methods', type=dict) class Store(resource.Resource): resources_key = 'stores' base_path = '/info/stores' # capabilities allow_list = True #: Description of the store description = resource.Body('description') #: default is_default = resource.Body('default', type=bool) #: properties properties = resource.Body('properties', type=dict) def delete_image(self, session, image, *, ignore_missing=False): """Delete image from store :param session: The session to use for making this request. :param image: The value can be either the ID of an image or a :class:`~openstack.image.v2.image.Image` instance. :returns: The result of the ``delete`` if resource found, else None. :raises: :class:`~openstack.exceptions.NotFoundException` when ignore_missing if ``False`` and a nonexistent resource is attempted to be deleted. 
""" image_id = resource.Resource._get_id(image) url = utils.urljoin('/stores', self.id, image_id) try: response = session.delete(url) exceptions.raise_from_response(response) except exceptions.NotFoundException: if ignore_missing: return None raise return response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/image/v2/task.py0000664000175000017500000000352100000000000021144 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Task(resource.Resource): resources_key = 'tasks' base_path = '/tasks' # capabilities allow_create = True allow_fetch = True allow_list = True _query_mapping = resource.QueryParameters( 'type', 'status', 'sort_dir', 'sort_key' ) #: The date and time when the task was created. created_at = resource.Body('created_at') #: The date and time when the task is subject to removal. expires_at = resource.Body('expires_at') #: A JSON object specifying the input parameters to the task. input = resource.Body('input') #: Human-readable text, possibly an empty string, usually displayed #: in an error situation to provide more information about what #: has occurred. message = resource.Body('message') #: The ID of the owner, or project, of the task. owner_id = resource.Body('owner') #: A JSON object specifying the outcome of the task. result = resource.Body('result') #: The URL for schema of the task. 
schema = resource.Body('schema') #: The status of the task. status = resource.Body('status') #: The type of task represented by this content. type = resource.Body('type') #: The date and time when the task was updated. updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2813306 openstacksdk-4.0.0/openstack/instance_ha/0000775000175000017500000000000000000000000020472 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/instance_ha/__init__.py0000664000175000017500000000000000000000000022571 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/instance_ha/instance_ha_service.py0000664000175000017500000000152200000000000025040 0ustar00zuulzuul00000000000000# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from openstack.instance_ha.v1 import _proxy from openstack import service_description class InstanceHaService(service_description.ServiceDescription): """The HA service.""" supported_versions = { '1': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2813306 openstacksdk-4.0.0/openstack/instance_ha/v1/0000775000175000017500000000000000000000000021020 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/instance_ha/v1/__init__.py0000664000175000017500000000000000000000000023117 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/instance_ha/v1/_proxy.py0000664000175000017500000002372000000000000022716 0ustar00zuulzuul00000000000000# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack import exceptions from openstack.instance_ha.v1 import host as _host from openstack.instance_ha.v1 import notification as _notification from openstack.instance_ha.v1 import segment as _segment from openstack.instance_ha.v1 import vmove as _vmove from openstack import proxy from openstack import resource class Proxy(proxy.Proxy): """Proxy class for ha resource handling. Create method for each action of each API. 
""" _resource_registry = { "host": _host.Host, "notification": _notification.Notification, "segment": _segment.Segment, "vmove": _vmove.VMove, } def notifications(self, **query): """Return a generator of notifications. :param kwargs query: Optional query parameters to be sent to limit the notifications being returned. :returns: A generator of notifications """ return self._list(_notification.Notification, **query) def get_notification(self, notification): """Get a single notification. :param notification: The value can be the ID of a notification or a :class:`~masakariclient.sdk.ha.v1.notification.Notification` instance. :returns: One :class:`~masakariclient.sdk.ha.v1.notification.Notification` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_notification.Notification, notification) def create_notification(self, **attrs): """Create a new notification. :param dict attrs: Keyword arguments which will be used to create a :class:`masakariclient.sdk.ha.v1.notification.Notification`, comprised of the propoerties on the Notification class. :returns: The result of notification creation :rtype: :class:`masakariclient.sdk.ha.v1.notification.Notification` """ return self._create(_notification.Notification, **attrs) def segments(self, **query): """Return a generator of segments. :param kwargs query: Optional query parameters to be sent to limit the segments being returned. :returns: A generator of segments """ return self._list(_segment.Segment, **query) def get_segment(self, segment): """Get a single segment. :param segment: The value can be the ID of a segment or a :class:`~masakariclient.sdk.ha.v1.segment.Segment` instance. :returns: One :class:`~masakariclient.sdk.ha.v1.segment.Segment` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_segment.Segment, segment) def create_segment(self, **attrs): """Create a new segment. 
:param dict attrs: Keyword arguments which will be used to create a :class:`masakariclient.sdk.ha.v1.segment.Segment`, comprised of the propoerties on the Segment class. :returns: The result of segment creation :rtype: :class:`masakariclient.sdk.ha.v1.segment.Segment` """ return self._create(_segment.Segment, **attrs) def update_segment(self, segment, **attrs): """Update a segment. :param segment: The value can be the ID of a segment or a :class:`~masakariclient.sdk.ha.v1.segment.Segment` instance. :param dict attrs: Keyword arguments which will be used to update a :class:`masakariclient.sdk.ha.v1.segment.Segment`, comprised of the propoerties on the Segment class. :returns: The updated segment. :rtype: :class:`masakariclient.sdk.ha.v1.segment.Segment` """ return self._update(_segment.Segment, segment, **attrs) def delete_segment(self, segment, ignore_missing=True): """Delete a segment. :param segment: The value can be either the ID of a segment or a :class:`~masakariclient.sdk.ha.v1.segment.Segment` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the segment does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent segment. :returns: ``None`` """ return self._delete( _segment.Segment, segment, ignore_missing=ignore_missing ) def hosts(self, segment_id, **query): """Return a generator of hosts. :param segment_id: The ID of a failover segment. :param kwargs query: Optional query parameters to be sent to limit the hosts being returned. :returns: A generator of hosts """ return self._list(_host.Host, segment_id=segment_id, **query) def create_host(self, segment_id, **attrs): """Create a new host. :param segment_id: The ID of a failover segment. :param dict attrs: Keyword arguments which will be used to create a :class:`masakariclient.sdk.ha.v1.host.Host`, comprised of the propoerties on the Host class. 
:returns: The results of host creation """ return self._create(_host.Host, segment_id=segment_id, **attrs) def get_host(self, host, segment_id=None): """Get a single host. :param segment_id: The ID of a failover segment. :param host: The value can be the ID of a host or a :class: `~masakariclient.sdk.ha.v1.host.Host` instance. :returns: One :class:`~masakariclient.sdk.ha.v1.host.Host` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.InvalidRequest` when segment_id is None. """ if segment_id is None: raise exceptions.InvalidRequest("'segment_id' must be specified.") host_id = resource.Resource._get_id(host) return self._get(_host.Host, host_id, segment_id=segment_id) def update_host(self, host, segment_id, **attrs): """Update the host. :param segment_id: The ID of a failover segment. :param host: The value can be the ID of a host or a :class: `~masakariclient.sdk.ha.v1.host.Host` instance. :param dict attrs: The attributes to update on the host represented. :returns: The updated host :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.InvalidRequest` when segment_id is None. """ host_id = resource.Resource._get_id(host) return self._update( _host.Host, host_id, segment_id=segment_id, **attrs ) def delete_host(self, host, segment_id=None, ignore_missing=True): """Delete the host. :param segment_id: The ID of a failover segment. :param host: The value can be the ID of a host or a :class: `~masakariclient.sdk.ha.v1.host.Host` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the host does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent host. :returns: ``None`` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
:raises: :class:`~openstack.exceptions.InvalidRequest` when segment_id is None. """ if segment_id is None: raise exceptions.InvalidRequest("'segment_id' must be specified.") host_id = resource.Resource._get_id(host) return self._delete( _host.Host, host_id, segment_id=segment_id, ignore_missing=ignore_missing, ) def vmoves(self, notification, **query): """Return a generator of vmoves. :param notification: The value can be the UUID of a notification or a :class: `~masakariclient.sdk.ha.v1.notification.Notification` instance. :param kwargs query: Optional query parameters to be sent to limit the vmoves being returned. :returns: A generator of vmoves """ notification_id = resource.Resource._get_id(notification) return self._list( _vmove.VMove, notification_id=notification_id, **query, ) def get_vmove(self, vmove, notification): """Get a single vmove. :param vmove: The value can be the UUID of one vmove or a :class: `~masakariclient.sdk.ha.v1.vmove.VMove` instance. :param notification: The value can be the UUID of a notification or a :class: `~masakariclient.sdk.ha.v1.notification.Notification` instance. :returns: one 'VMove' resource class. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.InvalidRequest` when notification_id is None. """ notification_id = resource.Resource._get_id(notification) vmove_id = resource.Resource._get_id(vmove) return self._get( _vmove.VMove, vmove_id, notification_id=notification_id, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/instance_ha/v1/host.py0000664000175000017500000000431000000000000022345 0ustar00zuulzuul00000000000000# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack import resource class Host(resource.Resource): resource_key = "host" resources_key = "hosts" base_path = "/segments/%(segment_id)s/hosts" # capabilities # 1] GET /v1/segments//hosts # 2] GET /v1/segments//hosts/ # 3] POST /v1/segments//hosts # 4] PUT /v1/segments//hosts # 5] DELETE /v1/segments//hosts allow_list = True allow_fetch = True allow_create = True allow_commit = True allow_delete = True #: A Uuid of representing this host uuid = resource.Body("uuid") #: A failover segment ID of this host(in URI) segment_id = resource.URI("segment_id") #: A created time of this host created_at = resource.Body("created_at") #: A latest updated time of this host updated_at = resource.Body("updated_at") #: A name of this host name = resource.Body("name") #: A type of this host type = resource.Body("type") #: A control attributes of this host control_attributes = resource.Body("control_attributes") #: A maintenance status of this host on_maintenance = resource.Body("on_maintenance") #: A reservation status of this host reserved = resource.Body("reserved") #: A failover segment ID of this host(in Body) failover_segment_id = resource.Body("failover_segment_id") _query_mapping = resource.QueryParameters( "sort_key", "sort_dir", failover_segment_id="failover_segment_id", type="type", on_maintenance="on_maintenance", reserved="reserved", ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/instance_ha/v1/notification.py0000664000175000017500000000622100000000000024061 
0ustar00zuulzuul00000000000000# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack import resource class ProgressDetailsItem(resource.Resource): #: The timestamp of recovery workflow task. timestamp = resource.Body("timestamp") #: The message of recovery workflow task. message = resource.Body("message") #: The progress of recovery workflow task. progress = resource.Body("progress") class RecoveryWorkflowDetailItem(resource.Resource): #: The progress of recovery workflow. progress = resource.Body("progress") #: The name of recovery workflow. name = resource.Body("name") #: The state of recovery workflow. state = resource.Body("state") #: The progress details of this recovery workflow. progress_details = resource.Body( "progress_details", type=list, list_type=ProgressDetailsItem ) class Notification(resource.Resource): resource_key = "notification" resources_key = "notifications" base_path = "/notifications" # capabilities # 1] GET /v1/notifications # 2] GET /v1/notifications/ # 3] POST /v1/notifications allow_list = True allow_fetch = True allow_create = True allow_commit = False allow_delete = False #: A ID of representing this notification. id = resource.Body("id") #: A Uuid of representing this notification. notification_uuid = resource.Body("notification_uuid") #: A created time of representing this notification. 
created_at = resource.Body("created_at") #: A latest updated time of representing this notification. updated_at = resource.Body("updated_at") #: The type of failure. Valuse values include ''COMPUTE_HOST'', #: ''VM'', ''PROCESS'' type = resource.Body("type") #: The hostname of this notification. hostname = resource.Body("hostname") #: The status for this notitication. status = resource.Body("status") #: The generated_time for this notitication. generated_time = resource.Body("generated_time") #: The payload of this notification. payload = resource.Body("payload") #: The source host uuid of this notification. source_host_uuid = resource.Body("source_host_uuid") #: The recovery workflow details of this notification. recovery_workflow_details = resource.Body( "recovery_workflow_details", type=list, list_type=RecoveryWorkflowDetailItem, ) _query_mapping = resource.QueryParameters( "sort_key", "sort_dir", source_host_uuid="source_host_uuid", type="type", status="status", generated_since="generated-since", ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/instance_ha/v1/segment.py0000664000175000017500000000413700000000000023041 0ustar00zuulzuul00000000000000# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from openstack import resource class Segment(resource.Resource): resource_key = "segment" resources_key = "segments" base_path = "/segments" # capabilities # 1] GET /v1/segments # 2] GET /v1/segments/ # 3] POST /v1/segments # 4] PUT /v1/segments/ # 5] DELETE /v1/segments/ allow_list = True allow_fetch = True allow_create = True allow_commit = True allow_delete = True # add enabled flag to segment in 1.2 _max_microversion = '1.2' #: A ID of representing this segment. id = resource.Body("id") #: A Uuid of representing this segment. uuid = resource.Body("uuid") #: A created time of representing this segment. created_at = resource.Body("created_at") #: A latest updated time of representing this segment. updated_at = resource.Body("updated_at") #: The name of this segment. name = resource.Body("name") #: The description of this segment. description = resource.Body("description") #: The recovery method of this segment. recovery_method = resource.Body("recovery_method") #: The service type of this segment. service_type = resource.Body("service_type") #: The enabled flag of this segment. is_enabled = resource.Body("enabled", type=bool) _query_mapping = resource.QueryParameters( "sort_key", "sort_dir", recovery_method="recovery_method", service_type="service_type", is_enabled="enabled", ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/instance_ha/v1/vmove.py0000664000175000017500000000422200000000000022526 0ustar00zuulzuul00000000000000# Copyright(c) 2022 Inspur # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack import resource class VMove(resource.Resource): resource_key = "vmove" resources_key = "vmoves" base_path = "/notifications/%(notification_id)s/vmoves" # capabilities # 1] GET /v1/notifications/{notification_uuid}/vmoves # 2] GET /v1/notifications/{notification_uuid}/vmoves/{vmove_uuid} allow_list = True allow_fetch = True _query_mapping = resource.QueryParameters( "sort_key", "sort_dir", "type", "status", ) #: A ID of representing this vmove id = resource.Body("id") #: A UUID of representing this vmove uuid = resource.Body("uuid") #: The notification UUID this vmove belongs to(in URI) notification_id = resource.URI("notification_id") #: A created time of this vmove created_at = resource.Body("created_at") #: A latest updated time of this vmove updated_at = resource.Body("updated_at") #: The instance uuid of this vmove server_id = resource.Body("instance_uuid") #: The instance name of this vmove server_name = resource.Body("instance_name") #: The source host of this vmove source_host = resource.Body("source_host") #: The dest host of this vmove dest_host = resource.Body("dest_host") #: A start time of this vmove start_time = resource.Body("start_time") #: A end time of this vmove end_time = resource.Body("end_time") #: The status of this vmove status = resource.Body("status") #: The type of this vmove type = resource.Body("type") #: The message of this vmove message = resource.Body("message") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2853327 
openstacksdk-4.0.0/openstack/key_manager/0000775000175000017500000000000000000000000020500 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/key_manager/__init__.py0000664000175000017500000000000000000000000022577 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/key_manager/key_manager_service.py0000664000175000017500000000143300000000000025055 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.key_manager.v1 import _proxy from openstack import service_description class KeyManagerService(service_description.ServiceDescription): """The key manager service.""" supported_versions = { '1': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2853327 openstacksdk-4.0.0/openstack/key_manager/v1/0000775000175000017500000000000000000000000021026 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/key_manager/v1/__init__.py0000664000175000017500000000000000000000000023125 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/key_manager/v1/_format.py0000664000175000017500000000210600000000000023026 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from urllib import parse from openstack import format class HREFToUUID(format.Formatter): @classmethod def deserialize(cls, value): """Convert a HREF to the UUID portion""" parts = parse.urlsplit(value) # Only try to proceed if we have an actual URI. # Just check that we have a scheme, netloc, and path. if not all(parts[:3]): raise ValueError("Unable to convert %s to an ID" % value) # The UUID will be the last portion of the URI. 
return parts.path.split("/")[-1] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/key_manager/v1/_proxy.py0000664000175000017500000002404500000000000022725 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.key_manager.v1 import container as _container from openstack.key_manager.v1 import order as _order from openstack.key_manager.v1 import secret as _secret from openstack import proxy class Proxy(proxy.Proxy): _resource_registry = { "container": _container.Container, "order": _order.Order, "secret": _secret.Secret, } def create_container(self, **attrs): """Create a new container from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.key_manager.v1.container.Container`, comprised of the properties on the Container class. :returns: The results of container creation :rtype: :class:`~openstack.key_manager.v1.container.Container` """ return self._create(_container.Container, **attrs) def delete_container(self, container, ignore_missing=True): """Delete a container :param container: The value can be either the ID of a container or a :class:`~openstack.key_manager.v1.container.Container` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the container does not exist. 
When set to ``True``, no exception will be set when attempting to delete a nonexistent container. :returns: ``None`` """ self._delete( _container.Container, container, ignore_missing=ignore_missing ) def find_container(self, name_or_id, ignore_missing=True): """Find a single container :param name_or_id: The name or ID of a container. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.key_manager.v1.container.Container` or None """ return self._find( _container.Container, name_or_id, ignore_missing=ignore_missing ) def get_container(self, container): """Get a single container :param container: The value can be the ID of a container or a :class:`~openstack.key_manager.v1.container.Container` instance. :returns: One :class:`~openstack.key_manager.v1.container.Container` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_container.Container, container) def containers(self, **query): """Return a generator of containers :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of container objects :rtype: :class:`~openstack.key_manager.v1.container.Container` """ return self._list(_container.Container, **query) def update_container(self, container, **attrs): """Update a container :param container: Either the id of a container or a :class:`~openstack.key_manager.v1.container.Container` instance. :param attrs: The attributes to update on the container represented by ``container``. 
:returns: The updated container :rtype: :class:`~openstack.key_manager.v1.container.Container` """ return self._update(_container.Container, container, **attrs) def create_order(self, **attrs): """Create a new order from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.key_manager.v1.order.Order`, comprised of the properties on the Order class. :returns: The results of order creation :rtype: :class:`~openstack.key_manager.v1.order.Order` """ return self._create(_order.Order, **attrs) def delete_order(self, order, ignore_missing=True): """Delete an order :param order: The value can be either the ID of a order or a :class:`~openstack.key_manager.v1.order.Order` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the order does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent order. :returns: ``None`` """ self._delete(_order.Order, order, ignore_missing=ignore_missing) def find_order(self, name_or_id, ignore_missing=True): """Find a single order :param name_or_id: The name or ID of a order. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.key_manager.v1.order.Order` or None """ return self._find( _order.Order, name_or_id, ignore_missing=ignore_missing ) def get_order(self, order): """Get a single order :param order: The value can be the ID of an order or a :class:`~openstack.key_manager.v1.order.Order` instance. :returns: One :class:`~openstack.key_manager.v1.order.Order` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get(_order.Order, order) def orders(self, **query): """Return a generator of orders :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of order objects :rtype: :class:`~openstack.key_manager.v1.order.Order` """ return self._list(_order.Order, **query) def update_order(self, order, **attrs): """Update a order :param order: Either the id of a order or a :class:`~openstack.key_manager.v1.order.Order` instance. :param attrs: The attributes to update on the order represented by ``order``. :returns: The updated order :rtype: :class:`~openstack.key_manager.v1.order.Order` """ return self._update(_order.Order, order, **attrs) def create_secret(self, **attrs): """Create a new secret from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.key_manager.v1.secret.Secret`, comprised of the properties on the Order class. :returns: The results of secret creation :rtype: :class:`~openstack.key_manager.v1.secret.Secret` """ return self._create(_secret.Secret, **attrs) def delete_secret(self, secret, ignore_missing=True): """Delete a secret :param secret: The value can be either the ID of a secret or a :class:`~openstack.key_manager.v1.secret.Secret` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the secret does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent secret. :returns: ``None`` """ self._delete(_secret.Secret, secret, ignore_missing=ignore_missing) def find_secret(self, name_or_id, ignore_missing=True): """Find a single secret :param name_or_id: The name or ID of a secret. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.key_manager.v1.secret.Secret` or None """ return self._find( _secret.Secret, name_or_id, ignore_missing=ignore_missing ) def get_secret(self, secret): """Get a single secret :param secret: The value can be the ID of a secret or a :class:`~openstack.key_manager.v1.secret.Secret` instance. :returns: One :class:`~openstack.key_manager.v1.secret.Secret` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_secret.Secret, secret) def secrets(self, **query): """Return a generator of secrets :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of secret objects :rtype: :class:`~openstack.key_manager.v1.secret.Secret` """ return self._list(_secret.Secret, **query) def update_secret(self, secret, **attrs): """Update a secret :param secret: Either the id of a secret or a :class:`~openstack.key_manager.v1.secret.Secret` instance. :param attrs: The attributes to update on the secret represented by ``secret``. :returns: The updated secret :rtype: :class:`~openstack.key_manager.v1.secret.Secret` """ return self._update(_secret.Secret, secret, **attrs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/key_manager/v1/container.py0000664000175000017500000000326300000000000023366 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.key_manager.v1 import _format from openstack import resource class Container(resource.Resource): resources_key = 'containers' base_path = '/containers' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: A URI for this container container_ref = resource.Body('container_ref') #: The ID for this container container_id = resource.Body( 'container_ref', alternate_id=True, type=_format.HREFToUUID ) #: The timestamp when this container was created. created_at = resource.Body('created') #: The name of this container name = resource.Body('name') #: A list of references to secrets in this container secret_refs = resource.Body('secret_refs', type=list) #: The status of this container status = resource.Body('status') #: The type of this container type = resource.Body('type') #: The timestamp when this container was updated. updated_at = resource.Body('updated') #: A party interested in this container. consumers = resource.Body('consumers', type=list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/key_manager/v1/order.py0000664000175000017500000000376500000000000022526 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.key_manager.v1 import _format from openstack import resource class Order(resource.Resource): resources_key = 'orders' base_path = '/orders' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True #: Timestamp in ISO8601 format of when the order was created created_at = resource.Body('created') #: Keystone Id of the user who created the order creator_id = resource.Body('creator_id') #: A dictionary containing key-value parameters which specify the #: details of an order request meta = resource.Body('meta', type=dict) #: A URI for this order order_ref = resource.Body('order_ref') #: The ID of this order order_id = resource.Body( 'order_ref', alternate_id=True, type=_format.HREFToUUID ) #: Secret href associated with the order secret_ref = resource.Body('secret_ref') #: Secret ID associated with the order secret_id = resource.Body('secret_ref', type=_format.HREFToUUID) # The status of this order status = resource.Body('status') #: Metadata associated with the order sub_status = resource.Body('sub_status') #: Metadata associated with the order sub_status_message = resource.Body('sub_status_message') # The type of order type = resource.Body('type') #: Timestamp in ISO8601 format of the last time the order was updated. updated_at = resource.Body('updated') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/key_manager/v1/secret.py0000664000175000017500000001053000000000000022664 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.key_manager.v1 import _format from openstack import resource from openstack import utils class Secret(resource.Resource): resources_key = 'secrets' base_path = '/secrets' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( "name", "mode", "bits", "secret_type", "acl_only", "created", "updated", "expiration", "sort", algorithm="alg", ) # Properties #: Metadata provided by a user or system for informational purposes algorithm = resource.Body('algorithm') #: Metadata provided by a user or system for informational purposes. #: Value must be greater than zero. bit_length = resource.Body('bit_length') #: A list of content types content_types = resource.Body('content_types', type=dict) #: Once this timestamp has past, the secret will no longer be available. expires_at = resource.Body('expiration') #: Timestamp of when the secret was created. created_at = resource.Body('created') #: Timestamp of when the secret was last updated. updated_at = resource.Body('updated') #: The type/mode of the algorithm associated with the secret information. 
mode = resource.Body('mode') #: The name of the secret set by the user name = resource.Body('name') #: A URI to the sercret secret_ref = resource.Body('secret_ref') #: The ID of the secret # NOTE: This is not really how alternate IDs are supposed to work and # ultimately means this has to work differently than all other services # in all of OpenStack because of the departure from using actual IDs # that even this service can't even use itself. secret_id = resource.Body( 'secret_ref', alternate_id=True, type=_format.HREFToUUID ) #: Used to indicate the type of secret being stored. secret_type = resource.Body('secret_type') #: The status of this secret status = resource.Body('status') #: A timestamp when this secret was updated. updated_at = resource.Body('updated') #: The secret's data to be stored. payload_content_type must also #: be supplied if payload is included. (optional) payload = resource.Body('payload') #: The media type for the content of the payload. #: (required if payload is included) payload_content_type = resource.Body('payload_content_type') #: The encoding used for the payload to be able to include it in #: the JSON request. Currently only base64 is supported. 
#: (required if payload is encoded) payload_content_encoding = resource.Body('payload_content_encoding') def fetch( self, session, requires_id=True, base_path=None, error_message=None, skip_cache=False, ): request = self._prepare_request( requires_id=requires_id, base_path=base_path ) response = session.get(request.url).json() content_type = None if self.payload_content_type is not None: content_type = self.payload_content_type elif "content_types" in response: content_type = response["content_types"]["default"] # Only try to get the payload if a content type has been explicitly # specified or if one was found in the metadata response if content_type is not None: payload = session.get( utils.urljoin(request.url, "payload"), headers={"Accept": content_type}, skip_cache=skip_cache, ) response["payload"] = payload.text # We already have the JSON here so don't call into _translate_response self._update_from_body_attrs(response) return self ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2853327 openstacksdk-4.0.0/openstack/load_balancer/0000775000175000017500000000000000000000000020764 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/__init__.py0000664000175000017500000000000000000000000023063 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/load_balancer_service.py0000664000175000017500000000144100000000000025624 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.load_balancer.v2 import _proxy from openstack import service_description class LoadBalancerService(service_description.ServiceDescription): """The load balancer service.""" supported_versions = { '2': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2933364 openstacksdk-4.0.0/openstack/load_balancer/v2/0000775000175000017500000000000000000000000021313 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/__init__.py0000664000175000017500000000000000000000000023412 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/_proxy.py0000664000175000017500000014163700000000000023221 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.load_balancer.v2 import amphora as _amphora from openstack.load_balancer.v2 import availability_zone as _availability_zone from openstack.load_balancer.v2 import ( availability_zone_profile as _availability_zone_profile, ) from openstack.load_balancer.v2 import flavor as _flavor from openstack.load_balancer.v2 import flavor_profile as _flavor_profile from openstack.load_balancer.v2 import health_monitor as _hm from openstack.load_balancer.v2 import l7_policy as _l7policy from openstack.load_balancer.v2 import l7_rule as _l7rule from openstack.load_balancer.v2 import listener as _listener from openstack.load_balancer.v2 import load_balancer as _lb from openstack.load_balancer.v2 import member as _member from openstack.load_balancer.v2 import pool as _pool from openstack.load_balancer.v2 import provider as _provider from openstack.load_balancer.v2 import quota as _quota from openstack import proxy from openstack import resource class Proxy(proxy.Proxy): _resource_registry = { "amphora": _amphora.Amphora, "availability_zone": _availability_zone.AvailabilityZone, "availability_zone_profile": _availability_zone_profile.AvailabilityZoneProfile, # noqa: E501 "flavor": _flavor.Flavor, "flavor_profile": _flavor_profile.FlavorProfile, "health_monitor": _hm.HealthMonitor, "l7_policy": _l7policy.L7Policy, "l7_rule": _l7rule.L7Rule, "load_balancer": _lb.LoadBalancer, "member": _member.Member, "pool": _pool.Pool, "provider": _provider.Provider, "quota": _quota.Quota, } def create_load_balancer(self, **attrs): """Create a new load balancer from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer`, comprised of the properties on the LoadBalancer class. 
:returns: The results of load balancer creation :rtype: :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` """ return self._create(_lb.LoadBalancer, **attrs) def get_load_balancer(self, *attrs): """Get a load balancer :param load_balancer: The value can be the ID of a load balancer or :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` instance. :returns: One :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` """ return self._get(_lb.LoadBalancer, *attrs) def get_load_balancer_statistics(self, load_balancer): """Get the load balancer statistics :param load_balancer: The value can be the ID of a load balancer or :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` instance. :returns: One :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancerStats` """ return self._get( _lb.LoadBalancerStats, lb_id=load_balancer, requires_id=False ) def load_balancers(self, **query): """Retrieve a generator of load balancers :returns: A generator of load balancer instances """ return self._list(_lb.LoadBalancer, **query) def delete_load_balancer( self, load_balancer, ignore_missing=True, cascade=False ): """Delete a load balancer :param load_balancer: The load_balancer can be either the ID or a :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` instance :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the load balancer does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent load balancer. :param bool cascade: If true will delete all child objects of the load balancer. 
:returns: ``None`` """ load_balancer = self._get_resource(_lb.LoadBalancer, load_balancer) load_balancer.cascade = cascade return self._delete( _lb.LoadBalancer, load_balancer, ignore_missing=ignore_missing ) def find_load_balancer(self, name_or_id, ignore_missing=True): """Find a single load balancer :param name_or_id: The name or ID of a load balancer :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the load balancer does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent load balancer. :returns: ``None`` """ return self._find( _lb.LoadBalancer, name_or_id, ignore_missing=ignore_missing ) def update_load_balancer(self, load_balancer, **attrs): """Update a load balancer :param load_balancer: The load_balancer can be either the ID or a :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` instance :param dict attrs: The attributes to update on the load balancer represented by ``load_balancer``. :returns: The updated load_balancer :rtype: :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` """ return self._update(_lb.LoadBalancer, load_balancer, **attrs) def wait_for_load_balancer( self, name_or_id, status='ACTIVE', failures=['ERROR'], interval=2, wait=300, ): """Wait for load balancer status :param name_or_id: The name or ID of the load balancer. :param status: Desired status. :param failures: Statuses that would be interpreted as failures. Default to ['ERROR']. :type failures: :py:class:`list` :param interval: Number of seconds to wait between consecutive checks. Defaults to 2. :param wait: Maximum number of seconds to wait before the status to be reached. Defaults to 300. :returns: The load balancer is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to the desired status failed to occur within the specified wait time. 
:raises: :class:`~openstack.exceptions.ResourceFailure` if the resource has transited to one of the failure statuses. :raises: :class:`~AttributeError` if the resource does not have a ``status`` attribute. """ lb = self._find(_lb.LoadBalancer, name_or_id, ignore_missing=False) return resource.wait_for_status( self, lb, status, failures, interval, wait, attribute='provisioning_status', ) def failover_load_balancer(self, load_balancer, **attrs): """Failover a load balancer :param load_balancer: The value can be the ID of a load balancer or :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` instance. :returns: ``None`` """ return self._update(_lb.LoadBalancerFailover, lb_id=load_balancer) def create_listener(self, **attrs): """Create a new listener from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.listener.Listener`, comprised of the properties on the Listener class. :returns: The results of listener creation :rtype: :class:`~openstack.load_balancer.v2.listener.Listener` """ return self._create(_listener.Listener, **attrs) def delete_listener(self, listener, ignore_missing=True): """Delete a listener :param listener: The value can be either the ID of a listener or a :class:`~openstack.load_balancer.v2.listener.Listener` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the listner does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent listener. :returns: ``None`` """ self._delete( _listener.Listener, listener, ignore_missing=ignore_missing ) def find_listener(self, name_or_id, ignore_missing=True): """Find a single listener :param name_or_id: The name or ID of a listener. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. 
When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.load_balancer.v2.listener.Listener` or None """ return self._find( _listener.Listener, name_or_id, ignore_missing=ignore_missing ) def get_listener(self, listener): """Get a single listener :param listener: The value can be the ID of a listener or a :class:`~openstack.load_balancer.v2.listener.Listener` instance. :returns: One :class:`~openstack.load_balancer.v2.listener.Listener` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_listener.Listener, listener) def get_listener_statistics(self, listener): """Get the listener statistics :param listener: The value can be the ID of a listener or a :class:`~openstack.load_balancer.v2.listener.Listener` instance. :returns: One :class:`~openstack.load_balancer.v2.listener.ListenerStats` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _listener.ListenerStats, listener_id=listener, requires_id=False ) def listeners(self, **query): """Return a generator of listeners :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: :returns: A generator of listener objects :rtype: :class:`~openstack.load_balancer.v2.listener.Listener` """ return self._list(_listener.Listener, **query) def update_listener(self, listener, **attrs): """Update a listener :param listener: Either the id of a listener or a :class:`~openstack.load_balancer.v2.listener.Listener` instance. :param dict attrs: The attributes to update on the listener represented by ``listener``. 
:returns: The updated listener :rtype: :class:`~openstack.load_balancer.v2.listener.Listener` """ return self._update(_listener.Listener, listener, **attrs) def create_pool(self, **attrs): """Create a new pool from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.pool.Pool`, comprised of the properties on the Pool class. :returns: The results of Pool creation :rtype: :class:`~openstack.load_balancer.v2.pool.Pool` """ return self._create(_pool.Pool, **attrs) def get_pool(self, *attrs): """Get a pool :param pool: Value is either a pool ID or a :class:`~openstack.load_balancer.v2.pool.Pool` instance. :returns: One :class:`~openstack.load_balancer.v2.pool.Pool` """ return self._get(_pool.Pool, *attrs) def pools(self, **query): """Retrieve a generator of pools :returns: A generator of Pool instances """ return self._list(_pool.Pool, **query) def delete_pool(self, pool, ignore_missing=True): """Delete a pool :param pool: The pool is either a pool ID or a :class:`~openstack.load_balancer.v2.pool.Pool` instance :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the pool does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent pool. :returns: ``None`` """ return self._delete(_pool.Pool, pool, ignore_missing=ignore_missing) def find_pool(self, name_or_id, ignore_missing=True): """Find a single pool :param name_or_id: The name or ID of a pool :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the pool does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent pool. 
:returns: ``None`` """ return self._find( _pool.Pool, name_or_id, ignore_missing=ignore_missing ) def update_pool(self, pool, **attrs): """Update a pool :param pool: Either the id of a pool or a :class:`~openstack.load_balancer.v2.pool.Pool` instance. :param dict attrs: The attributes to update on the pool represented by ``pool``. :returns: The updated pool :rtype: :class:`~openstack.load_balancer.v2.pool.Pool` """ return self._update(_pool.Pool, pool, **attrs) def create_member(self, pool, **attrs): """Create a new member from attributes :param pool: The pool can be either the ID of a pool or a :class:`~openstack.load_balancer.v2.pool.Pool` instance that the member will be created in. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.member.Member`, comprised of the properties on the Member class. :returns: The results of member creation :rtype: :class:`~openstack.load_balancer.v2.member.Member` """ poolobj = self._get_resource(_pool.Pool, pool) return self._create(_member.Member, pool_id=poolobj.id, **attrs) def delete_member(self, member, pool, ignore_missing=True): """Delete a member :param member: The member can be either the ID of a member or a :class:`~openstack.load_balancer.v2.member.Member` instance. :param pool: The pool can be either the ID of a pool or a :class:`~openstack.load_balancer.v2.pool.Pool` instance that the member belongs to. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the member does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent member. :returns: ``None`` """ poolobj = self._get_resource(_pool.Pool, pool) self._delete( _member.Member, member, ignore_missing=ignore_missing, pool_id=poolobj.id, ) def find_member(self, name_or_id, pool, ignore_missing=True): """Find a single member :param str name_or_id: The name or ID of a member. 
:param pool: The pool can be either the ID of a pool or a :class:`~openstack.load_balancer.v2.pool.Pool` instance that the member belongs to. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.load_balancer.v2.member.Member` or None """ poolobj = self._get_resource(_pool.Pool, pool) return self._find( _member.Member, name_or_id, ignore_missing=ignore_missing, pool_id=poolobj.id, ) def get_member(self, member, pool): """Get a single member :param member: The member can be the ID of a member or a :class:`~openstack.load_balancer.v2.member.Member` instance. :param pool: The pool can be either the ID of a pool or a :class:`~openstack.load_balancer.v2.pool.Pool` instance that the member belongs to. :returns: One :class:`~openstack.load_balancer.v2.member.Member` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ poolobj = self._get_resource(_pool.Pool, pool) return self._get(_member.Member, member, pool_id=poolobj.id) def members(self, pool, **query): """Return a generator of members :param pool: The pool can be either the ID of a pool or a :class:`~openstack.load_balancer.v2.pool.Pool` instance that the member belongs to. :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: :returns: A generator of member objects :rtype: :class:`~openstack.load_balancer.v2.member.Member` """ poolobj = self._get_resource(_pool.Pool, pool) return self._list(_member.Member, pool_id=poolobj.id, **query) def update_member(self, member, pool, **attrs): """Update a member :param member: Either the ID of a member or a :class:`~openstack.load_balancer.v2.member.Member` instance. 
:param pool: The pool can be either the ID of a pool or a :class:`~openstack.load_balancer.v2.pool.Pool` instance that the member belongs to. :param dict attrs: The attributes to update on the member represented by ``member``. :returns: The updated member :rtype: :class:`~openstack.load_balancer.v2.member.Member` """ poolobj = self._get_resource(_pool.Pool, pool) return self._update( _member.Member, member, pool_id=poolobj.id, **attrs ) def find_health_monitor(self, name_or_id, ignore_missing=True): """Find a single health monitor :param name_or_id: The name or ID of a health monitor :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the health monitor does not exist. When set to ``True``, no exception will be set when attempting to find a nonexistent health monitor. :returns: The :class:`openstack.load_balancer.v2.healthmonitor.HealthMonitor` object matching the given name or id or None if nothing matches. :raises: :class:`openstack.exceptions.DuplicateResource` if more than one resource is found for this request. :raises: :class:`openstack.exceptions.NotFoundException` if nothing is found and ignore_missing is ``False``. """ return self._find( _hm.HealthMonitor, name_or_id, ignore_missing=ignore_missing ) def create_health_monitor(self, **attrs): """Create a new health monitor from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor`, comprised of the properties on the HealthMonitor class. :returns: The results of HealthMonitor creation :rtype: :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` """ return self._create(_hm.HealthMonitor, **attrs) def get_health_monitor(self, healthmonitor): """Get a health monitor :param healthmonitor: The value can be the ID of a health monitor or :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` instance. 
:returns: One health monitor :rtype: :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` """ return self._get(_hm.HealthMonitor, healthmonitor) def health_monitors(self, **query): """Retrieve a generator of health monitors :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: 'name', 'created_at', 'updated_at', 'delay', 'expected_codes', 'http_method', 'max_retries', 'max_retries_down', 'pool_id', 'provisioning_status', 'operating_status', 'timeout', 'project_id', 'type', 'url_path', 'is_admin_state_up'. :returns: A generator of health monitor instances """ return self._list(_hm.HealthMonitor, **query) def delete_health_monitor(self, healthmonitor, ignore_missing=True): """Delete a health monitor :param healthmonitor: The healthmonitor can be either the ID of the health monitor or a :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` instance :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the healthmonitor does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent healthmonitor. :returns: ``None`` """ return self._delete( _hm.HealthMonitor, healthmonitor, ignore_missing=ignore_missing ) def update_health_monitor(self, healthmonitor, **attrs): """Update a health monitor :param healthmonitor: The healthmonitor can be either the ID of the health monitor or a :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` instance :param dict attrs: The attributes to update on the health monitor represented by ``healthmonitor``. 
:returns: The updated health monitor :rtype: :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` """ return self._update(_hm.HealthMonitor, healthmonitor, **attrs) def create_l7_policy(self, **attrs): """Create a new l7policy from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.l7_policy.L7Policy`, comprised of the properties on the L7Policy class. :returns: The results of l7policy creation :rtype: :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` """ return self._create(_l7policy.L7Policy, **attrs) def delete_l7_policy(self, l7_policy, ignore_missing=True): """Delete a l7policy :param l7_policy: The value can be either the ID of a l7policy or a :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the l7policy does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent l7policy. :returns: ``None`` """ self._delete( _l7policy.L7Policy, l7_policy, ignore_missing=ignore_missing ) def find_l7_policy(self, name_or_id, ignore_missing=True): """Find a single l7policy :param name_or_id: The name or ID of a l7policy. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` or None """ return self._find( _l7policy.L7Policy, name_or_id, ignore_missing=ignore_missing ) def get_l7_policy(self, l7_policy): """Get a single l7policy :param l7_policy: The value can be the ID of a l7policy or a :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance. 
:returns: One :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_l7policy.L7Policy, l7_policy) def l7_policies(self, **query): """Return a generator of l7policies :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: :returns: A generator of l7policy objects :rtype: :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` """ return self._list(_l7policy.L7Policy, **query) def update_l7_policy(self, l7_policy, **attrs): """Update a l7policy :param l7_policy: Either the id of a l7policy or a :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance. :param dict attrs: The attributes to update on the l7policy represented by ``l7policy``. :returns: The updated l7policy :rtype: :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` """ return self._update(_l7policy.L7Policy, l7_policy, **attrs) def create_l7_rule(self, l7_policy, **attrs): """Create a new l7rule from attributes :param l7_policy: The l7_policy can be either the ID of a l7policy or :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance that the l7rule will be created in. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.l7_rule.L7Rule`, comprised of the properties on the L7Rule class. :returns: The results of l7rule creation :rtype: :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` """ l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) return self._create( _l7rule.L7Rule, l7policy_id=l7policyobj.id, **attrs ) def delete_l7_rule(self, l7rule, l7_policy, ignore_missing=True): """Delete a l7rule :param l7rule: The l7rule can be either the ID of a l7rule or a :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` instance. 
:param l7_policy: The l7_policy can be either the ID of a l7policy or :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance that the l7rule belongs to. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the l7rule does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent l7rule. :returns: ``None`` """ l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) self._delete( _l7rule.L7Rule, l7rule, ignore_missing=ignore_missing, l7policy_id=l7policyobj.id, ) def find_l7_rule(self, name_or_id, l7_policy, ignore_missing=True): """Find a single l7rule :param str name_or_id: The name or ID of a l7rule. :param l7_policy: The l7_policy can be either the ID of a l7policy or :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance that the l7rule belongs to. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` or None """ l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) return self._find( _l7rule.L7Rule, name_or_id, ignore_missing=ignore_missing, l7policy_id=l7policyobj.id, ) def get_l7_rule(self, l7rule, l7_policy): """Get a single l7rule :param l7rule: The l7rule can be the ID of a l7rule or a :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` instance. :param l7_policy: The l7_policy can be either the ID of a l7policy or :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance that the l7rule belongs to. :returns: One :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) return self._get(_l7rule.L7Rule, l7rule, l7policy_id=l7policyobj.id) def l7_rules(self, l7_policy, **query): """Return a generator of l7rules :param l7_policy: The l7_policy can be either the ID of a l7_policy or :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance that the l7rule belongs to. :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: :returns: A generator of l7rule objects :rtype: :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` """ l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) return self._list(_l7rule.L7Rule, l7policy_id=l7policyobj.id, **query) def update_l7_rule(self, l7rule, l7_policy, **attrs): """Update a l7rule :param l7rule: Either the ID of a l7rule or a :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` instance. :param l7_policy: The l7_policy can be either the ID of a l7policy or :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance that the l7rule belongs to. :param dict attrs: The attributes to update on the l7rule represented by ``l7rule``. :returns: The updated l7rule :rtype: :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` """ l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) return self._update( _l7rule.L7Rule, l7rule, l7policy_id=l7policyobj.id, **attrs ) def quotas(self, **query): """Return a generator of quotas :param dict query: Optional query parameters to be sent to limit the resources being returned. Currently no query parameter is supported. :returns: A generator of quota objects :rtype: :class:`~openstack.load_balancer.v2.quota.Quota` """ return self._list(_quota.Quota, **query) def get_quota(self, quota): """Get a quota :param quota: The value can be the ID of a quota or a :class:`~openstack.load_balancer.v2.quota.Quota` instance. The ID of a quota is the same as the project ID for the quota. 
:returns: One :class:`~openstack.load_balancer.v2.quota.Quota` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_quota.Quota, quota) def update_quota(self, quota, **attrs): """Update a quota :param quota: Either the ID of a quota or a :class:`~openstack.load_balancer.v2.quota.Quota` instance. The ID of a quota is the same as the project ID for the quota. :param dict attrs: The attributes to update on the quota represented by ``quota``. :returns: The updated quota :rtype: :class:`~openstack.load_balancer.v2.quota.Quota` """ return self._update(_quota.Quota, quota, **attrs) def get_quota_default(self): """Get a default quota :returns: One :class:`~openstack.load_balancer.v2.quota.QuotaDefault` """ return self._get(_quota.QuotaDefault, requires_id=False) def delete_quota(self, quota, ignore_missing=True): """Delete a quota (i.e. reset to the default quota) :param quota: The value can be either the ID of a quota or a :class:`~openstack.load_balancer.v2.quota.Quota` instance. The ID of a quota is the same as the project ID for the quota. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when quota does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent quota. 
:returns: ``None`` """ self._delete(_quota.Quota, quota, ignore_missing=ignore_missing) def providers(self, **query): """Retrieve a generator of providers :returns: A generator of providers instances """ return self._list(_provider.Provider, **query) def provider_flavor_capabilities(self, provider, **query): """Retrieve a generator of provider flavor capabilities :returns: A generator of provider flavor capabilities instances """ return self._list( _provider.ProviderFlavorCapabilities, provider=provider, **query ) def create_flavor_profile(self, **attrs): """Create a new flavor profile from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile`, comprised of the properties on the FlavorProfile class. :returns: The results of profile creation creation :rtype: :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` """ return self._create(_flavor_profile.FlavorProfile, **attrs) def get_flavor_profile(self, *attrs): """Get a flavor profile :param flavor_profile: The value can be the name of a flavor profile or :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` instance. :returns: One :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` """ return self._get(_flavor_profile.FlavorProfile, *attrs) def flavor_profiles(self, **query): """Retrieve a generator of flavor profiles :returns: A generator of flavor profiles instances """ return self._list(_flavor_profile.FlavorProfile, **query) def delete_flavor_profile(self, flavor_profile, ignore_missing=True): """Delete a flavor profile :param flavor_profile: The flavor_profile can be either the ID or a :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` instance :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the flavor profile does not exist. 
When set to ``True``, no exception will be set when attempting to delete a nonexistent flavor profile. :returns: ``None`` """ self._delete( _flavor_profile.FlavorProfile, flavor_profile, ignore_missing=ignore_missing, ) def find_flavor_profile(self, name_or_id, ignore_missing=True): """Find a single flavor profile :param name_or_id: The name or ID of a flavor profile :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the flavor profile does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent flavor profile. :returns: ``None`` """ return self._find( _flavor_profile.FlavorProfile, name_or_id, ignore_missing=ignore_missing, ) def update_flavor_profile(self, flavor_profile, **attrs): """Update a flavor profile :param flavor_profile: The flavor_profile can be either the ID or a :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` instance :param dict attrs: The attributes to update on the flavor profile represented by ``flavor_profile``. :returns: The updated flavor profile :rtype: :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` """ return self._update( _flavor_profile.FlavorProfile, flavor_profile, **attrs ) def create_flavor(self, **attrs): """Create a new flavor from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.flavor.Flavor`, comprised of the properties on the Flavorclass. :returns: The results of flavor creation creation :rtype: :class:`~openstack.load_balancer.v2.flavor.Flavor` """ return self._create(_flavor.Flavor, **attrs) def get_flavor(self, *attrs): """Get a flavor :param flavor: The value can be the ID of a flavor or :class:`~openstack.load_balancer.v2.flavor.Flavor` instance. 
:returns: One :class:`~openstack.load_balancer.v2.flavor.Flavor` """ return self._get(_flavor.Flavor, *attrs) def flavors(self, **query): """Retrieve a generator of flavors :returns: A generator of flavor instances """ return self._list(_flavor.Flavor, **query) def delete_flavor(self, flavor, ignore_missing=True): """Delete a flavor :param flavor: The flavorcan be either the ID or a :class:`~openstack.load_balancer.v2.flavor.Flavor` instance :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the flavor does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent flavor. :returns: ``None`` """ self._delete(_flavor.Flavor, flavor, ignore_missing=ignore_missing) def find_flavor(self, name_or_id, ignore_missing=True): """Find a single flavor :param name_or_id: The name or ID of a flavor :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the flavor does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent flavor. :returns: ``None`` """ return self._find( _flavor.Flavor, name_or_id, ignore_missing=ignore_missing ) def update_flavor(self, flavor, **attrs): """Update a flavor :param flavor: The flavor can be either the ID or a :class:`~openstack.load_balancer.v2.flavor.Flavor` instance :param dict attrs: The attributes to update on the flavor represented by ``flavor``. :returns: The updated flavor :rtype: :class:`~openstack.load_balancer.v2.flavor.Flavor` """ return self._update(_flavor.Flavor, flavor, **attrs) def amphorae(self, **query): """Retrieve a generator of amphorae :returns: A generator of amphora instances """ return self._list(_amphora.Amphora, **query) def get_amphora(self, *attrs): """Get a amphora :param amphora: The value can be the ID of an amphora or :class:`~openstack.load_balancer.v2.amphora.Amphora` instance. 
:returns: One :class:`~openstack.load_balancer.v2.amphora.Amphora` """ return self._get(_amphora.Amphora, *attrs) def find_amphora(self, amphora_id, ignore_missing=True): """Find a single amphora :param amphora_id: The ID of a amphora :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the amphora does not exist. When set to ``True``, no exception will be set when attempting to find a nonexistent amphora. :returns: ``None`` """ return self._find( _amphora.Amphora, amphora_id, ignore_missing=ignore_missing ) def configure_amphora(self, amphora_id, **attrs): """Update the configuration of an amphora agent :param amphora_id: The ID of an amphora :returns: ``None`` """ return self._update(_amphora.AmphoraConfig, amphora_id=amphora_id) def failover_amphora(self, amphora_id, **attrs): """Failover an amphora :param amphora_id: The ID of an amphora :returns: ``None`` """ return self._update(_amphora.AmphoraFailover, amphora_id=amphora_id) def create_availability_zone_profile(self, **attrs): """Create a new availability zone profile from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` comprised of the properties on the AvailabilityZoneProfile class. :returns: The results of profile creation :rtype: :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` """ return self._create( _availability_zone_profile.AvailabilityZoneProfile, **attrs ) def get_availability_zone_profile(self, *attrs): """Get an availability zone profile :param availability_zone_profile: The value can be the ID of an availability_zone profile or :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` instance. 
:returns: One :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` """ return self._get( _availability_zone_profile.AvailabilityZoneProfile, *attrs ) def availability_zone_profiles(self, **query): """Retrieve a generator of availability zone profiles :returns: A generator of availability zone profiles instances """ return self._list( _availability_zone_profile.AvailabilityZoneProfile, **query ) def delete_availability_zone_profile( self, availability_zone_profile, ignore_missing=True ): """Delete an availability zone profile :param availability_zone_profile: The availability_zone_profile can be either the ID or a :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` instance :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the availability zone profile does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent availability zone profile. :returns: ``None`` """ self._delete( _availability_zone_profile.AvailabilityZoneProfile, availability_zone_profile, ignore_missing=ignore_missing, ) def find_availability_zone_profile(self, name_or_id, ignore_missing=True): """Find a single availability zone profile :param name_or_id: The name or ID of a availability zone profile :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the availability zone profile does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent availability zone profile. 
:returns: ``None`` """ return self._find( _availability_zone_profile.AvailabilityZoneProfile, name_or_id, ignore_missing=ignore_missing, ) def update_availability_zone_profile( self, availability_zone_profile, **attrs ): """Update an availability zone profile :param availability_zone_profile: The availability_zone_profile can be either the ID or a :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` instance :param dict attrs: The attributes to update on the availability_zone profile represented by ``availability_zone_profile``. :returns: The updated availability zone profile :rtype: :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` """ return self._update( _availability_zone_profile.AvailabilityZoneProfile, availability_zone_profile, **attrs ) def create_availability_zone(self, **attrs): """Create a new availability zone from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` comprised of the properties on the AvailabilityZoneclass. :returns: The results of availability_zone creation creation :rtype: :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` """ return self._create(_availability_zone.AvailabilityZone, **attrs) def get_availability_zone(self, *attrs): """Get an availability zone :param availability_zone: The value can be the ID of a availability_zone or :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` instance. 
:returns: One :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` """ return self._get(_availability_zone.AvailabilityZone, *attrs) def availability_zones(self, **query): """Retrieve a generator of availability zones :returns: A generator of availability zone instances """ return self._list(_availability_zone.AvailabilityZone, **query) def delete_availability_zone(self, availability_zone, ignore_missing=True): """Delete an availability_zone :param availability_zone: The availability_zone can be either the ID or a :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` instance :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the availability zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent availability zone. :returns: ``None`` """ self._delete( _availability_zone.AvailabilityZone, availability_zone, ignore_missing=ignore_missing, ) def find_availability_zone(self, name_or_id, ignore_missing=True): """Find a single availability zone :param name_or_id: The name or ID of a availability zone :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the availability zone does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent availability zone. :returns: ``None`` """ return self._find( _availability_zone.AvailabilityZone, name_or_id, ignore_missing=ignore_missing, ) def update_availability_zone(self, availability_zone, **attrs): """Update an availability zone :param availability_zone: The availability_zone can be either the ID or a :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` instance :param dict attrs: The attributes to update on the availability_zone represented by ``availability_zone``. 
:returns: The updated availability_zone :rtype: :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` """ return self._update( _availability_zone.AvailabilityZone, availability_zone, **attrs ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/amphora.py0000664000175000017500000001120600000000000023314 0ustar00zuulzuul00000000000000# Copyright 2019 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Amphora(resource.Resource): resource_key = 'amphora' resources_key = 'amphorae' base_path = '/octavia/amphorae' # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters( 'id', 'loadbalancer_id', 'compute_id', 'lb_network_ip', 'vrrp_ip', 'ha_ip', 'vrrp_port_id', 'ha_port_id', 'cert_expiration', 'cert_busy', 'role', 'status', 'vrrp_interface', 'vrrp_id', 'vrrp_priority', 'cached_zone', 'created_at', 'updated_at', 'image_id', 'compute_flavor', ) # Properties #: The ID of the amphora. id = resource.Body('id') #: The ID of the load balancer. loadbalancer_id = resource.Body('loadbalancer_id') #: The ID of the amphora resource in the compute system. compute_id = resource.Body('compute_id') #: The management IP of the amphora. lb_network_ip = resource.Body('lb_network_ip') #: The address of the vrrp port on the amphora. 
vrrp_ip = resource.Body('vrrp_ip') #: The IP address of the Virtual IP (VIP). ha_ip = resource.Body('ha_ip') #: The vrrp port's ID in the networking system. vrrp_port_id = resource.Body('vrrp_port_id') #: The ID of the Virtual IP (VIP) port. ha_port_id = resource.Body('ha_port_id') #: The date the certificate for the amphora expires. cert_expiration = resource.Body('cert_expiration') #: Whether the certificate is in the process of being replaced. cert_busy = resource.Body('cert_busy') #: The role configured for the amphora. One of STANDALONE, MASTER, BACKUP. role = resource.Body('role') #: The status of the amphora. One of: BOOTING, ALLOCATED, READY, #: PENDING_CREATE, PENDING_DELETE, DELETED, ERROR. status = resource.Body('status') #: The bound interface name of the vrrp port on the amphora. vrrp_interface = resource.Body('vrrp_interface') #: The vrrp group's ID for the amphora. vrrp_id = resource.Body('vrrp_id') #: The priority of the amphora in the vrrp group. vrrp_priority = resource.Body('vrrp_priority') #: The availability zone of a compute instance, cached at create time. cached_zone = resource.Body('cached_zone') #: The UTC date and timestamp when the resource was created. created_at = resource.Body('created_at') #: The UTC date and timestamp when the resource was last updated. updated_at = resource.Body('updated_at') #: The ID of the glance image used for the amphora. image_id = resource.Body('image_id') #: The ID of the compute flavor used for the amphora. compute_flavor = resource.Body('compute_flavor') class AmphoraConfig(resource.Resource): base_path = '/octavia/amphorae/%(amphora_id)s/config' # capabilities allow_create = False allow_fetch = False allow_commit = True allow_delete = False allow_list = False allow_empty_commit = True requires_id = False # Properties #: The ID of the amphora. amphora_id = resource.URI('amphora_id') # The default _update code path also has no # way to pass has_body into this function, so overriding the method here. 
def commit(self, session, base_path=None): return super().commit(session, base_path=base_path, has_body=False) class AmphoraFailover(resource.Resource): base_path = '/octavia/amphorae/%(amphora_id)s/failover' # capabilities allow_create = False allow_fetch = False allow_commit = True allow_delete = False allow_list = False allow_empty_commit = True requires_id = False # Properties #: The ID of the amphora. amphora_id = resource.URI('amphora_id') # The default _update code path also has no # way to pass has_body into this function, so overriding the method here. def commit(self, session, base_path=None): return super().commit(session, base_path=base_path, has_body=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/availability_zone.py0000664000175000017500000000271100000000000025373 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class AvailabilityZone(resource.Resource): resource_key = 'availability_zone' resources_key = 'availability_zones' base_path = '/lbaas/availabilityzones' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'name', 'description', 'availability_zone_profile_id', is_enabled='enabled', ) # Properties #: The name of the availability zone. 
name = resource.Body('name') #: The availability zone description. description = resource.Body('description') #: The associated availability zone profile ID availability_zone_profile_id = resource.Body( 'availability_zone_profile_id' ) #: Whether the availability zone is enabled for use or not. is_enabled = resource.Body('enabled') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/availability_zone_profile.py0000664000175000017500000000265700000000000027124 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class AvailabilityZoneProfile(resource.Resource): resource_key = 'availability_zone_profile' resources_key = 'availability_zone_profiles' base_path = '/lbaas/availabilityzoneprofiles' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'id', 'name', 'provider_name', 'availability_zone_data' ) # Properties #: The ID of the availability zone profile. id = resource.Body('id') #: The name of the availability zone profile. name = resource.Body('name') #: The provider this availability zone profile is for. provider_name = resource.Body('provider_name') #: The JSON string containing the availability zone metadata. 
availability_zone_data = resource.Body('availability_zone_data') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/flavor.py0000664000175000017500000000261700000000000023164 0ustar00zuulzuul00000000000000# Copyright 2019 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Flavor(resource.Resource): resource_key = 'flavor' resources_key = 'flavors' base_path = '/lbaas/flavors' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'id', 'name', 'description', 'flavor_profile_id', is_enabled='enabled' ) # Properties #: The ID of the flavor. id = resource.Body('id') #: The name of the flavor. name = resource.Body('name') #: The flavor description. description = resource.Body('description') #: The associated flavor profile ID flavor_profile_id = resource.Body('flavor_profile_id') #: Whether the flavor is enabled for use or not. is_enabled = resource.Body('enabled') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/flavor_profile.py0000664000175000017500000000253200000000000024700 0ustar00zuulzuul00000000000000# Copyright 2019 Rackspace, US Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class FlavorProfile(resource.Resource): resource_key = 'flavorprofile' resources_key = 'flavorprofiles' base_path = '/lbaas/flavorprofiles' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'id', 'name', 'provider_name', 'flavor_data' ) # Properties #: The ID of the flavor profile. id = resource.Body('id') #: The name of the flavor profile. name = resource.Body('name') #: The provider this flavor profile is for. provider_name = resource.Body('provider_name') #: The JSON string containing the flavor metadata. flavor_data = resource.Body('flavor_data') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/health_monitor.py0000664000175000017500000000630500000000000024705 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack import resource class HealthMonitor(resource.Resource, tag.TagMixin): resource_key = 'healthmonitor' resources_key = 'healthmonitors' base_path = '/lbaas/healthmonitors' # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True allow_commit = True _query_mapping = resource.QueryParameters( 'name', 'created_at', 'updated_at', 'delay', 'expected_codes', 'http_method', 'max_retries', 'max_retries_down', 'pool_id', 'provisioning_status', 'operating_status', 'timeout', 'project_id', 'type', 'url_path', is_admin_state_up='admin_state_up', **tag.TagMixin._tag_query_parameters ) #: Properties #: Timestamp when the health monitor was created. created_at = resource.Body('created_at') #: The time, in seconds, between sending probes to members. delay = resource.Body('delay', type=int) #: The expected http status codes to get from a successful health check expected_codes = resource.Body('expected_codes') #: The HTTP method that the monitor uses for requests http_method = resource.Body('http_method') #: The administrative state of the health monitor *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The number of successful checks before changing the operating status #: of the member to ONLINE. max_retries = resource.Body('max_retries', type=int) #: The number of allowed check failures before changing the operating #: status of the member to ERROR. max_retries_down = resource.Body('max_retries_down', type=int) #: The health monitor name name = resource.Body('name') #: Operating status of the member. operating_status = resource.Body('operating_status') #: List of associated pools. 
#: *Type: list of dicts which contain the pool IDs* pools = resource.Body('pools', type=list) #: The ID of the associated Pool pool_id = resource.Body('pool_id') #: The ID of the project project_id = resource.Body('project_id') #: The provisioning status of this member. provisioning_status = resource.Body('provisioning_status') #: The time, in seconds, after which a health check times out timeout = resource.Body('timeout', type=int) #: The type of health monitor type = resource.Body('type') #: Timestamp when the member was last updated. updated_at = resource.Body('updated_at') #: The HTTP path of the request to test the health of a member url_path = resource.Body('url_path') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/l7_policy.py0000664000175000017500000000541200000000000023570 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.common import tag from openstack import resource class L7Policy(resource.Resource, tag.TagMixin): resource_key = 'l7policy' resources_key = 'l7policies' base_path = '/lbaas/l7policies' # capabilities allow_create = True allow_list = True allow_fetch = True allow_commit = True allow_delete = True _query_mapping = resource.QueryParameters( 'action', 'description', 'listener_id', 'name', 'position', 'redirect_pool_id', 'redirect_url', 'provisioning_status', 'operating_status', 'redirect_prefix', 'project_id', is_admin_state_up='admin_state_up', **tag.TagMixin._tag_query_parameters ) #: Properties #: The action to be taken l7policy is matched action = resource.Body('action') #: Timestamp when the L7 policy was created. created_at = resource.Body('created_at') #: The l7policy description description = resource.Body('description') #: The administrative state of the l7policy *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The ID of the listener associated with this l7policy listener_id = resource.Body('listener_id') #: The l7policy name name = resource.Body('name') #: Operating status of the member. operating_status = resource.Body('operating_status') #: Sequence number of this l7policy position = resource.Body('position', type=int) #: The ID of the project this l7policy is associated with. project_id = resource.Body('project_id') #: The provisioning status of this l7policy provisioning_status = resource.Body('provisioning_status') #: The ID of the pool to which the requests will be redirected redirect_pool_id = resource.Body('redirect_pool_id') #: The URL prefix to which the requests should be redirected redirect_prefix = resource.Body('redirect_prefix') #: The URL to which the requests should be redirected redirect_url = resource.Body('redirect_url') #: The list of L7Rules associated with the l7policy rules = resource.Body('rules', type=list) #: Timestamp when the member was last updated. 
updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/l7_rule.py0000664000175000017500000000471200000000000023242 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack import resource class L7Rule(resource.Resource, tag.TagMixin): resource_key = 'rule' resources_key = 'rules' base_path = '/lbaas/l7policies/%(l7policy_id)s/rules' # capabilities allow_create = True allow_list = True allow_fetch = True allow_commit = True allow_delete = True _query_mapping = resource.QueryParameters( 'compare_type', 'created_at', 'invert', 'key', 'project_id', 'provisioning_status', 'type', 'updated_at', 'rule_value', 'operating_status', is_admin_state_up='admin_state_up', l7_policy_id='l7policy_id', **tag.TagMixin._tag_query_parameters ) #: Properties #: The administrative state of the l7policy *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: comparison type to be used with the value in this L7 rule. compare_type = resource.Body('compare_type') #: Timestamp when the L7 rule was created. created_at = resource.Body('created_at') #: inverts the logic of the rule if True # (ie. perform a logical NOT on the rule) invert = resource.Body('invert', type=bool) #: The key to use for the comparison. 
key = resource.Body('key') #: The ID of the associated l7 policy l7_policy_id = resource.URI('l7policy_id') #: The operating status of this l7rule operating_status = resource.Body('operating_status') #: The ID of the project this l7policy is associated with. project_id = resource.Body('project_id') #: The provisioning status of this l7policy provisioning_status = resource.Body('provisioning_status') #: The type of L7 rule type = resource.Body('type') #: Timestamp when the L7 rule was updated. updated_at = resource.Body('updated_at') #: value to be compared with rule_value = resource.Body('value') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/listener.py0000664000175000017500000001435500000000000023522 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.common import tag from openstack import resource class Listener(resource.Resource, tag.TagMixin): resource_key = 'listener' resources_key = 'listeners' base_path = '/lbaas/listeners' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'connection_limit', 'default_pool_id', 'default_tls_container_ref', 'description', 'name', 'project_id', 'protocol', 'protocol_port', 'created_at', 'updated_at', 'provisioning_status', 'operating_status', 'sni_container_refs', 'insert_headers', 'load_balancer_id', 'timeout_client_data', 'timeout_member_connect', 'timeout_member_data', 'timeout_tcp_inspect', 'allowed_cidrs', 'tls_ciphers', 'tls_versions', 'alpn_protocols', 'hsts_max_age', is_hsts_include_subdomains='hsts_include_subdomains', is_hsts_preload='hsts_preload', is_admin_state_up='admin_state_up', **tag.TagMixin._tag_query_parameters ) # Properties #: List of IPv4 or IPv6 CIDRs. allowed_cidrs = resource.Body('allowed_cidrs', type=list) #: List of ALPN protocols. alpn_protocols = resource.Body('alpn_protocols', type=list) #: The maximum number of connections permitted for this load balancer. #: Default is infinite. connection_limit = resource.Body('connection_limit') #: Timestamp when the listener was created. created_at = resource.Body('created_at') #: Default pool to which the requests will be routed. default_pool = resource.Body('default_pool') #: ID of default pool. Must have compatible protocol with listener. default_pool_id = resource.Body('default_pool_id') #: A reference to a container of TLS secrets. default_tls_container_ref = resource.Body('default_tls_container_ref') #: Description for the listener. 
description = resource.Body('description') #: Defines whether the `include_subdomains` directive is used for HSTS or #: not is_hsts_include_subdomains = resource.Body( 'hsts_include_subdomains', type=bool ) #: Enables HTTP Strict Transport Security (HSTS) and sets the `max_age` #: directive to given value hsts_max_age = resource.Body('hsts_max_age', type=int) #: Defines whether the `hsts_preload` directive is used for HSTS or not is_hsts_preload = resource.Body('hsts_preload', type=bool) #: Dictionary of additional headers insertion into HTTP header. insert_headers = resource.Body('insert_headers', type=dict) #: The administrative state of the listener, which is up #: ``True`` or down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: List of l7policies associated with this listener. l7_policies = resource.Body('l7policies', type=list) #: The ID of the parent load balancer. load_balancer_id = resource.Body('loadbalancer_id') #: List of load balancers associated with this listener. #: *Type: list of dicts which contain the load balancer IDs* load_balancers = resource.Body('loadbalancers', type=list) #: Name of the listener name = resource.Body('name') #: Operating status of the listener. operating_status = resource.Body('operating_status') #: The ID of the project this listener is associated with. project_id = resource.Body('project_id') #: The protocol of the listener, which is TCP, HTTP, HTTPS #: or TERMINATED_HTTPS. protocol = resource.Body('protocol') #: Port the listener will listen to, e.g. 80. protocol_port = resource.Body('protocol_port', type=int) #: The provisioning status of this listener. provisioning_status = resource.Body('provisioning_status') #: A list of references to TLS secrets. #: *Type: list* sni_container_refs = resource.Body('sni_container_refs') #: Timestamp when the listener was last updated. updated_at = resource.Body('updated_at') #: Frontend client inactivity timeout in milliseconds. 
timeout_client_data = resource.Body('timeout_client_data', type=int) #: Backend member connection timeout in milliseconds. timeout_member_connect = resource.Body('timeout_member_connect', type=int) #: Backend member inactivity timeout in milliseconds. timeout_member_data = resource.Body('timeout_member_data', type=int) #: Time, in milliseconds, to wait for additional TCP packets for content #: inspection. timeout_tcp_inspect = resource.Body('timeout_tcp_inspect', type=int) #: Stores a cipher string in OpenSSL format. tls_ciphers = resource.Body('tls_ciphers') #: A lsit of TLS protocols to be used by the listener tls_versions = resource.Body('tls_versions', type=list) class ListenerStats(resource.Resource): resource_key = 'stats' base_path = '/lbaas/listeners/%(listener_id)s/stats' # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = False # Properties #: The ID of the listener. listener_id = resource.URI('listener_id') #: The currently active connections. active_connections = resource.Body('active_connections', type=int) #: The total bytes received. bytes_in = resource.Body('bytes_in', type=int) #: The total bytes sent. bytes_out = resource.Body('bytes_out', type=int) #: The total requests that were unable to be fulfilled. request_errors = resource.Body('request_errors', type=int) #: The total connections handled. total_connections = resource.Body('total_connections', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/load_balancer.py0000664000175000017500000001202600000000000024434 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack import resource class LoadBalancer(resource.Resource, tag.TagMixin): resource_key = 'loadbalancer' resources_key = 'loadbalancers' base_path = '/lbaas/loadbalancers' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'flavor_id', 'name', 'project_id', 'provider', 'vip_address', 'vip_network_id', 'vip_port_id', 'vip_subnet_id', 'vip_qos_policy_id', 'provisioning_status', 'operating_status', 'availability_zone', is_admin_state_up='admin_state_up', **tag.TagMixin._tag_query_parameters ) # Properties #: The administrative state of the load balancer *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: Name of the target Octavia availability zone availability_zone = resource.Body('availability_zone') #: Timestamp when the load balancer was created created_at = resource.Body('created_at') #: The load balancer description description = resource.Body('description') #: The load balancer flavor ID flavor_id = resource.Body('flavor_id') #: List of listeners associated with this load balancer listeners = resource.Body('listeners', type=list) #: The load balancer name name = resource.Body('name') #: Operating status of the load balancer operating_status = resource.Body('operating_status') #: List of pools associated with this load balancer pools = resource.Body('pools', type=list) #: The ID of the project this load balancer is associated with. 
project_id = resource.Body('project_id') #: Provider name for the load balancer. provider = resource.Body('provider') #: The provisioning status of this load balancer provisioning_status = resource.Body('provisioning_status') #: Timestamp when the load balancer was last updated updated_at = resource.Body('updated_at') #: VIP address of load balancer vip_address = resource.Body('vip_address') #: VIP netowrk ID vip_network_id = resource.Body('vip_network_id') #: VIP port ID vip_port_id = resource.Body('vip_port_id') #: VIP subnet ID vip_subnet_id = resource.Body('vip_subnet_id') # VIP qos policy id vip_qos_policy_id = resource.Body('vip_qos_policy_id') #: Additional VIPs additional_vips = resource.Body('additional_vips', type=list) def delete(self, session, error_message=None): request = self._prepare_request() params = {} if ( hasattr(self, 'cascade') and isinstance(self.cascade, bool) and self.cascade ): params['cascade'] = True response = session.delete(request.url, params=params) self._translate_response( response, has_body=False, error_message=error_message ) return self class LoadBalancerStats(resource.Resource): resource_key = 'stats' base_path = '/lbaas/loadbalancers/%(lb_id)s/stats' # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = False # Properties #: The ID of the load balancer. lb_id = resource.URI('lb_id') #: The currently active connections. active_connections = resource.Body('active_connections', type=int) #: The total bytes received. bytes_in = resource.Body('bytes_in', type=int) #: The total bytes sent. bytes_out = resource.Body('bytes_out', type=int) #: The total requests that were unable to be fulfilled. request_errors = resource.Body('request_errors', type=int) #: The total connections handled. 
total_connections = resource.Body('total_connections', type=int) class LoadBalancerFailover(resource.Resource): base_path = '/lbaas/loadbalancers/%(lb_id)s/failover' # capabilities allow_create = False allow_fetch = False allow_commit = True allow_delete = False allow_list = False allow_empty_commit = True requires_id = False # Properties #: The ID of the load balancer. lb_id = resource.URI('lb_id') # The default _update code path also has no # way to pass has_body into this function, so overriding the method here. def commit(self, session, base_path=None): return super().commit(session, base_path=base_path, has_body=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/member.py0000664000175000017500000000612400000000000023137 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.common import tag from openstack import resource class Member(resource.Resource, tag.TagMixin): resource_key = 'member' resources_key = 'members' base_path = '/lbaas/pools/%(pool_id)s/members' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'address', 'name', 'protocol_port', 'subnet_id', 'weight', 'created_at', 'updated_at', 'provisioning_status', 'operating_status', 'project_id', 'monitor_address', 'monitor_port', 'backup', is_admin_state_up='admin_state_up', **tag.TagMixin._tag_query_parameters ) # Properties #: The IP address of the member. address = resource.Body('address') #: Timestamp when the member was created. created_at = resource.Body('created_at') #: The administrative state of the member, which is up ``True`` or #: down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: IP address used to monitor this member monitor_address = resource.Body('monitor_address') #: Port used to monitor this member monitor_port = resource.Body('monitor_port', type=int) #: Name of the member. name = resource.Body('name') #: Operating status of the member. operating_status = resource.Body('operating_status') #: The ID of the owning pool. pool_id = resource.URI('pool_id') #: The provisioning status of this member. provisioning_status = resource.Body('provisioning_status') #: The ID of the project this member is associated with. project_id = resource.Body('project_id') #: The port on which the application is hosted. protocol_port = resource.Body('protocol_port', type=int) #: Subnet ID in which to access this member. subnet_id = resource.Body('subnet_id') #: Timestamp when the member was last updated. updated_at = resource.Body('updated_at') #: A positive integer value that indicates the relative portion of traffic #: that this member should receive from the pool. 
For example, a member #: with a weight of 10 receives five times as much traffic as a member #: with weight of 2. weight = resource.Body('weight', type=int) #: A bool value that indicates whether the member is a backup or not. #: Backup members only receive traffic when all non-backup members #: are down. backup = resource.Body('backup', type=bool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/pool.py0000664000175000017500000000730300000000000022641 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack import resource class Pool(resource.Resource, tag.TagMixin): resource_key = 'pool' resources_key = 'pools' base_path = '/lbaas/pools' # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True allow_commit = True _query_mapping = resource.QueryParameters( 'health_monitor_id', 'lb_algorithm', 'listener_id', 'loadbalancer_id', 'description', 'name', 'project_id', 'protocol', 'created_at', 'updated_at', 'provisioning_status', 'operating_status', 'tls_enabled', 'tls_ciphers', 'tls_versions', 'alpn_protocols', 'ca_tls_container_ref', 'crl_container_ref', is_admin_state_up='admin_state_up', **tag.TagMixin._tag_query_parameters ) #: Properties #: List of ALPN protocols. 
alpn_protocols = resource.Body('alpn_protocols', type=list) #: Timestamp when the pool was created created_at = resource.Body('created_at') #: Description for the pool. description = resource.Body('description') #: Health Monitor ID health_monitor_id = resource.Body('healthmonitor_id') #: The administrative state of the pool *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The loadbalancing algorithm used in the pool lb_algorithm = resource.Body('lb_algorithm') #: ID of listener associated with this pool listener_id = resource.Body('listener_id') #: List of listeners associated with this pool listeners = resource.Body('listeners', type=list) #: ID of load balancer associated with this pool loadbalancer_id = resource.Body('loadbalancer_id') #: List of loadbalancers associated with this pool loadbalancers = resource.Body('loadbalancers', type=list) #: Members associated with this pool members = resource.Body('members', type=list) #: The pool name name = resource.Body('name') #: Operating status of the pool operating_status = resource.Body('operating_status') #: The ID of the project project_id = resource.Body('project_id') #: The protocol of the pool protocol = resource.Body('protocol') #: Provisioning status of the pool provisioning_status = resource.Body('provisioning_status') #: Stores a string of cipher strings in OpenSSL format. tls_ciphers = resource.Body('tls_ciphers') #: A JSON object specifying the session persistence for the pool. 
session_persistence = resource.Body('session_persistence', type=dict) #: A list of TLS protocol versions to be used in by the pool tls_versions = resource.Body('tls_versions', type=list) #: Timestamp when the pool was updated updated_at = resource.Body('updated_at') #: Use TLS for connections to backend member servers *Type: bool* tls_enabled = resource.Body('tls_enabled', type=bool) #: Stores the ca certificate used by backend servers ca_tls_container_ref = resource.Body('ca_tls_container_ref') #: Stores the revocation list file crl_container_ref = resource.Body('crl_container_ref') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/provider.py0000664000175000017500000000323700000000000023524 0ustar00zuulzuul00000000000000# Copyright 2019 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Provider(resource.Resource): resources_key = 'providers' base_path = '/lbaas/providers' # capabilities allow_create = False allow_fetch = False allow_commit = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters('description', 'name') # Properties #: The provider name. name = resource.Body('name') #: The provider description. 
description = resource.Body('description') class ProviderFlavorCapabilities(resource.Resource): resources_key = 'flavor_capabilities' base_path = '/lbaas/providers/%(provider)s/flavor_capabilities' # capabilities allow_create = False allow_fetch = False allow_commit = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters('description', 'name') # Properties #: The provider name to query. provider = resource.URI('provider') #: The provider name. name = resource.Body('name') #: The provider description. description = resource.Body('description') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/v2/quota.py0000664000175000017500000000435000000000000023020 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Quota(resource.Resource): resource_key = 'quota' resources_key = 'quotas' base_path = '/lbaas/quotas' # capabilities allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The maximum amount of load balancers you can have. *Type: int* load_balancers = resource.Body('load_balancer', type=int) #: The maximum amount of listeners you can create. *Type: int* listeners = resource.Body('listener', type=int) #: The maximum amount of pools you can create. 
*Type: int* pools = resource.Body('pool', type=int) #: The maximum amount of health monitors you can create. *Type: int* health_monitors = resource.Body('health_monitor', type=int) #: The maximum amount of members you can create. *Type: int* members = resource.Body('member', type=int) #: The ID of the project this quota is associated with. project_id = resource.Body('project_id', alternate_id=True) def _prepare_request( self, requires_id=True, base_path=None, prepend_key=False, **kwargs ): _request = super()._prepare_request( requires_id, prepend_key, base_path=base_path ) if self.resource_key in _request.body: _body = _request.body[self.resource_key] else: _body = _request.body if 'id' in _body: del _body['id'] return _request class QuotaDefault(Quota): base_path = '/lbaas/quotas/defaults' allow_retrieve = True allow_commit = False allow_delete = False allow_list = False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/load_balancer/version.py0000664000175000017500000000147400000000000023031 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # capabilities allow_list = True # Properties links = resource.Body('links') status = resource.Body('status') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2933364 openstacksdk-4.0.0/openstack/message/0000775000175000017500000000000000000000000017642 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/message/__init__.py0000664000175000017500000000000000000000000021741 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/message/message_service.py0000664000175000017500000000142000000000000023355 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.message.v2 import _proxy from openstack import service_description class MessageService(service_description.ServiceDescription): """The message service.""" supported_versions = { '2': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2933364 openstacksdk-4.0.0/openstack/message/v2/0000775000175000017500000000000000000000000020171 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/message/v2/__init__.py0000664000175000017500000000000000000000000022270 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/message/v2/_proxy.py0000664000175000017500000003063400000000000022071 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.message.v2 import claim as _claim from openstack.message.v2 import message as _message from openstack.message.v2 import queue as _queue from openstack.message.v2 import subscription as _subscription from openstack import proxy from openstack import resource class Proxy(proxy.Proxy): _resource_registry = { "claim": _claim.Claim, "message": _message.Message, "queue": _queue.Queue, "subscription": _subscription.Subscription, } def create_queue(self, **attrs): """Create a new queue from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.message.v2.queue.Queue`, comprised of the properties on the Queue class. :returns: The results of queue creation :rtype: :class:`~openstack.message.v2.queue.Queue` """ return self._create(_queue.Queue, **attrs) def get_queue(self, queue): """Get a queue :param queue: The value can be the name of a queue or a :class:`~openstack.message.v2.queue.Queue` instance. :returns: One :class:`~openstack.message.v2.queue.Queue` :raises: :class:`~openstack.exceptions.NotFoundException` when no queue matching the name could be found. """ return self._get(_queue.Queue, queue) def queues(self, **query): """Retrieve a generator of queues :param kwargs query: Optional query parameters to be sent to restrict the queues to be returned. Available parameters include: * limit: Requests at most the specified number of items be returned from the query. * marker: Specifies the ID of the last-seen queue. Use the limit parameter to make an initial limited request and use the ID of the last-seen queue from the response as the marker parameter value in a subsequent limited request. :returns: A generator of queue instances. """ return self._list(_queue.Queue, **query) def delete_queue(self, value, ignore_missing=True): """Delete a queue :param value: The value can be either the name of a queue or a :class:`~openstack.message.v2.queue.Queue` instance. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the queue does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent queue. :returns: ``None`` """ return self._delete(_queue.Queue, value, ignore_missing=ignore_missing) def post_message(self, queue_name, messages): """Post messages to given queue :param queue_name: The name of target queue to post message to. :param messages: List of messages body and TTL to post. :type messages: :py:class:`list` :returns: A string includes location of messages successfully posted. """ message = self._get_resource( _message.Message, None, queue_name=queue_name ) return message.post(self, messages) def messages(self, queue_name, **query): """Retrieve a generator of messages :param queue_name: The name of target queue to query messages from. :param kwargs query: Optional query parameters to be sent to restrict the messages to be returned. Available parameters include: * limit: Requests at most the specified number of items be returned from the query. * marker: Specifies the ID of the last-seen subscription. Use the limit parameter to make an initial limited request and use the ID of the last-seen subscription from the response as the marker parameter value in a subsequent limited request. * echo: Indicate if the messages can be echoed back to the client that posted them. * include_claimed: Indicate if the messages list should include the claimed messages. :returns: A generator of message instances. """ query["queue_name"] = queue_name return self._list(_message.Message, **query) def get_message(self, queue_name, message): """Get a message :param queue_name: The name of target queue to get message from. :param message: The value can be the name of a message or a :class:`~openstack.message.v2.message.Message` instance. 
:returns: One :class:`~openstack.message.v2.message.Message` :raises: :class:`~openstack.exceptions.NotFoundException` when no message matching the criteria could be found. """ message = self._get_resource( _message.Message, message, queue_name=queue_name ) return self._get(_message.Message, message) def delete_message( self, queue_name, value, claim=None, ignore_missing=True ): """Delete a message :param queue_name: The name of target queue to delete message from. :param value: The value can be either the name of a message or a :class:`~openstack.message.v2.message.Message` instance. :param claim: The value can be the ID or a :class:`~openstack.message.v2.claim.Claim` instance of the claim seizing the message. If None, the message has not been claimed. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the message does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent message. :returns: ``None`` """ message = self._get_resource( _message.Message, value, queue_name=queue_name ) message.claim_id = resource.Resource._get_id(claim) return self._delete( _message.Message, message, ignore_missing=ignore_missing ) def create_subscription(self, queue_name, **attrs): """Create a new subscription from attributes :param queue_name: The name of target queue to subscribe on. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.message.v2.subscription.Subscription`, comprised of the properties on the Subscription class. :returns: The results of subscription creation :rtype: :class:`~openstack.message.v2.subscription.Subscription` """ return self._create( _subscription.Subscription, queue_name=queue_name, **attrs ) def subscriptions(self, queue_name, **query): """Retrieve a generator of subscriptions :param queue_name: The name of target queue to subscribe on. 
:param kwargs query: Optional query parameters to be sent to restrict the subscriptions to be returned. Available parameters include: * limit: Requests at most the specified number of items be returned from the query. * marker: Specifies the ID of the last-seen subscription. Use the limit parameter to make an initial limited request and use the ID of the last-seen subscription from the response as the marker parameter value in a subsequent limited request. :returns: A generator of subscription instances. """ query["queue_name"] = queue_name return self._list(_subscription.Subscription, **query) def get_subscription(self, queue_name, subscription): """Get a subscription :param queue_name: The name of target queue of subscription. :param message: The value can be the ID of a subscription or a :class:`~openstack.message.v2.subscription.Subscription` instance. :returns: One :class:`~openstack.message.v2.subscription.Subscription` :raises: :class:`~openstack.exceptions.NotFoundException` when no subscription matching the criteria could be found. """ subscription = self._get_resource( _subscription.Subscription, subscription, queue_name=queue_name ) return self._get(_subscription.Subscription, subscription) def delete_subscription(self, queue_name, value, ignore_missing=True): """Delete a subscription :param queue_name: The name of target queue to delete subscription from. :param value: The value can be either the name of a subscription or a :class:`~openstack.message.v2.subscription.Subscription` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the subscription does not exist. When set to ``True``, no exception will be thrown when attempting to delete a nonexistent subscription. 
:returns: ``None`` """ subscription = self._get_resource( _subscription.Subscription, value, queue_name=queue_name ) return self._delete( _subscription.Subscription, subscription, ignore_missing=ignore_missing, ) def create_claim(self, queue_name, **attrs): """Create a new claim from attributes :param queue_name: The name of target queue to claim message from. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.message.v2.claim.Claim`, comprised of the properties on the Claim class. :returns: The results of claim creation :rtype: :class:`~openstack.message.v2.claim.Claim` """ return self._create(_claim.Claim, queue_name=queue_name, **attrs) def get_claim(self, queue_name, claim): """Get a claim :param queue_name: The name of target queue to claim message from. :param claim: The value can be either the ID of a claim or a :class:`~openstack.message.v2.claim.Claim` instance. :returns: One :class:`~openstack.message.v2.claim.Claim` :raises: :class:`~openstack.exceptions.NotFoundException` when no claim matching the criteria could be found. """ return self._get(_claim.Claim, claim, queue_name=queue_name) def update_claim(self, queue_name, claim, **attrs): """Update an existing claim from attributes :param queue_name: The name of target queue to claim message from. :param claim: The value can be either the ID of a claim or a :class:`~openstack.message.v2.claim.Claim` instance. :param dict attrs: Keyword arguments which will be used to update a :class:`~openstack.message.v2.claim.Claim`, comprised of the properties on the Claim class. :returns: The results of claim update :rtype: :class:`~openstack.message.v2.claim.Claim` """ return self._update( _claim.Claim, claim, queue_name=queue_name, **attrs ) def delete_claim(self, queue_name, claim, ignore_missing=True): """Delete a claim :param queue_name: The name of target queue to claim messages from. 
:param claim: The value can be either the ID of a claim or a :class:`~openstack.message.v2.claim.Claim` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the claim does not exist. When set to ``True``, no exception will be thrown when attempting to delete a nonexistent claim. :returns: ``None`` """ return self._delete( _claim.Claim, claim, queue_name=queue_name, ignore_missing=ignore_missing, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/message/v2/claim.py0000664000175000017500000001152200000000000021631 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack import resource class Claim(resource.Resource): # FIXME(anyone): The name string of `location` field of Zaqar API response # is lower case. That is inconsistent with the guide from API-WG. This is # a workaround for this issue. location = resource.Header("location") resources_key = 'claims' base_path = '/queues/%(queue_name)s/claims' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True commit_method = 'PATCH' # Properties #: The value in seconds indicating how long the claim has existed. 
age = resource.Body("age") #: In case worker stops responding for a long time, the server will #: extend the lifetime of claimed messages to be at least as long as #: the lifetime of the claim itself, plus the specified grace period. #: Must between 60 and 43200 seconds(12 hours). grace = resource.Body("grace") #: The number of messages to claim. Default 10, up to 20. limit = resource.Body("limit") #: Messages have been successfully claimed. messages = resource.Body("messages") #: Number of seconds the server wait before releasing the claim. Must #: between 60 and 43200 seconds(12 hours). ttl = resource.Body("ttl") #: The name of queue to claim message from. queue_name = resource.URI("queue_name") #: The ID to identify the client accessing Zaqar API. Must be specified #: in header for each API request. client_id = resource.Header("Client-ID") #: The ID to identify the project. Must be provided when keystone #: authentication is not enabled in Zaqar service. project_id = resource.Header("X-PROJECT-ID") def _translate_response(self, response, has_body=True): super()._translate_response(response, has_body=has_body) if has_body and self.location: # Extract claim ID from location self.id = self.location.split("claims/")[1] def create(self, session, prepend_key=False, base_path=None): request = self._prepare_request( requires_id=False, prepend_key=prepend_key, base_path=base_path ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) response = session.post( request.url, json=request.body, headers=request.headers ) # For case no message was claimed successfully, 204 No Content # message will be returned. In other cases, we translate response # body which has `messages` field(list) included. 
if response.status_code != 204: self._translate_response(response) return self def fetch( self, session, requires_id=True, base_path=None, error_message=None, skip_cache=False, ): request = self._prepare_request( requires_id=requires_id, base_path=base_path ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) response = session.get( request.url, headers=request.headers, skip_cache=False ) self._translate_response(response) return self def commit( self, session, prepend_key=False, has_body=False, base_path=None ): request = self._prepare_request( prepend_key=prepend_key, base_path=base_path ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) session.patch(request.url, json=request.body, headers=request.headers) return self def delete(self, session): request = self._prepare_request() headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) response = session.delete(request.url, headers=request.headers) self._translate_response(response, has_body=False) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/message/v2/message.py0000664000175000017500000001312700000000000022173 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import typing as ty import uuid from openstack import resource class Message(resource.Resource): # FIXME(anyone): The name string of `location` field of Zaqar API response # is lower case. That is inconsistent with the guide from API-WG. This is # a workaround for this issue. location = resource.Header("location") resources_key = 'messages' base_path = '/queues/%(queue_name)s/messages' # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True _query_mapping = resource.QueryParameters("echo", "include_claimed") # Properties #: The value in second to specify how long the message has been #: posted to the queue. age = resource.Body("age") #: A dictionary specifies an arbitrary document that constitutes the #: body of the message being sent. body = resource.Body("body") #: An uri string describe the location of the message resource. href = resource.Body("href") #: The value in seconds to specify how long the server waits before #: marking the message as expired and removing it from the queue. ttl = resource.Body("ttl") #: The name of target queue message is post to or got from. queue_name = resource.URI("queue_name") #: The ID to identify the client accessing Zaqar API. Must be specified #: in header for each API request. client_id = resource.Header("Client-ID") #: The ID to identify the project accessing Zaqar API. Must be specified #: in case keystone auth is not enabled in Zaqar service. 
project_id = resource.Header("X-PROJECT-ID") # FIXME(stephenfin): This is actually a query arg but we need it for # deletions and resource.delete doesn't respect these currently claim_id: ty.Optional[str] = None def post(self, session, messages): request = self._prepare_request(requires_id=False, prepend_key=True) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) request.body = {'messages': messages} response = session.post( request.url, json=request.body, headers=request.headers ) return response.json()['resources'] @classmethod def list(cls, session, paginated=True, base_path=None, **params): """This method is a generator which yields message objects. This is almost the copy of list method of resource.Resource class. The only difference is the request header now includes `Client-ID` and `X-PROJECT-ID` fields which are required by Zaqar v2 API. """ more_data = True if base_path is None: base_path = cls.base_path uri = base_path % params headers = { "Client-ID": params.get('client_id', None) or str(uuid.uuid4()), "X-PROJECT-ID": params.get('project_id', None) or session.get_project_id(), } query_params = cls._query_mapping._transpose(params, cls) while more_data: resp = session.get(uri, headers=headers, params=query_params) resp = resp.json() resp = resp[cls.resources_key] if not resp: more_data = False yielded = 0 new_marker = None for data in resp: value = cls.existing(**data) new_marker = value.id yielded += 1 yield value if not paginated: return if "limit" in query_params and yielded < query_params["limit"]: return query_params["limit"] = yielded query_params["marker"] = new_marker def fetch( self, session, requires_id=True, base_path=None, error_message=None, skip_cache=False, ): request = self._prepare_request( requires_id=requires_id, base_path=base_path ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or 
session.get_project_id(), } request.headers.update(headers) response = session.get( request.url, headers=headers, skip_cache=skip_cache ) self._translate_response(response) return self def delete(self, session): request = self._prepare_request() headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) # For Zaqar v2 API requires client to specify claim_id as query # parameter when deleting a message that has been claimed, we # rebuild the request URI if claim_id is not None. if self.claim_id: request.url += '?claim_id=%s' % self.claim_id response = session.delete(request.url, headers=headers) self._translate_response(response, has_body=False) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/message/v2/queue.py0000664000175000017500000001216300000000000021672 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack import resource class Queue(resource.Resource): # FIXME(anyone): The name string of `location` field of Zaqar API response # is lower case. That is inconsistent with the guide from API-WG. This is # a workaround for this issue. 
location = resource.Header("location") resources_key = "queues" base_path = "/queues" # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True # Properties #: The default TTL of messages defined for a queue, which will effect for #: any messages posted to the queue. default_message_ttl = resource.Body("_default_message_ttl") #: Description of the queue. description = resource.Body("description") #: The max post size of messages defined for a queue, which will effect #: for any messages posted to the queue. max_messages_post_size = resource.Body("_max_messages_post_size") #: Name of the queue. The name is the unique identity of a queue. It #: must not exceed 64 bytes in length, and it is limited to US-ASCII #: letters, digits, underscores, and hyphens. name = resource.Body("name", alternate_id=True) #: The ID to identify the client accessing Zaqar API. Must be specified #: in header for each API request. client_id = resource.Header("Client-ID") #: The ID to identify the project accessing Zaqar API. Must be specified #: in case keystone auth is not enabled in Zaqar service. project_id = resource.Header("X-PROJECT-ID") def create(self, session, prepend_key=True, base_path=None): request = self._prepare_request( requires_id=True, prepend_key=prepend_key, base_path=None ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) response = session.put( request.url, json=request.body, headers=request.headers ) self._translate_response(response, has_body=False) return self @classmethod def list(cls, session, paginated=False, base_path=None, **params): """This method is a generator which yields queue objects. This is almost the copy of list method of resource.Resource class. The only difference is the request header now includes `Client-ID` and `X-PROJECT-ID` fields which are required by Zaqar v2 API. 
""" more_data = True query_params = cls._query_mapping._transpose(params, cls) if base_path is None: base_path = cls.base_path uri = base_path % params headers = { "Client-ID": params.get('client_id', None) or str(uuid.uuid4()), "X-PROJECT-ID": params.get('project_id', None) or session.get_project_id(), } while more_data: resp = session.get(uri, headers=headers, params=query_params) resp = resp.json() resp = resp[cls.resources_key] if not resp: more_data = False yielded = 0 new_marker = None for data in resp: value = cls.existing(**data) new_marker = value.id yielded += 1 yield value if not paginated: return if "limit" in query_params and yielded < query_params["limit"]: return query_params["limit"] = yielded query_params["marker"] = new_marker def fetch( self, session, requires_id=True, base_path=None, error_message=None, skip_cache=False, ): request = self._prepare_request( requires_id=requires_id, base_path=base_path ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) response = session.get( request.url, headers=headers, skip_cache=skip_cache ) self._translate_response(response) return self def delete(self, session): request = self._prepare_request() headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) response = session.delete(request.url, headers=headers) self._translate_response(response, has_body=False) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/message/v2/subscription.py0000664000175000017500000001325700000000000023277 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack import resource class Subscription(resource.Resource): # FIXME(anyone): The name string of `location` field of Zaqar API response # is lower case. That is inconsistent with the guide from API-WG. This is # a workaround for this issue. location = resource.Header("location") resources_key = 'subscriptions' base_path = '/queues/%(queue_name)s/subscriptions' # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True # Properties #: The value in seconds indicating how long the subscription has existed. age = resource.Body("age") #: Alternate id of the subscription. This key is used in response of #: subscription create API to return id of subscription created. subscription_id = resource.Body("subscription_id", alternate_id=True) #: The extra metadata for the subscription. The value must be a dict. #: If the subscriber is `mailto`. The options can contain `from` and #: `subject` to indicate the email's author and title. options = resource.Body("options", type=dict) #: The queue name which the subscription is registered on. source = resource.Body("source") #: The destination of the message. Two kinds of subscribers are supported: #: http/https and email. The http/https subscriber should start with #: `http/https`. The email subscriber should start with `mailto`. subscriber = resource.Body("subscriber") #: Number of seconds the subscription remains alive? The ttl value must #: be great than 60 seconds. The default value is 3600 seconds. 
ttl = resource.Body("ttl") #: The queue name which the subscription is registered on. queue_name = resource.URI("queue_name") #: The ID to identify the client accessing Zaqar API. Must be specified #: in header for each API request. client_id = resource.Header("Client-ID") #: The ID to identify the project. Must be provided when keystone #: authentication is not enabled in Zaqar service. project_id = resource.Header("X-PROJECT-ID") def create(self, session, prepend_key=True, base_path=None): request = self._prepare_request( requires_id=False, prepend_key=prepend_key, base_path=base_path ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) response = session.post( request.url, json=request.body, headers=request.headers ) self._translate_response(response) return self @classmethod def list(cls, session, paginated=True, base_path=None, **params): """This method is a generator which yields subscription objects. This is almost the copy of list method of resource.Resource class. The only difference is the request header now includes `Client-ID` and `X-PROJECT-ID` fields which are required by Zaqar v2 API. 
""" more_data = True if base_path is None: base_path = cls.base_path uri = base_path % params headers = { "Client-ID": params.get('client_id', None) or str(uuid.uuid4()), "X-PROJECT-ID": params.get('project_id', None) or session.get_project_id(), } query_params = cls._query_mapping._transpose(params, cls) while more_data: resp = session.get(uri, headers=headers, params=query_params) resp = resp.json() resp = resp[cls.resources_key] if not resp: more_data = False yielded = 0 new_marker = None for data in resp: value = cls.existing(**data) new_marker = value.id yielded += 1 yield value if not paginated: return if "limit" in query_params and yielded < query_params["limit"]: return query_params["limit"] = yielded query_params["marker"] = new_marker def fetch( self, session, requires_id=True, base_path=None, error_message=None, skip_cache=False, ): request = self._prepare_request( requires_id=requires_id, base_path=base_path ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) response = session.get( request.url, headers=request.headers, skip_cache=skip_cache ) self._translate_response(response) return self def delete(self, session): request = self._prepare_request() headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) response = session.delete(request.url, headers=request.headers) self._translate_response(response, has_body=False) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/message/version.py0000664000175000017500000000147100000000000021704 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # capabilities allow_list = True # Properties links = resource.Body('links') status = resource.Body('status') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.2933364 openstacksdk-4.0.0/openstack/network/0000775000175000017500000000000000000000000017707 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/__init__.py0000664000175000017500000000000000000000000022006 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/network_service.py0000664000175000017500000000142000000000000023467 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import _proxy from openstack import service_description class NetworkService(service_description.ServiceDescription): """The network service.""" supported_versions = { '2': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3133461 openstacksdk-4.0.0/openstack/network/v2/0000775000175000017500000000000000000000000020236 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/__init__.py0000664000175000017500000000000000000000000022335 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/_base.py0000664000175000017500000000245500000000000021667 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class NetworkResource(resource.Resource): #: Revision number of the resource. 
*Type: int* revision_number = resource.Body('revision_number', type=int) _allow_unknown_attrs_in_body = True def _prepare_request( self, requires_id=None, prepend_key=False, patch=False, base_path=None, params=None, if_revision=None, **kwargs ): req = super()._prepare_request( requires_id=requires_id, prepend_key=prepend_key, patch=patch, base_path=base_path, params=params, ) if if_revision is not None: req.headers['If-Match'] = "revision_number=%d" % if_revision return req ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/_proxy.py0000664000175000017500000107457600000000000022154 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty from openstack import exceptions from openstack.network.v2 import address_group as _address_group from openstack.network.v2 import address_scope as _address_scope from openstack.network.v2 import agent as _agent from openstack.network.v2 import ( auto_allocated_topology as _auto_allocated_topology, ) from openstack.network.v2 import availability_zone from openstack.network.v2 import bgp_peer as _bgp_peer from openstack.network.v2 import bgp_speaker as _bgp_speaker from openstack.network.v2 import bgpvpn as _bgpvpn from openstack.network.v2 import ( bgpvpn_network_association as _bgpvpn_network_association, ) from openstack.network.v2 import ( bgpvpn_port_association as _bgpvpn_port_association, ) from openstack.network.v2 import ( bgpvpn_router_association as _bgpvpn_router_association, ) from openstack.network.v2 import ( default_security_group_rule as _default_security_group_rule, ) from openstack.network.v2 import extension from openstack.network.v2 import firewall_group as _firewall_group from openstack.network.v2 import firewall_policy as _firewall_policy from openstack.network.v2 import firewall_rule as _firewall_rule from openstack.network.v2 import flavor as _flavor from openstack.network.v2 import floating_ip as _floating_ip from openstack.network.v2 import health_monitor as _health_monitor from openstack.network.v2 import l3_conntrack_helper as _l3_conntrack_helper from openstack.network.v2 import listener as _listener from openstack.network.v2 import load_balancer as _load_balancer from openstack.network.v2 import local_ip as _local_ip from openstack.network.v2 import local_ip_association as _local_ip_association from openstack.network.v2 import metering_label as _metering_label from openstack.network.v2 import metering_label_rule as _metering_label_rule from openstack.network.v2 import ndp_proxy as _ndp_proxy from openstack.network.v2 import network as _network from openstack.network.v2 import network_ip_availability from 
openstack.network.v2 import ( network_segment_range as _network_segment_range, ) from openstack.network.v2 import pool as _pool from openstack.network.v2 import pool_member as _pool_member from openstack.network.v2 import port as _port from openstack.network.v2 import port_forwarding as _port_forwarding from openstack.network.v2 import ( qos_bandwidth_limit_rule as _qos_bandwidth_limit_rule, ) from openstack.network.v2 import ( qos_dscp_marking_rule as _qos_dscp_marking_rule, ) from openstack.network.v2 import ( qos_minimum_bandwidth_rule as _qos_minimum_bandwidth_rule, ) from openstack.network.v2 import ( qos_minimum_packet_rate_rule as _qos_minimum_packet_rate_rule, ) from openstack.network.v2 import qos_policy as _qos_policy from openstack.network.v2 import qos_rule_type as _qos_rule_type from openstack.network.v2 import quota as _quota from openstack.network.v2 import rbac_policy as _rbac_policy from openstack.network.v2 import router as _router from openstack.network.v2 import security_group as _security_group from openstack.network.v2 import security_group_rule as _security_group_rule from openstack.network.v2 import segment as _segment from openstack.network.v2 import service_profile as _service_profile from openstack.network.v2 import service_provider as _service_provider from openstack.network.v2 import sfc_flow_classifier as _sfc_flow_classifier from openstack.network.v2 import sfc_port_chain as _sfc_port_chain from openstack.network.v2 import sfc_port_pair as _sfc_port_pair from openstack.network.v2 import sfc_port_pair_group as _sfc_port_pair_group from openstack.network.v2 import sfc_service_graph as _sfc_sservice_graph from openstack.network.v2 import subnet as _subnet from openstack.network.v2 import subnet_pool as _subnet_pool from openstack.network.v2 import tap_flow as _tap_flow from openstack.network.v2 import tap_mirror as _tap_mirror from openstack.network.v2 import tap_service as _tap_service from openstack.network.v2 import trunk as _trunk 
from openstack.network.v2 import vpn_endpoint_group as _vpn_endpoint_group from openstack.network.v2 import vpn_ike_policy as _ike_policy from openstack.network.v2 import vpn_ipsec_policy as _ipsec_policy from openstack.network.v2 import ( vpn_ipsec_site_connection as _ipsec_site_connection, ) from openstack.network.v2 import vpn_service as _vpn_service from openstack import proxy from openstack import resource class Proxy(proxy.Proxy): _resource_registry = { "address_group": _address_group.AddressGroup, "address_scope": _address_scope.AddressScope, "agent": _agent.Agent, "auto_allocated_topology": ( _auto_allocated_topology.AutoAllocatedTopology ), "availability_zone": availability_zone.AvailabilityZone, "bgp_peer": _bgp_peer.BgpPeer, "bgp_speaker": _bgp_speaker.BgpSpeaker, "bgpvpn": _bgpvpn.BgpVpn, "bgpvpn_network_association": ( _bgpvpn_network_association.BgpVpnNetworkAssociation ), "bgpvpn_port_association": ( _bgpvpn_port_association.BgpVpnPortAssociation ), "bgpvpn_router_association": ( _bgpvpn_router_association.BgpVpnRouterAssociation ), "default_security_group_rule": ( _default_security_group_rule.DefaultSecurityGroupRule ), "extension": extension.Extension, "firewall_group": _firewall_group.FirewallGroup, "firewall_policy": _firewall_policy.FirewallPolicy, "firewall_rule": _firewall_rule.FirewallRule, "flavor": _flavor.Flavor, "floating_ip": _floating_ip.FloatingIP, "health_monitor": _health_monitor.HealthMonitor, "l3_conntrack_helper": _l3_conntrack_helper.ConntrackHelper, "listener": _listener.Listener, "load_balancer": _load_balancer.LoadBalancer, "local_ip": _local_ip.LocalIP, "local_ip_association": _local_ip_association.LocalIPAssociation, "metering_label": _metering_label.MeteringLabel, "metering_label_rule": _metering_label_rule.MeteringLabelRule, "ndp_proxy": _ndp_proxy.NDPProxy, "network": _network.Network, "network_ip_availability": ( network_ip_availability.NetworkIPAvailability ), "network_segment_range": 
_network_segment_range.NetworkSegmentRange, "pool": _pool.Pool, "pool_member": _pool_member.PoolMember, "port": _port.Port, "port_forwarding": _port_forwarding.PortForwarding, "qos_bandwidth_limit_rule": ( _qos_bandwidth_limit_rule.QoSBandwidthLimitRule ), "qos_dscp_marking_rule": _qos_dscp_marking_rule.QoSDSCPMarkingRule, "qos_minimum_bandwidth_rule": ( _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule ), "qos_minimum_packet_rate_rule": ( _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule ), "qos_policy": _qos_policy.QoSPolicy, "qos_rule_type": _qos_rule_type.QoSRuleType, "quota": _quota.Quota, "rbac_policy": _rbac_policy.RBACPolicy, "router": _router.Router, "security_group": _security_group.SecurityGroup, "security_group_rule": _security_group_rule.SecurityGroupRule, "segment": _segment.Segment, "service_profile": _service_profile.ServiceProfile, "service_provider": _service_provider.ServiceProvider, "sfc_flow_classifier": _sfc_flow_classifier.SfcFlowClassifier, "sfc_port_chain": _sfc_port_chain.SfcPortChain, "sfc_port_pair": _sfc_port_pair.SfcPortPair, "sfc_port_pair_group": _sfc_port_pair_group.SfcPortPairGroup, "sfc_service_graph": _sfc_sservice_graph.SfcServiceGraph, "subnet": _subnet.Subnet, "subnet_pool": _subnet_pool.SubnetPool, "tap_flow": _tap_flow.TapFlow, "tap_mirror": _tap_mirror.TapMirror, "tap_service": _tap_service.TapService, "trunk": _trunk.Trunk, "vpn_endpoint_group": _vpn_endpoint_group.VpnEndpointGroup, "vpn_ike_policy": _ike_policy.VpnIkePolicy, "vpn_ipsec_policy": _ipsec_policy.VpnIpsecPolicy, "vpn_ipsec_site_connection": ( _ipsec_site_connection.VpnIPSecSiteConnection ), "vpn_service": _vpn_service.VpnService, } @proxy._check_resource(strict=False) def _update( self, resource_type: ty.Type[resource.Resource], value, base_path=None, if_revision=None, **attrs, ) -> resource.Resource: res = self._get_resource(resource_type, value, **attrs) return res.commit(self, base_path=base_path, if_revision=if_revision) 
@proxy._check_resource(strict=False) def _delete( self, resource_type: ty.Type[resource.Resource], value, ignore_missing=True, if_revision=None, **attrs, ) -> ty.Optional[resource.Resource]: res = self._get_resource(resource_type, value, **attrs) try: rv = res.delete(self, if_revision=if_revision) except exceptions.NotFoundException: if ignore_missing: return None raise return rv def create_address_group(self, **attrs): """Create a new address group from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.address_group.AddressGroup`, comprised of the properties on the AddressGroup class. :returns: The results of address group creation :rtype: :class:`~openstack.network.v2.address_group.AddressGroup` """ return self._create(_address_group.AddressGroup, **attrs) def delete_address_group(self, address_group, ignore_missing=True): """Delete an address group :param address_group: The value can be either the ID of an address group or a :class:`~openstack.network.v2.address_group.AddressGroup` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the address group does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent address group. :returns: ``None`` """ self._delete( _address_group.AddressGroup, address_group, ignore_missing=ignore_missing, ) def find_address_group(self, name_or_id, ignore_missing=True, **query): """Find a single address group :param name_or_id: The name or ID of an address group. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. 
:returns: One :class:`~openstack.network.v2.address_group.AddressGroup` or None """ return self._find( _address_group.AddressGroup, name_or_id, ignore_missing=ignore_missing, **query, ) def get_address_group(self, address_group): """Get a single address group :param address_group: The value can be the ID of an address group or a :class:`~openstack.network.v2.address_group.AddressGroup` instance. :returns: One :class:`~openstack.network.v2.address_group.AddressGroup` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_address_group.AddressGroup, address_group) def address_groups(self, **query): """Return a generator of address groups :param dict query: Optional query parameters to be sent to limit the resources being returned. * ``name``: Address group name * ``description``: Address group description * ``project_id``: Owner project ID :returns: A generator of address group objects :rtype: :class:`~openstack.network.v2.address_group.AddressGroup` """ return self._list(_address_group.AddressGroup, **query) def update_address_group( self, address_group, **attrs ) -> _address_group.AddressGroup: """Update an address group :param address_group: Either the ID of an address group or a :class:`~openstack.network.v2.address_group.AddressGroup` instance. :param attrs: The attributes to update on the address group represented by ``value``. :returns: The updated address group :rtype: :class:`~openstack.network.v2.address_group.AddressGroup` """ return self._update( _address_group.AddressGroup, address_group, **attrs ) def add_addresses_to_address_group(self, address_group, addresses): """Add addresses to a address group :param address_group: Either the ID of an address group or a :class:`~openstack.network.v2.address_group.AddressGroup` instance. :param list addresses: List of address strings. 
:returns: AddressGroup with updated addresses :rtype: :class:`~openstack.network.v2.address_group.AddressGroup` """ ag = self._get_resource(_address_group.AddressGroup, address_group) return ag.add_addresses(self, addresses) def remove_addresses_from_address_group(self, address_group, addresses): """Remove addresses from a address group :param address_group: Either the ID of an address group or a :class:`~openstack.network.v2.address_group.AddressGroup` instance. :param list addresses: List of address strings. :returns: AddressGroup with updated addresses :rtype: :class:`~openstack.network.v2.address_group.AddressGroup` """ ag = self._get_resource(_address_group.AddressGroup, address_group) return ag.remove_addresses(self, addresses) def create_address_scope(self, **attrs): """Create a new address scope from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.address_scope.AddressScope`, comprised of the properties on the AddressScope class. :returns: The results of address scope creation :rtype: :class:`~openstack.network.v2.address_scope.AddressScope` """ return self._create(_address_scope.AddressScope, **attrs) def delete_address_scope(self, address_scope, ignore_missing=True): """Delete an address scope :param address_scope: The value can be either the ID of an address scope or a :class:`~openstack.network.v2.address_scope.AddressScope` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the address scope does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent address scope. :returns: ``None`` """ self._delete( _address_scope.AddressScope, address_scope, ignore_missing=ignore_missing, ) def find_address_scope(self, name_or_id, ignore_missing=True, **query): """Find a single address scope :param name_or_id: The name or ID of an address scope. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.address_scope.AddressScope` or None """ return self._find( _address_scope.AddressScope, name_or_id, ignore_missing=ignore_missing, **query, ) def get_address_scope(self, address_scope): """Get a single address scope :param address_scope: The value can be the ID of an address scope or a :class:`~openstack.network.v2.address_scope.AddressScope` instance. :returns: One :class:`~openstack.network.v2.address_scope.AddressScope` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_address_scope.AddressScope, address_scope) def address_scopes(self, **query): """Return a generator of address scopes :param dict query: Optional query parameters to be sent to limit the resources being returned. * ``name``: Address scope name * ``ip_version``: Address scope IP address version * ``tenant_id``: Owner tenant ID * ``shared``: Address scope is shared (boolean) :returns: A generator of address scope objects :rtype: :class:`~openstack.network.v2.address_scope.AddressScope` """ return self._list(_address_scope.AddressScope, **query) def update_address_scope(self, address_scope, **attrs): """Update an address scope :param address_scope: Either the ID of an address scope or a :class:`~openstack.network.v2.address_scope.AddressScope` instance. :param attrs: The attributes to update on the address scope represented by ``value``. 
:returns: The updated address scope :rtype: :class:`~openstack.network.v2.address_scope.AddressScope` """ return self._update( _address_scope.AddressScope, address_scope, **attrs ) def agents(self, **query): """Return a generator of network agents :param dict query: Optional query parameters to be sent to limit the resources being returned. * ``agent_type``: Agent type. * ``availability_zone``: The availability zone for an agent. * ``binary``: The name of the agent's application binary. * ``description``: The description of the agent. * ``host``: The host (host name or host address) the agent is running on. * ``topic``: The message queue topic used. * ``is_admin_state_up``: The administrative state of the agent. * ``is_alive``: Whether the agent is alive. :returns: A generator of agents :rtype: :class:`~openstack.network.v2.agent.Agent` """ return self._list(_agent.Agent, **query) def delete_agent(self, agent, ignore_missing=True): """Delete a network agent :param agent: The value can be the ID of a agent or a :class:`~openstack.network.v2.agent.Agent` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the agent does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent agent. :returns: ``None`` """ self._delete(_agent.Agent, agent, ignore_missing=ignore_missing) def get_agent(self, agent): """Get a single network agent :param agent: The value can be the ID of a agent or a :class:`~openstack.network.v2.agent.Agent` instance. :returns: One :class:`~openstack.network.v2.agent.Agent` :rtype: :class:`~openstack.network.v2.agent.Agent` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_agent.Agent, agent) def update_agent(self, agent, **attrs): """Update a network agent :param agent: The value can be the ID of a agent or a :class:`~openstack.network.v2.agent.Agent` instance. 
:param attrs: The attributes to update on the agent represented by ``value``. :returns: One :class:`~openstack.network.v2.agent.Agent` :rtype: :class:`~openstack.network.v2.agent.Agent` """ return self._update(_agent.Agent, agent, **attrs) def dhcp_agent_hosting_networks(self, agent, **query): """A generator of networks hosted by a DHCP agent. :param agent: Either the agent id of an instance of :class:`~openstack.network.v2.network_agent.Agent` :param query: kwargs query: Optional query parameters to be sent to limit the resources being returned. :return: A generator of networks """ agent_obj = self._get_resource(_agent.Agent, agent) return self._list( _network.DHCPAgentHostingNetwork, agent_id=agent_obj.id, **query ) def add_dhcp_agent_to_network(self, agent, network): """Add a DHCP Agent to a network :param agent: Either the agent id of an instance of :class:`~openstack.network.v2.network_agent.Agent` :param network: Network instance :return: """ network = self._get_resource(_network.Network, network) agent = self._get_resource(_agent.Agent, agent) return agent.add_agent_to_network(self, network.id) def remove_dhcp_agent_from_network(self, agent, network): """Remove a DHCP Agent from a network :param agent: Either the agent id of an instance of :class:`~openstack.network.v2.network_agent.Agent` :param network: Network instance :return: """ network = self._get_resource(_network.Network, network) agent = self._get_resource(_agent.Agent, agent) return agent.remove_agent_from_network(self, network.id) def network_hosting_dhcp_agents(self, network, **query): """A generator of DHCP agents hosted on a network. :param network: The instance of :class:`~openstack.network.v2.network.Network` :param dict query: Optional query parameters to be sent to limit the resources returned. 
:return: A generator of hosted DHCP agents """ net = self._get_resource(_network.Network, network) return self._list( _agent.NetworkHostingDHCPAgent, network_id=net.id, **query ) def get_auto_allocated_topology(self, project=None): """Get the auto-allocated topology of a given tenant :param project: The value is the ID or name of a project :returns: The auto-allocated topology :rtype: :class:`~openstack.network.v2.auto_allocated_topology.AutoAllocatedTopology` """ # If project option is not given, grab project id from session if project is None: project = self.get_project_id() return self._get( _auto_allocated_topology.AutoAllocatedTopology, project ) def delete_auto_allocated_topology( self, project=None, ignore_missing=False ): """Delete auto-allocated topology :param project: The value is the ID or name of a project :param ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the topology does not exist. When set to ``True``, no exception will be raised when attempting to delete nonexistant topology :returns: ``None`` """ # If project option is not given, grab project id from session if project is None: project = self.get_project_id() self._delete( _auto_allocated_topology.AutoAllocatedTopology, project, ignore_missing=ignore_missing, ) def validate_auto_allocated_topology(self, project=None): """Validate the resources for auto allocation :param project: The value is the ID or name of a project :returns: Whether all resources are correctly configured or not :rtype: :class:`~openstack.network.v2.auto_allocated_topology.ValidateTopology` """ # If project option is not given, grab project id from session if project is None: project = self.get_project_id() return self._get( _auto_allocated_topology.ValidateTopology, project=project, requires_id=False, ) def availability_zones(self, **query): """Return a generator of availability zones :param dict query: optional query parameters to be set to limit the returned 
resources. Valid parameters include: * ``name``: The name of an availability zone. * ``resource``: The type of resource for the availability zone. :returns: A generator of availability zone objects :rtype: :class:`~openstack.network.v2.availability_zone.AvailabilityZone` """ return self._list(availability_zone.AvailabilityZone) def create_bgp_peer(self, **attrs): """Create a new BGP Peer from attributes""" return self._create(_bgp_peer.BgpPeer, **attrs) def delete_bgp_peer(self, peer, ignore_missing=True): """Delete a BGP Peer""" self._delete(_bgp_peer.BgpPeer, peer, ignore_missing=ignore_missing) def find_bgp_peer(self, name_or_id, ignore_missing=True, **query): """Find a single BGP Peer""" return self._find( _bgp_peer.BgpPeer, name_or_id, ignore_missing=ignore_missing, **query, ) def get_bgp_peer(self, peer): """Get a signle BGP Peer""" return self._get(_bgp_peer.BgpPeer, peer) def update_bgp_peer(self, peer, **attrs): """Update a BGP Peer""" return self._update(_bgp_peer.BgpPeer, peer, **attrs) def bgp_peers(self, **query): """Return a generator of BGP Peers""" return self._list(_bgp_peer.BgpPeer, **query) def create_bgp_speaker(self, **attrs): """Create a new BGP Speaker""" return self._create(_bgp_speaker.BgpSpeaker, **attrs) def delete_bgp_speaker(self, speaker, ignore_missing=True): """Delete a BGP Speaker""" self._delete( _bgp_speaker.BgpSpeaker, speaker, ignore_missing=ignore_missing ) def find_bgp_speaker(self, name_or_id, ignore_missing=True, **query): """Find a single BGP Peer""" return self._find( _bgp_speaker.BgpSpeaker, name_or_id, ignore_missing=ignore_missing, **query, ) def get_bgp_speaker(self, speaker): """Get a signle BGP Speaker""" return self._get(_bgp_speaker.BgpSpeaker, speaker) def update_bgp_speaker(self, speaker, **attrs): """Update a BGP Speaker""" return self._update(_bgp_speaker.BgpSpeaker, speaker, **attrs) def bgp_speakers(self, **query): """Return a generator of BGP Peers""" return self._list(_bgp_speaker.BgpSpeaker, **query) def 
add_bgp_peer_to_speaker(self, speaker, peer_id): """Bind the BGP peer to the specified BGP Speaker.""" speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) return speaker.add_bgp_peer(self, peer_id) def remove_bgp_peer_from_speaker(self, speaker, peer_id): """Unbind the BGP peer from a BGP Speaker.""" speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) return speaker.remove_bgp_peer(self, peer_id) def add_gateway_network_to_speaker(self, speaker, network_id): """Add a network to the specified BGP speaker.""" speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) return speaker.add_gateway_network(self, network_id) def remove_gateway_network_from_speaker(self, speaker, network_id): """Remove a network from the specified BGP speaker.""" speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) return speaker.remove_gateway_network(self, network_id) def get_advertised_routes_of_speaker(self, speaker): """List all routes advertised by the specified BGP Speaker.""" speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) return speaker.get_advertised_routes(self) def get_bgp_dragents_hosting_speaker(self, speaker): """List all BGP dynamic agents which are hosting the specified BGP Speaker.""" speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) return speaker.get_bgp_dragents(self) def add_bgp_speaker_to_dragent(self, bgp_agent, bgp_speaker_id): """Add a BGP Speaker to the specified dynamic routing agent.""" speaker = self._get_resource(_bgp_speaker.BgpSpeaker, bgp_speaker_id) speaker.add_bgp_speaker_to_dragent(self, bgp_agent) def get_bgp_speakers_hosted_by_dragent(self, bgp_agent): """List all BGP Seakers hosted on the specified dynamic routing agent.""" agent = self._get_resource(_agent.Agent, bgp_agent) return agent.get_bgp_speakers_hosted_by_dragent(self) def remove_bgp_speaker_from_dragent(self, bgp_agent, bgp_speaker_id): """Delete the BGP Speaker hosted by the specified dynamic routing agent.""" speaker = 
self._get_resource(_bgp_speaker.BgpSpeaker, bgp_speaker_id) speaker.remove_bgp_speaker_from_dragent(self, bgp_agent) def create_bgpvpn(self, **attrs): """Create a new BGPVPN :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.bgpvpn.BgpVpn`, comprised of the properties on the BGPVPN class, for details see the Neutron api-ref. :returns: The result of BGPVPN creation :rtype: :class:`~openstack.network.v2.bgpvpn.BgpVpn` """ return self._create(_bgpvpn.BgpVpn, **attrs) def delete_bgpvpn(self, bgpvpn, ignore_missing=True): """Delete a BGPVPN :param bgpvpn: The value can be either the ID of a bgpvpn or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the BGPVPN does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent BGPVPN. :returns: ``None`` """ self._delete(_bgpvpn.BgpVpn, bgpvpn, ignore_missing=ignore_missing) def find_bgpvpn(self, name_or_id, ignore_missing=True, **query): """Find a single BGPVPN :param name_or_id: The name or ID of a BGPVPN. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.bgpvpn.BGPVPN` or None """ return self._find( _bgpvpn.BgpVpn, name_or_id, ignore_missing=ignore_missing, **query ) def get_bgpvpn(self, bgpvpn): """Get a signle BGPVPN :param bgpvpn: The value can be the ID of a BGPVPN or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :returns: One :class:`~openstack.network.v2.bgpvpn.BgpVpn` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get(_bgpvpn.BgpVpn, bgpvpn) def update_bgpvpn(self, bgppvpn, **attrs): """Update a BGPVPN :param bgpvpn: Either the ID of a BGPVPN or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param attrs: The attributes to update on the BGPVPN represented by ``value``. :returns: The updated BGPVPN :rtype: :class:`~openstack.network.v2.bgpvpn.BgpVpn` """ return self._update(_bgpvpn.BgpVpn, bgppvpn, **attrs) def bgpvpns(self, **query): """Return a generator of BGP VPNs :param dict query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of BgpVPN objects :rtype: :class:`~openstack.network.v2.bgpvpn.BgpVpn` """ return self._list(_bgpvpn.BgpVpn, **query) def create_bgpvpn_network_association(self, bgpvpn, **attrs): """Create a new BGPVPN Network Association :param bgpvpn: The value can be either the ID of a bgpvpn or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.bgpvpn_network_association. BgpVpnNetworkAssociation`, comprised of the properties on the BgpVpnNetworkAssociation class. :returns: The results of BgpVpnNetworkAssociation creation :rtype: :class:`~openstack.network.v2.bgpvpn_network_association. BgpVpnNetworkAssociation` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._create( _bgpvpn_network_association.BgpVpnNetworkAssociation, bgpvpn_id=bgpvpn_res.id, **attrs, ) def delete_bgpvpn_network_association( self, bgpvpn, net_association, ignore_missing=True ): """Delete a BGPVPN Network Association :param bgpvpn: The value can be either the ID of a bgpvpn or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param net_association: The value can be either the ID of a bgpvpn_network_association or a :class:`~openstack.network.v2.bgpvpn_network_association. BgpVpnNetworkAssociation` instance. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the BgpVpnNetworkAssociation does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent BgpVpnNetworkAssociation. :returns: ``None`` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) self._delete( _bgpvpn_network_association.BgpVpnNetworkAssociation, net_association, ignore_missing=ignore_missing, bgpvpn_id=bgpvpn_res.id, ) def get_bgpvpn_network_association(self, bgpvpn, net_association): """Get a signle BGPVPN Network Association :param bgpvpn: The value can be the ID of a BGPVPN or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param net_association: The value can be the ID of a BgpVpnNetworkAssociation or a :class:`~openstack.network.v2.bgpvpn_network_association. BgpVpnNetworkAssociation` instance. :returns: One :class:`~openstack.network.v2. bgpvpn_network_associaition.BgpVpnNetworkAssociation` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._get( _bgpvpn_network_association.BgpVpnNetworkAssociation, net_association, bgpvpn_id=bgpvpn_res.id, ) def bgpvpn_network_associations(self, bgpvpn, **query): """Return a generator of BGP VPN Network Associations :param: bgpvpn: The value can be the ID of a BGPVPN or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param dict query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of BgpVpnNetworkAssociation objects :rtype: :class:`~openstack.network.v2.bgpvpn_network_association. 
BgpVpnNetworkAssociation` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._list( _bgpvpn_network_association.BgpVpnNetworkAssociation, bgpvpn_id=bgpvpn_res.id, **query, ) def create_bgpvpn_port_association(self, bgpvpn, **attrs): """Create a new BGPVPN Port Association :param bgpvpn: The value can be either the ID of a bgpvpn or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.bgpvpn_port_association. BgpVpnPortAssociation`, comprised of the properties on the BgpVpnPortAssociation class. :returns: The results of BgpVpnPortAssociation creation :rtype: :class:`~openstack.network.v2.bgpvpn_port_association. BgpVpnPortAssociation` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._create( _bgpvpn_port_association.BgpVpnPortAssociation, bgpvpn_id=bgpvpn_res.id, **attrs, ) def delete_bgpvpn_port_association( self, bgpvpn, port_association, ignore_missing=True ): """Delete a BGPVPN Port Association :param bgpvpn: The value can be either the ID of a bgpvpn or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param port_association: The value can be either the ID of a bgpvpn_port_association or a :class:`~openstack.network.v2.bgpvpn_port_association. BgpVpnPortAssociation` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the BgpVpnPortAssociation does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent BgpVpnPortAssociation. 
:returns: ``None`` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) self._delete( _bgpvpn_port_association.BgpVpnPortAssociation, port_association, ignore_missing=ignore_missing, bgpvpn_id=bgpvpn_res.id, ) def find_bgpvpn_port_association( self, name_or_id, bgpvpn_id, ignore_missing=True, **query ): """Find a single BGPVPN Port Association :param name_or_id: The name or ID of a BgpVpnNetworkAssociation. :param bgpvpn_id: The value can be the ID of a BGPVPN. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.bgpvpn.BGPVPN` or None """ return self._find( _bgpvpn_port_association.BgpVpnPortAssociation, name_or_id, ignore_missing=ignore_missing, bgpvpn_id=bgpvpn_id, **query, ) def get_bgpvpn_port_association(self, bgpvpn, port_association): """Get a signle BGPVPN Port Association :param bgpvpn: The value can be the ID of a BGPVPN or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param port_association: The value can be the ID of a BgpVpnPortAssociation or a :class:`~openstack.network.v2.bgpvpn_port_association. BgpVpnPortAssociation` instance. :returns: One :class:`~openstack.network.v2. bgpvpn_port_associaition.BgpVpnPortAssociation` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._get( _bgpvpn_port_association.BgpVpnPortAssociation, port_association, bgpvpn_id=bgpvpn_res.id, ) def update_bgpvpn_port_association( self, bgpvpn, port_association, **attrs ): """Update a BPGPN Port Association :param bgpvpn: Either the ID of a BGPVPN or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. 
:param port_association: The value can be the ID of a BgpVpnPortAssociation or a :class:`~openstack.network.v2.bgpvpn_port_association. BgpVpnPortAssociation` instance. :param attrs: The attributes to update on the BGPVPN represented by ``value``. :returns: The updated BgpVpnPortAssociation. :rtype: :class:`~openstack.network.v2.bgpvpn.BgpVpn` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._update( _bgpvpn_port_association.BgpVpnPortAssociation, port_association, bgpvpn_id=bgpvpn_res.id, **attrs, ) def bgpvpn_port_associations(self, bgpvpn, **query): """Return a generator of BGP VPN Port Associations :param: bgpvpn: The value can be the ID of a BGPVPN or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param dict query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of BgpVpnNetworkAssociation objects :rtype: :class:`~openstack.network.v2.bgpvpn_network_association. BgpVpnNetworkAssociation` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._list( _bgpvpn_port_association.BgpVpnPortAssociation, bgpvpn_id=bgpvpn_res.id, **query, ) def create_bgpvpn_router_association(self, bgpvpn, **attrs): """Create a new BGPVPN Router Association :param bgpvpn: The value can be either the ID of a bgpvpn or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.bgpvpn_router_association. BgpVpnRouterAssociation`, comprised of the properties on the BgpVpnRouterAssociation class. :returns: The results of BgpVpnRouterAssociation creation :rtype: :class:`~openstack.network.v2.bgpvpn_router_association. 
BgpVpnRouterAssociation` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._create( _bgpvpn_router_association.BgpVpnRouterAssociation, bgpvpn_id=bgpvpn_res.id, **attrs, ) def delete_bgpvpn_router_association( self, bgpvpn, router_association, ignore_missing=True ): """Delete a BGPVPN Router Association :param bgpvpn: The value can be either the ID of a bgpvpn or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param port_association: The value can be either the ID of a bgpvpn_router_association or a :class:`~openstack.network.v2.bgpvpn_router_association. BgpVpnRouterAssociation` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the BgpVpnRouterAssociation does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent BgpVpnRouterAsociation. :returns: ``None`` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) self._delete( _bgpvpn_router_association.BgpVpnRouterAssociation, router_association, ignore_missing=ignore_missing, bgpvpn_id=bgpvpn_res.id, ) def get_bgpvpn_router_association(self, bgpvpn, router_association): """Get a signle BGPVPN Router Association :param bgpvpn: The value can be the ID of a BGPVPN or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param router_association: The value can be the ID of a BgpVpnRouterAssociation or a :class:`~openstack.network.v2.bgpvpn_router_association. BgpVpnRouterAssociation` instance. :returns: One :class:`~openstack.network.v2. bgpvpn_router_associaition.BgpVpnRouterAssociation` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._get( _bgpvpn_router_association.BgpVpnRouterAssociation, router_association, bgpvpn_id=bgpvpn_res.id, ) def update_bgpvpn_router_association( self, bgpvpn, router_association, **attrs ): """Update a BPGPN Router Association :param dict query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of BgpVpnNetworkAssociation objects :rtype: :class:`~openstack.network.v2.bgpvpn_network_association. BgpVpnNetworkAssociation` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._update( _bgpvpn_router_association.BgpVpnRouterAssociation, router_association, bgpvpn_id=bgpvpn_res.id, **attrs, ) def bgpvpn_router_associations(self, bgpvpn, **query): """Return a generator of BGP VPN router Associations :param: bgpvpn: The value can be the ID of a BGPVPN or a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. :param dict query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of BgpVpnRouterAssociation objects :rtype: :class:`~openstack.network.v2.bgpvpn_router_association. BgpVpnRouterAssociation` """ bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) return self._list( _bgpvpn_router_association.BgpVpnRouterAssociation, bgpvpn_id=bgpvpn_res.id, **query, ) def find_extension(self, name_or_id, ignore_missing=True, **query): """Find a single extension :param name_or_id: The name or ID of a extension. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. 
:returns: One :class:`~openstack.network.v2.extension.Extension` or None """ return self._find( extension.Extension, name_or_id, ignore_missing=ignore_missing, **query, ) def extensions(self, **query): """Return a generator of extensions :param dict query: Optional query parameters to be sent to limit the resources being returned. Currently no parameter is supported. :returns: A generator of extension objects :rtype: :class:`~openstack.network.v2.extension.Extension` """ return self._list(extension.Extension, **query) def create_flavor(self, **attrs): """Create a new network service flavor from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.flavor.Flavor`, comprised of the properties on the Flavor class. :returns: The results of flavor creation :rtype: :class:`~openstack.network.v2.flavor.Flavor` """ return self._create(_flavor.Flavor, **attrs) def delete_flavor(self, flavor, ignore_missing=True): """Delete a network service flavor :param flavor: The value can be either the ID of a flavor or a :class:`~openstack.network.v2.flavor.Flavor` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the flavor does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent flavor. :returns: ``None`` """ self._delete(_flavor.Flavor, flavor, ignore_missing=ignore_missing) def find_flavor(self, name_or_id, ignore_missing=True, **query): """Find a single network service flavor :param name_or_id: The name or ID of a flavor. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. 
:returns: One :class:`~openstack.network.v2.flavor.Flavor` or None """ return self._find( _flavor.Flavor, name_or_id, ignore_missing=ignore_missing, **query ) def get_flavor(self, flavor): """Get a single network service flavor :param flavor: The value can be the ID of a flavor or a :class:`~openstack.network.v2.flavor.Flavor` instance. :returns: One :class:`~openstack.network.v2.flavor.Flavor` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_flavor.Flavor, flavor) def update_flavor(self, flavor, **attrs): """Update a network service flavor :param flavor: Either the id of a flavor or a :class:`~openstack.network.v2.flavor.Flavor` instance. :param attrs: The attributes to update on the flavor represented by ``flavor``. :returns: The updated flavor :rtype: :class:`~openstack.network.v2.flavor.Flavor` """ return self._update(_flavor.Flavor, flavor, **attrs) def flavors(self, **query): """Return a generator of network service flavors :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters include: * ``description``: The description of a flavor. * ``is_enabled``: Whether a flavor is enabled. * ``name``: The name of a flavor. * ``service_type``: The service type to which a falvor applies. :returns: A generator of flavor objects :rtype: :class:`~openstack.network.v2.flavor.Flavor` """ return self._list(_flavor.Flavor, **query) def associate_flavor_with_service_profile(self, flavor, service_profile): """Associate network flavor with service profile. :param flavor: Either the id of a flavor or a :class:`~openstack.network.v2.flavor.Flavor` instance. :param service_profile: The value can be either the ID of a service profile or a :class:`~openstack.network.v2.service_profile.ServiceProfile` instance. 
:return: """ flavor = self._get_resource(_flavor.Flavor, flavor) service_profile = self._get_resource( _service_profile.ServiceProfile, service_profile ) return flavor.associate_flavor_with_service_profile( self, service_profile.id ) def disassociate_flavor_from_service_profile( self, flavor, service_profile ): """Disassociate network flavor from service profile. :param flavor: Either the id of a flavor or a :class:`~openstack.network.v2.flavor.Flavor` instance. :param service_profile: The value can be either the ID of a service profile or a :class:`~openstack.network.v2.service_profile.ServiceProfile` instance. :return: """ flavor = self._get_resource(_flavor.Flavor, flavor) service_profile = self._get_resource( _service_profile.ServiceProfile, service_profile ) return flavor.disassociate_flavor_from_service_profile( self, service_profile.id ) def create_local_ip(self, **attrs): """Create a new local ip from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.local_ip.LocalIP`, comprised of the properties on the LocalIP class. :returns: The results of local ip creation :rtype: :class:`~openstack.network.v2.local_ip.LocalIP` """ return self._create(_local_ip.LocalIP, **attrs) def delete_local_ip(self, local_ip, ignore_missing=True, if_revision=None): """Delete a local ip :param local_ip: The value can be either the ID of a local ip or a :class:`~openstack.network.v2.local_ip.LocalIP` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the local ip does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent ip. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. 
:returns: ``None`` """ self._delete( _local_ip.LocalIP, local_ip, ignore_missing=ignore_missing, if_revision=if_revision, ) def find_local_ip(self, name_or_id, ignore_missing=True, **query): """Find a local IP :param name_or_id: The name or ID of an local IP. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.local_ip.LocalIP` or None """ return self._find( _local_ip.LocalIP, name_or_id, ignore_missing=ignore_missing, **query, ) def get_local_ip(self, local_ip): """Get a single local ip :param local_ip: The value can be the ID of a local ip or a :class:`~openstack.network.v2.local_ip.LocalIP` instance. :returns: One :class:`~openstack.network.v2.local_ip.LocalIP` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_local_ip.LocalIP, local_ip) def local_ips(self, **query): """Return a generator of local ips :param dict query: Optional query parameters to be sent to limit the resources being returned. * ``name``: Local IP name * ``description``: Local IP description * ``project_id``: Owner project ID * ``network_id``: Local IP network * ``local_port_id``: Local port ID * ``local_ip_address``: The IP address of a Local IP * ``ip_mode``: The Local IP mode :returns: A generator of local ip objects :rtype: :class:`~openstack.network.v2.local_ip.LocalIP` """ return self._list(_local_ip.LocalIP, **query) def update_local_ip(self, local_ip, if_revision=None, **attrs): """Update a local ip :param local_ip: Either the id of a local ip or a :class:`~openstack.network.v2.local_ip.LocalIP` instance. 
:param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :param attrs: The attributes to update on the ip represented by ``value``. :returns: The updated ip :rtype: :class:`~openstack.network.v2.local_ip.LocalIP` """ return self._update( _local_ip.LocalIP, local_ip, if_revision=if_revision, **attrs ) def create_local_ip_association(self, local_ip, **attrs): """Create a new local ip association from attributes :param local_ip: The value can be the ID of a Local IP or a :class:`~openstack.network.v2.local_ip.LocalIP` instance. :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation`, comprised of the properties on the LocalIP class. :returns: The results of local ip association creation :rtype: :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation` """ local_ip = self._get_resource(_local_ip.LocalIP, local_ip) return self._create( _local_ip_association.LocalIPAssociation, local_ip_id=local_ip.id, **attrs, ) def delete_local_ip_association( self, local_ip, fixed_port_id, ignore_missing=True, if_revision=None ): """Delete a local ip association :param local_ip: The value can be the ID of a Local IP or a :class:`~openstack.network.v2.local_ip.LocalIP` instance. :param fixed_port_id: The value can be either the fixed port ID or a :class: `~openstack.network.v2.local_ip_association.LocalIPAssociation` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the local ip association does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent ip. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. 
:returns: ``None`` """ local_ip = self._get_resource(_local_ip.LocalIP, local_ip) self._delete( _local_ip_association.LocalIPAssociation, fixed_port_id, local_ip_id=local_ip.id, ignore_missing=ignore_missing, if_revision=if_revision, ) def find_local_ip_association( self, name_or_id, local_ip, ignore_missing=True, **query ): """Find a local ip association :param name_or_id: The name or ID of local ip association. :param local_ip: The value can be the ID of a Local IP or a :class:`~openstack.network.v2.local_ip.LocalIP` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation` or None """ local_ip = self._get_resource(_local_ip.LocalIP, local_ip) return self._find( _local_ip_association.LocalIPAssociation, name_or_id, local_ip_id=local_ip.id, ignore_missing=ignore_missing, **query, ) def get_local_ip_association(self, local_ip_association, local_ip): """Get a single local ip association :param local_ip: The value can be the ID of a Local IP or a :class:`~openstack.network.v2.local_ip.LocalIP` instance. :param local_ip_association: The value can be the ID of a local ip association or a :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation` instance. :returns: One :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
        """
        local_ip = self._get_resource(_local_ip.LocalIP, local_ip)
        return self._get(
            _local_ip_association.LocalIPAssociation,
            local_ip_association,
            local_ip_id=local_ip.id,
        )

    def local_ip_associations(self, local_ip, **query):
        """Return a generator of local ip associations

        :param local_ip: The value can be the ID of a Local IP or a
            :class:`~openstack.network.v2.local_ip.LocalIP` instance.
        :param dict query: Optional query parameters to be sent to limit
            the resources being returned.

            * ``fixed_port_id``: The ID of the port to which a local IP
              is associated
            * ``fixed_ip``: The fixed ip address associated with a
              Local IP
            * ``host``: Host where local ip is associated

        :returns: A generator of local ip association objects
        :rtype: :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation`
        """
        local_ip = self._get_resource(_local_ip.LocalIP, local_ip)
        return self._list(
            _local_ip_association.LocalIPAssociation,
            local_ip_id=local_ip.id,
            **query,
        )

    def create_ip(self, **attrs):
        """Create a new floating ip from attributes

        :param attrs: Keyword arguments which will be used to create
            a :class:`~openstack.network.v2.floating_ip.FloatingIP`,
            comprised of the properties on the FloatingIP class.

        :returns: The results of floating ip creation
        :rtype: :class:`~openstack.network.v2.floating_ip.FloatingIP`
        """
        return self._create(_floating_ip.FloatingIP, **attrs)

    def delete_ip(self, floating_ip, ignore_missing=True, if_revision=None):
        """Delete a floating ip

        :param floating_ip: The value can be either the ID of a floating ip
            or a :class:`~openstack.network.v2.floating_ip.FloatingIP`
            instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the floating ip does not exist. When set to ``True``, no
            exception will be set when attempting to delete a nonexistent ip.
        :param int if_revision: Revision to put in If-Match header of update
            request to perform compare-and-swap update.
:returns: ``None`` """ self._delete( _floating_ip.FloatingIP, floating_ip, ignore_missing=ignore_missing, if_revision=if_revision, ) def find_available_ip(self): """Find an available IP :returns: One :class:`~openstack.network.v2.floating_ip.FloatingIP` or None """ return _floating_ip.FloatingIP.find_available(self) def find_ip(self, name_or_id, ignore_missing=True, **query): """Find a single IP :param name_or_id: The name or ID of an IP. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.floating_ip.FloatingIP` or None """ return self._find( _floating_ip.FloatingIP, name_or_id, ignore_missing=ignore_missing, **query, ) def get_ip(self, floating_ip): """Get a single floating ip :param floating_ip: The value can be the ID of a floating ip or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. :returns: One :class:`~openstack.network.v2.floating_ip.FloatingIP` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_floating_ip.FloatingIP, floating_ip) def ips(self, **query): """Return a generator of ips :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``description``: The description of a floating IP. * ``fixed_ip_address``: The fixed IP address associated with a floating IP address. * ``floating_ip_address``: The IP address of a floating IP. * ``floating_network_id``: The ID of the network associated with a floating IP. * ``port_id``: The ID of the port to which a floating IP is associated. * ``project_id``: The ID of the project a floating IP is associated with. 
* ``router_id``: The ID of an associated router. * ``status``: The status of a floating IP, which can be ``ACTIVE`` or ``DOWN``. :returns: A generator of floating IP objects :rtype: :class:`~openstack.network.v2.floating_ip.FloatingIP` """ return self._list(_floating_ip.FloatingIP, **query) def update_ip(self, floating_ip, if_revision=None, **attrs): """Update a ip :param floating_ip: Either the id of a ip or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :param attrs: The attributes to update on the ip represented by ``value``. :returns: The updated ip :rtype: :class:`~openstack.network.v2.floating_ip.FloatingIP` """ return self._update( _floating_ip.FloatingIP, floating_ip, if_revision=if_revision, **attrs, ) def create_port_forwarding(self, **attrs): """Create a new floating ip port forwarding from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.port_forwarding.PortForwarding`, comprised of the properties on the PortForwarding class. :returns: The results of port forwarding creation :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` """ return self._create(_port_forwarding.PortForwarding, **attrs) def get_port_forwarding(self, port_forwarding, floating_ip): """Get a single port forwarding :param port_forwarding: The value can be the ID of a port forwarding or a :class:`~openstack.network.v2.port_forwarding.PortForwarding` instance. :param floating_ip: The value can be the ID of a Floating IP or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. :returns: One :class:`~openstack.network.v2.port_forwarding.PortForwarding` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" floating_ip = self._get_resource(_floating_ip.FloatingIP, floating_ip) return self._get( _port_forwarding.PortForwarding, port_forwarding, floatingip_id=floating_ip.id, ) def find_port_forwarding( self, pf_id, floating_ip, ignore_missing=True, **query ): """Find a single port forwarding :param pf_id: The ID of a port forwarding. :param floating_ip: The value can be the ID of a Floating IP or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.port_forwarding.PortForwarding` or None """ floating_ip = self._get_resource(_floating_ip.FloatingIP, floating_ip) return self._find( _port_forwarding.PortForwarding, pf_id, floatingip_id=floating_ip.id, ignore_missing=ignore_missing, **query, ) def delete_port_forwarding( self, port_forwarding, floating_ip, ignore_missing=True ): """Delete a port forwarding :param port_forwarding: The value can be the ID of a port forwarding or a :class:`~openstack.network.v2.port_forwarding.PortForwarding` instance. :param floating_ip: The value can be the ID of a Floating IP or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the floating ip does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent ip. 
:returns: ``None`` """ fip = self._get_resource(_floating_ip.FloatingIP, floating_ip) self._delete( _port_forwarding.PortForwarding, port_forwarding, floatingip_id=fip.id, ignore_missing=ignore_missing, ) def port_forwardings(self, floating_ip, **query): """Return a generator of port forwardings :param floating_ip: The value can be the ID of a Floating IP or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``internal_port_id``: The ID of internal port. * ``external_port``: The external TCP/UDP/other port number * ``protocol``: TCP/UDP/other protocol :returns: A generator of port forwarding objects :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` """ fip = self._get_resource(_floating_ip.FloatingIP, floating_ip) return self._list( _port_forwarding.PortForwarding, floatingip_id=fip.id, **query ) def update_port_forwarding(self, port_forwarding, floating_ip, **attrs): """Update a port forwarding :param port_forwarding: The value can be the ID of a port forwarding or a :class:`~openstack.network.v2.port_forwarding.PortForwarding` instance. :param floating_ip: The value can be the ID of a Floating IP or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. :param attrs: The attributes to update on the ip represented by ``value``. :returns: The updated port_forwarding :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` """ fip = self._get_resource(_floating_ip.FloatingIP, floating_ip) return self._update( _port_forwarding.PortForwarding, port_forwarding, floatingip_id=fip.id, **attrs, ) def create_health_monitor(self, **attrs): """Create a new health monitor from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.health_monitor.HealthMonitor`, comprised of the properties on the HealthMonitor class. 
:returns: The results of health monitor creation :rtype: :class:`~openstack.network.v2.health_monitor.HealthMonitor` """ return self._create(_health_monitor.HealthMonitor, **attrs) def delete_health_monitor(self, health_monitor, ignore_missing=True): """Delete a health monitor :param health_monitor: The value can be either the ID of a health monitor or a :class:`~openstack.network.v2.health_monitor.HealthMonitor` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the health monitor does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent health monitor. :returns: ``None`` """ self._delete( _health_monitor.HealthMonitor, health_monitor, ignore_missing=ignore_missing, ) def find_health_monitor(self, name_or_id, ignore_missing=True, **query): """Find a single health monitor :param name_or_id: The name or ID of a health monitor. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.health_monitor.HealthMonitor` or None """ return self._find( _health_monitor.HealthMonitor, name_or_id, ignore_missing=ignore_missing, **query, ) def get_health_monitor(self, health_monitor): """Get a single health monitor :param health_monitor: The value can be the ID of a health monitor or a :class:`~openstack.network.v2.health_monitor.HealthMonitor` instance. :returns: One :class:`~openstack.network.v2.health_monitor.HealthMonitor` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
        """
        return self._get(_health_monitor.HealthMonitor, health_monitor)

    def health_monitors(self, **query):
        """Return a generator of health monitors

        :param dict query: Optional query parameters to be sent to limit
            the resources being returned. Valid parameters are:

            * ``delay``: the time in milliseconds between sending probes.
            * ``expected_codes``: The expected HTTP codes for a passing
              HTTP(S) monitor.
            * ``http_method``: The HTTP method a monitor uses for requests.
            * ``is_admin_state_up``: The administrative state of a health
              monitor.
            * ``max_retries``: The maximum consecutive health probe attempts.
            * ``project_id``: The ID of the project this health monitor is
              associated with.
            * ``timeout``: The maximum number of milliseconds for a monitor to
              wait for a connection to be established before it times out.
            * ``type``: The type of probe sent by the load balancer for health
              check, which can be ``PING``, ``TCP``, ``HTTP`` or ``HTTPS``.
            * ``url_path``: The path portion of a URI that will be probed.

        :returns: A generator of health monitor objects
        :rtype: :class:`~openstack.network.v2.health_monitor.HealthMonitor`
        """
        return self._list(_health_monitor.HealthMonitor, **query)

    def update_health_monitor(self, health_monitor, **attrs):
        """Update a health monitor

        :param health_monitor: Either the id of a health monitor or a
            :class:`~openstack.network.v2.health_monitor.HealthMonitor`
            instance.
        :param attrs: The attributes to update on the health monitor
            represented by ``value``.

        :returns: The updated health monitor
        :rtype: :class:`~openstack.network.v2.health_monitor.HealthMonitor`
        """
        return self._update(
            _health_monitor.HealthMonitor, health_monitor, **attrs
        )

    def create_listener(self, **attrs):
        """Create a new listener from attributes

        :param attrs: Keyword arguments which will be used to create
            a :class:`~openstack.network.v2.listener.Listener`,
            comprised of the properties on the Listener class.
        :returns: The results of listener creation
        :rtype: :class:`~openstack.network.v2.listener.Listener`
        """
        return self._create(_listener.Listener, **attrs)

    def delete_listener(self, listener, ignore_missing=True):
        """Delete a listener

        :param listener: The value can be either the ID of a listener or a
            :class:`~openstack.network.v2.listener.Listener` instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the listener does not exist.
            When set to ``True``, no exception will be set when attempting to
            delete a nonexistent listener.

        :returns: ``None``
        """
        self._delete(
            _listener.Listener, listener, ignore_missing=ignore_missing
        )

    def find_listener(self, name_or_id, ignore_missing=True, **query):
        """Find a single listener

        :param name_or_id: The name or ID of a listener.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the resource does not exist. When set to ``True``, None will
            be returned when attempting to find a nonexistent resource.
        :param dict query: Any additional parameters to be passed into
            underlying methods. such as query filters.
        :returns: One :class:`~openstack.network.v2.listener.Listener` or None
        """
        return self._find(
            _listener.Listener,
            name_or_id,
            ignore_missing=ignore_missing,
            **query,
        )

    def get_listener(self, listener):
        """Get a single listener

        :param listener: The value can be the ID of a listener or a
            :class:`~openstack.network.v2.listener.Listener`
            instance.

        :returns: One :class:`~openstack.network.v2.listener.Listener`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        return self._get(_listener.Listener, listener)

    def listeners(self, **query):
        """Return a generator of listeners

        :param dict query: Optional query parameters to be sent to limit
            the resources being returned.
Valid parameters are: * ``connection_limit``: The maximum number of connections permitted for the load-balancer. * ``default_pool_id``: The ID of the default pool. * ``default_tls_container_ref``: A reference to a container of TLS secret. * ``description``: The description of a listener. * ``is_admin_state_up``: The administrative state of the listener. * ``name``: The name of a listener. * ``project_id``: The ID of the project associated with a listener. * ``protocol``: The protocol of the listener. * ``protocol_port``: Port the listener will listen to. :returns: A generator of listener objects :rtype: :class:`~openstack.network.v2.listener.Listener` """ return self._list(_listener.Listener, **query) def update_listener(self, listener, **attrs): """Update a listener :param listener: Either the id of a listener or a :class:`~openstack.network.v2.listener.Listener` instance. :param attrs: The attributes to update on the listener represented by ``listener``. :returns: The updated listener :rtype: :class:`~openstack.network.v2.listener.Listener` """ return self._update(_listener.Listener, listener, **attrs) def create_load_balancer(self, **attrs): """Create a new load balancer from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.load_balancer.LoadBalancer`, comprised of the properties on the LoadBalancer class. :returns: The results of load balancer creation :rtype: :class:`~openstack.network.v2.load_balancer.LoadBalancer` """ return self._create(_load_balancer.LoadBalancer, **attrs) def delete_load_balancer(self, load_balancer, ignore_missing=True): """Delete a load balancer :param load_balancer: The value can be the ID of a load balancer or a :class:`~openstack.network.v2.load_balancer.LoadBalancer` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the load balancer does not exist. 
When set to ``True``, no exception will be set when attempting to delete a nonexistent load balancer. :returns: ``None`` """ self._delete( _load_balancer.LoadBalancer, load_balancer, ignore_missing=ignore_missing, ) def find_load_balancer(self, name_or_id, ignore_missing=True, **query): """Find a single load balancer :param name_or_id: The name or ID of a load balancer. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.load_balancer.LoadBalancer` or None """ return self._find( _load_balancer.LoadBalancer, name_or_id, ignore_missing=ignore_missing, **query, ) def get_load_balancer(self, load_balancer): """Get a single load balancer :param load_balancer: The value can be the ID of a load balancer or a :class:`~openstack.network.v2.load_balancer.LoadBalancer` instance. :returns: One :class:`~openstack.network.v2.load_balancer.LoadBalancer` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_load_balancer.LoadBalancer, load_balancer) def load_balancers(self, **query): """Return a generator of load balancers :param dict query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of load balancer objects :rtype: :class:`~openstack.network.v2.load_balancer.LoadBalancer` """ return self._list(_load_balancer.LoadBalancer, **query) def update_load_balancer(self, load_balancer, **attrs): """Update a load balancer :param load_balancer: Either the id of a load balancer or a :class:`~openstack.network.v2.load_balancer.LoadBalancer` instance. :param attrs: The attributes to update on the load balancer represented by ``load_balancer``. 
:returns: The updated load balancer :rtype: :class:`~openstack.network.v2.load_balancer.LoadBalancer` """ return self._update( _load_balancer.LoadBalancer, load_balancer, **attrs ) def create_metering_label(self, **attrs): """Create a new metering label from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.metering_label.MeteringLabel`, comprised of the properties on the MeteringLabel class. :returns: The results of metering label creation :rtype: :class:`~openstack.network.v2.metering_label.MeteringLabel` """ return self._create(_metering_label.MeteringLabel, **attrs) def delete_metering_label(self, metering_label, ignore_missing=True): """Delete a metering label :param metering_label: The value can be either the ID of a metering label or a :class:`~openstack.network.v2.metering_label.MeteringLabel` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the metering label does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent metering label. :returns: ``None`` """ self._delete( _metering_label.MeteringLabel, metering_label, ignore_missing=ignore_missing, ) def find_metering_label(self, name_or_id, ignore_missing=True, **query): """Find a single metering label :param name_or_id: The name or ID of a metering label. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. 
:returns: One :class:`~openstack.network.v2.metering_label.MeteringLabel` or None """ return self._find( _metering_label.MeteringLabel, name_or_id, ignore_missing=ignore_missing, **query, ) def get_metering_label(self, metering_label): """Get a single metering label :param metering_label: The value can be the ID of a metering label or a :class:`~openstack.network.v2.metering_label.MeteringLabel` instance. :returns: One :class:`~openstack.network.v2.metering_label.MeteringLabel` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_metering_label.MeteringLabel, metering_label) def metering_labels(self, **query): """Return a generator of metering labels :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``description``: Description of a metering label. * ``name``: Name of a metering label. * ``is_shared``: Boolean indicating whether a metering label is shared. * ``project_id``: The ID of the project a metering label is associated with. :returns: A generator of metering label objects :rtype: :class:`~openstack.network.v2.metering_label.MeteringLabel` """ return self._list(_metering_label.MeteringLabel, **query) def update_metering_label(self, metering_label, **attrs): """Update a metering label :param metering_label: Either the id of a metering label or a :class:`~openstack.network.v2.metering_label.MeteringLabel` instance. :param attrs: The attributes to update on the metering label represented by ``metering_label``. 
:returns: The updated metering label :rtype: :class:`~openstack.network.v2.metering_label.MeteringLabel` """ return self._update( _metering_label.MeteringLabel, metering_label, **attrs ) def create_metering_label_rule(self, **attrs): """Create a new metering label rule from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule`, comprised of the properties on the MeteringLabelRule class. :returns: The results of metering label rule creation :rtype: :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` """ return self._create(_metering_label_rule.MeteringLabelRule, **attrs) def delete_metering_label_rule( self, metering_label_rule, ignore_missing=True ): """Delete a metering label rule :param metering_label_rule: The value can be either the ID of a metering label rule or a :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the metering label rule does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent metering label rule. :returns: ``None`` """ self._delete( _metering_label_rule.MeteringLabelRule, metering_label_rule, ignore_missing=ignore_missing, ) def find_metering_label_rule( self, name_or_id, ignore_missing=True, **query ): """Find a single metering label rule :param name_or_id: The name or ID of a metering label rule. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. 
:returns: One :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` or None """ return self._find( _metering_label_rule.MeteringLabelRule, name_or_id, ignore_missing=ignore_missing, **query, ) def get_metering_label_rule(self, metering_label_rule): """Get a single metering label rule :param metering_label_rule: The value can be the ID of a metering label rule or a :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` instance. :returns: One :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _metering_label_rule.MeteringLabelRule, metering_label_rule ) def metering_label_rules(self, **query): """Return a generator of metering label rules :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``direction``: The direction in which metering label rule is applied. * ``metering_label_id``: The ID of a metering label this rule is associated with. * ``project_id``: The ID of the project the metering label rule is associated with. * ``remote_ip_prefix``: The remote IP prefix to be associated with this metering label rule. :returns: A generator of metering label rule objects :rtype: :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` """ return self._list(_metering_label_rule.MeteringLabelRule, **query) def update_metering_label_rule(self, metering_label_rule, **attrs): """Update a metering label rule :param metering_label_rule: Either the id of a metering label rule or a :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` instance. :param attrs: The attributes to update on the metering label rule represented by ``metering_label_rule``. 
:returns: The updated metering label rule :rtype: :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` """ return self._update( _metering_label_rule.MeteringLabelRule, metering_label_rule, **attrs, ) def create_network(self, **attrs): """Create a new network from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.network.Network`, comprised of the properties on the Network class. :returns: The results of network creation :rtype: :class:`~openstack.network.v2.network.Network` """ return self._create(_network.Network, **attrs) def delete_network(self, network, ignore_missing=True, if_revision=None): """Delete a network :param network: The value can be either the ID of a network or a :class:`~openstack.network.v2.network.Network` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the network does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent network. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :returns: ``None`` """ self._delete( _network.Network, network, ignore_missing=ignore_missing, if_revision=if_revision, ) def find_network(self, name_or_id, ignore_missing=True, **query): """Find a single network :param name_or_id: The name or ID of a network. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. 
:returns: One :class:`~openstack.network.v2.network.Network` or None """ return self._find( _network.Network, name_or_id, ignore_missing=ignore_missing, **query, ) def get_network(self, network): """Get a single network :param network: The value can be the ID of a network or a :class:`~openstack.network.v2.network.Network` instance. :returns: One :class:`~openstack.network.v2.network.Network` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_network.Network, network) def networks(self, **query): """Return a generator of networks :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``description``: The network description. * ``ipv4_address_scope_id``: The ID of the IPv4 address scope for the network. * ``ipv6_address_scope_id``: The ID of the IPv6 address scope for the network. * ``is_admin_state_up``: Network administrative state * ``is_port_security_enabled``: The port security status. * ``is_router_external``: Network is external or not. * ``is_shared``: Whether the network is shared across projects. * ``name``: The name of the network. * ``status``: Network status * ``project_id``: Owner tenant ID * ``provider_network_type``: Network physical mechanism * ``provider_physical_network``: Physical network * ``provider_segmentation_id``: VLAN ID for VLAN networks or Tunnel ID for GENEVE/GRE/VXLAN networks :returns: A generator of network objects :rtype: :class:`~openstack.network.v2.network.Network` """ return self._list(_network.Network, **query) def update_network(self, network, if_revision=None, **attrs): """Update a network :param network: Either the id of a network or an instance of type :class:`~openstack.network.v2.network.Network`. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :param attrs: The attributes to update on the network represented by ``network``. 
        :returns: The updated network
        :rtype: :class:`~openstack.network.v2.network.Network`
        """
        return self._update(
            _network.Network, network, if_revision=if_revision, **attrs
        )

    def find_network_ip_availability(
        self, name_or_id, ignore_missing=True, **query
    ):
        """Find IP availability of a network

        :param name_or_id: The name or ID of a network.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the resource does not exist. When set to ``True``, None will
            be returned when attempting to find a nonexistent resource.
        :param dict query: Any additional parameters to be passed into
            underlying methods. such as query filters.
        :returns: One
            :class:`~openstack.network.v2.network_ip_availability.NetworkIPAvailability`
            or None
        """
        return self._find(
            network_ip_availability.NetworkIPAvailability,
            name_or_id,
            ignore_missing=ignore_missing,
            **query,
        )

    def get_network_ip_availability(self, network):
        """Get IP availability of a network

        :param network: The value can be the ID of a network or a
            :class:`~openstack.network.v2.network.Network` instance.

        :returns: One
            :class:`~openstack.network.v2.network_ip_availability.NetworkIPAvailability`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        return self._get(
            network_ip_availability.NetworkIPAvailability, network
        )

    def network_ip_availabilities(self, **query):
        """Return a generator of network ip availabilities

        :param kwargs query: Optional query parameters to be sent to limit
            the resources being returned. Available parameters include:

            * ``ip_version``: IP version of the network
            * ``network_id``: ID of network to use when listing network IP
              availability.
            * ``network_name``: The name of the network for the particular
              network IP availability.
* ``project_id``: Owner tenant ID :returns: A generator of network ip availability objects :rtype: :class:`~openstack.network.v2.network_ip_availability.NetworkIPAvailability` """ return self._list( network_ip_availability.NetworkIPAvailability, **query ) def create_network_segment_range(self, **attrs): """Create a new network segment range from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange`, comprised of the properties on the NetworkSegmentRange class. :returns: The results of network segment range creation :rtype: :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange` """ return self._create( _network_segment_range.NetworkSegmentRange, **attrs ) def delete_network_segment_range( self, network_segment_range, ignore_missing=True ): """Delete a network segment range :param network_segment_range: The value can be either the ID of a network segment range or a :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the network segment range does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent network segment range. :returns: ``None`` """ self._delete( _network_segment_range.NetworkSegmentRange, network_segment_range, ignore_missing=ignore_missing, ) def find_network_segment_range( self, name_or_id, ignore_missing=True, **query ): """Find a single network segment range :param name_or_id: The name or ID of a network segment range. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. 
            such as query filters.
        :returns: One
            :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange`
            or None
        """
        return self._find(
            _network_segment_range.NetworkSegmentRange,
            name_or_id,
            ignore_missing=ignore_missing,
            **query,
        )

    def get_network_segment_range(self, network_segment_range):
        """Get a single network segment range

        :param network_segment_range: The value can be the ID of a network
            segment range or a
            :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange`
            instance.
        :returns: One
            :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        return self._get(
            _network_segment_range.NetworkSegmentRange, network_segment_range
        )

    def network_segment_ranges(self, **query):
        """Return a generator of network segment ranges

        :param kwargs query: Optional query parameters to be sent to limit
            the resources being returned. Available parameters include:

            * ``name``: Name of the segments
            * ``default``: The network segment range is loaded from the host
              configuration file.
            * ``shared``: The network segment range is shared with other
              projects
            * ``project_id``: ID of the project that owns the network
              segment range
            * ``network_type``: Network type for the network segment ranges
            * ``physical_network``: Physical network name for the network
              segment ranges
            * ``minimum``: Minimum segmentation ID for the network segment
              ranges
            * ``maximum``: Maximum Segmentation ID for the network segment
              ranges
            * ``used``: Mapping of which segmentation ID in the range is
              used by which tenant
            * ``available``: List of available segmentation IDs in this
              network segment range

        :returns: A generator of network segment range objects
        :rtype:
            :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange`
        """
        return self._list(_network_segment_range.NetworkSegmentRange, **query)

    def update_network_segment_range(self, network_segment_range, **attrs):
        """Update a network segment range

        :param network_segment_range: Either the ID of a network segment
            range or a
            :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange`
            instance.
        :param attrs: The attributes to update on the network segment range
            represented by ``network_segment_range``.

        :returns: The updated network segment range
        :rtype:
            :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange`
        """
        return self._update(
            _network_segment_range.NetworkSegmentRange,
            network_segment_range,
            **attrs,
        )

    def create_pool(self, **attrs):
        """Create a new pool from attributes

        :param attrs: Keyword arguments which will be used to create a
            :class:`~openstack.network.v2.pool.Pool`,
            comprised of the properties on the Pool class.

        :returns: The results of pool creation
        :rtype: :class:`~openstack.network.v2.pool.Pool`
        """
        return self._create(_pool.Pool, **attrs)

    def delete_pool(self, pool, ignore_missing=True):
        """Delete a pool

        :param pool: The value can be either the ID of a pool or a
            :class:`~openstack.network.v2.pool.Pool` instance.
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the pool does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent pool. :returns: ``None`` """ self._delete(_pool.Pool, pool, ignore_missing=ignore_missing) def find_pool(self, name_or_id, ignore_missing=True, **query): """Find a single pool :param name_or_id: The name or ID of a pool. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.pool.Pool` or None """ return self._find( _pool.Pool, name_or_id, ignore_missing=ignore_missing, **query ) def get_pool(self, pool): """Get a single pool :param pool: The value can be the ID of a pool or a :class:`~openstack.network.v2.pool.Pool` instance. :returns: One :class:`~openstack.network.v2.pool.Pool` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_pool.Pool, pool) def pools(self, **query): """Return a generator of pools :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``description``: The description for the pool. * ``is_admin_state_up``: The administrative state of the pool. * ``lb_algorithm``: The load-balancer algorithm used, which is one of ``round-robin``, ``least-connections`` and so on. * ``name``: The name of the node pool. * ``project_id``: The ID of the project the pool is associated with. * ``protocol``: The protocol used by the pool, which is one of ``TCP``, ``HTTP`` or ``HTTPS``. * ``provider``: The name of the provider of the load balancer service. 
* ``subnet_id``: The subnet on which the members of the pool are located. * ``virtual_ip_id``: The ID of the virtual IP used. :returns: A generator of pool objects :rtype: :class:`~openstack.network.v2.pool.Pool` """ return self._list(_pool.Pool, **query) def update_pool(self, pool, **attrs): """Update a pool :param pool: Either the id of a pool or a :class:`~openstack.network.v2.pool.Pool` instance. :param attrs: The attributes to update on the pool represented by ``pool``. :returns: The updated pool :rtype: :class:`~openstack.network.v2.pool.Pool` """ return self._update(_pool.Pool, pool, **attrs) def create_pool_member(self, pool, **attrs): """Create a new pool member from attributes :param pool: The pool can be either the ID of a pool or a :class:`~openstack.network.v2.pool.Pool` instance that the member will be created in. :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.pool_member.PoolMember`, comprised of the properties on the PoolMember class. :returns: The results of pool member creation :rtype: :class:`~openstack.network.v2.pool_member.PoolMember` """ poolobj = self._get_resource(_pool.Pool, pool) return self._create( _pool_member.PoolMember, pool_id=poolobj.id, **attrs ) def delete_pool_member(self, pool_member, pool, ignore_missing=True): """Delete a pool member :param pool_member: The member can be either the ID of a pool member or a :class:`~openstack.network.v2.pool_member.PoolMember` instance. :param pool: The pool can be either the ID of a pool or a :class:`~openstack.network.v2.pool.Pool` instance that the member belongs to. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the pool member does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent pool member. 
:returns: ``None`` """ poolobj = self._get_resource(_pool.Pool, pool) self._delete( _pool_member.PoolMember, pool_member, ignore_missing=ignore_missing, pool_id=poolobj.id, ) def find_pool_member(self, name_or_id, pool, ignore_missing=True, **query): """Find a single pool member :param str name_or_id: The name or ID of a pool member. :param pool: The pool can be either the ID of a pool or a :class:`~openstack.network.v2.pool.Pool` instance that the member belongs to. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.pool_member.PoolMember` or None """ poolobj = self._get_resource(_pool.Pool, pool) return self._find( _pool_member.PoolMember, name_or_id, ignore_missing=ignore_missing, pool_id=poolobj.id, **query, ) def get_pool_member(self, pool_member, pool): """Get a single pool member :param pool_member: The member can be the ID of a pool member or a :class:`~openstack.network.v2.pool_member.PoolMember` instance. :param pool: The pool can be either the ID of a pool or a :class:`~openstack.network.v2.pool.Pool` instance that the member belongs to. :returns: One :class:`~openstack.network.v2.pool_member.PoolMember` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ poolobj = self._get_resource(_pool.Pool, pool) return self._get( _pool_member.PoolMember, pool_member, pool_id=poolobj.id ) def pool_members(self, pool, **query): """Return a generator of pool members :param pool: The pool can be either the ID of a pool or a :class:`~openstack.network.v2.pool.Pool` instance that the member belongs to. :param dict query: Optional query parameters to be sent to limit the resources being returned. 
Valid parameters are: * ``address``: The IP address of the pool member. * ``is_admin_state_up``: The administrative state of the pool member. * ``name``: Name of the pool member. * ``project_id``: The ID of the project this pool member is associated with. * ``protocol_port``: The port on which the application is hosted. * ``subnet_id``: Subnet ID in which to access this pool member. * ``weight``: A positive integer value that indicates the relative portion of traffic that this member should receive from the pool. :returns: A generator of pool member objects :rtype: :class:`~openstack.network.v2.pool_member.PoolMember` """ poolobj = self._get_resource(_pool.Pool, pool) return self._list(_pool_member.PoolMember, pool_id=poolobj.id, **query) def update_pool_member(self, pool_member, pool, **attrs): """Update a pool member :param pool_member: Either the ID of a pool member or a :class:`~openstack.network.v2.pool_member.PoolMember` instance. :param pool: The pool can be either the ID of a pool or a :class:`~openstack.network.v2.pool.Pool` instance that the member belongs to. :param attrs: The attributes to update on the pool member represented by ``pool_member``. :returns: The updated pool member :rtype: :class:`~openstack.network.v2.pool_member.PoolMember` """ poolobj = self._get_resource(_pool.Pool, pool) return self._update( _pool_member.PoolMember, pool_member, pool_id=poolobj.id, **attrs ) def create_port(self, **attrs): """Create a new port from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.port.Port`, comprised of the properties on the Port class. 
:returns: The results of port creation :rtype: :class:`~openstack.network.v2.port.Port` """ return self._create(_port.Port, **attrs) def create_ports(self, data): """Create ports from the list of attributes :param list data: List of dicts of attributes which will be used to create a :class:`~openstack.network.v2.port.Port`, comprised of the properties on the Port class. :returns: A generator of port objects :rtype: :class:`~openstack.network.v2.port.Port` """ return self._bulk_create(_port.Port, data) def delete_port(self, port, ignore_missing=True, if_revision=None): """Delete a port :param port: The value can be either the ID of a port or a :class:`~openstack.network.v2.port.Port` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the port does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent port. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :returns: ``None`` """ self._delete( _port.Port, port, ignore_missing=ignore_missing, if_revision=if_revision, ) def find_port(self, name_or_id, ignore_missing=True, **query): """Find a single port :param name_or_id: The name or ID of a port. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.port.Port` or None """ return self._find( _port.Port, name_or_id, ignore_missing=ignore_missing, **query ) def get_port(self, port): """Get a single port :param port: The value can be the ID of a port or a :class:`~openstack.network.v2.port.Port` instance. 
:returns: One :class:`~openstack.network.v2.port.Port` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_port.Port, port) def ports(self, **query): """Return a generator of ports :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``description``: The port description. * ``device_id``: Port device ID. * ``device_owner``: Port device owner (e.g. ``network:dhcp``). * ``ip_address``: IP addresses of an allowed address pair. * ``is_admin_state_up``: The administrative state of the port. * ``is_port_security_enabled``: The port security status. * ``mac_address``: Port MAC address. * ``name``: The port name. * ``network_id``: ID of network that owns the ports. * ``project_id``: The ID of the project who owns the network. * ``status``: The port status. Value is ``ACTIVE`` or ``DOWN``. * ``subnet_id``: The ID of the subnet. :returns: A generator of port objects :rtype: :class:`~openstack.network.v2.port.Port` """ return self._list(_port.Port, **query) def update_port(self, port, if_revision=None, **attrs) -> _port.Port: """Update a port :param port: Either the id of a port or a :class:`~openstack.network.v2.port.Port` instance. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :param attrs: The attributes to update on the port represented by ``port``. 
:returns: The updated port :rtype: :class:`~openstack.network.v2.port.Port` """ return self._update(_port.Port, port, if_revision=if_revision, **attrs) def add_ip_to_port(self, port, ip): ip.port_id = port.id return ip.commit(self) def remove_ip_from_port(self, ip): ip.port_id = None return ip.commit(self) def get_subnet_ports(self, subnet_id): result = [] ports = self.ports() for puerta in ports: for fixed_ip in puerta.fixed_ips: if fixed_ip['subnet_id'] == subnet_id: result.append(puerta) return result def create_qos_bandwidth_limit_rule(self, qos_policy, **attrs): """Create a new bandwidth limit rule :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule`, comprised of the properties on the QoSBandwidthLimitRule class. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: The results of resource creation :rtype: :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._create( _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, qos_policy_id=policy.id, **attrs, ) def delete_qos_bandwidth_limit_rule( self, qos_rule, qos_policy, ignore_missing=True ): """Delete a bandwidth limit rule :param qos_rule: The value can be either the ID of a bandwidth limit rule or a :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent bandwidth limit rule. 
:returns: ``None`` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) self._delete( _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, qos_rule, ignore_missing=ignore_missing, qos_policy_id=policy.id, ) def find_qos_bandwidth_limit_rule( self, qos_rule_id, qos_policy, ignore_missing=True, **query ): """Find a bandwidth limit rule :param qos_rule_id: The ID of a bandwidth limit rule. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` or None """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._find( _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, qos_rule_id, ignore_missing=ignore_missing, qos_policy_id=policy.id, **query, ) def get_qos_bandwidth_limit_rule(self, qos_rule, qos_policy): """Get a single bandwidth limit rule :param qos_rule: The value can be the ID of a minimum bandwidth rule or a :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: One :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._get( _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, qos_rule, qos_policy_id=policy.id, ) def qos_bandwidth_limit_rules(self, qos_policy, **query): """Return a generator of bandwidth limit rules :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of bandwidth limit rule objects :rtype: :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._list( _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, qos_policy_id=policy.id, **query, ) def update_qos_bandwidth_limit_rule( self, qos_rule, qos_policy, **attrs, ): """Update a bandwidth limit rule :param qos_rule: Either the id of a bandwidth limit rule or a :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param attrs: The attributes to update on the bandwidth limit rule represented by ``qos_rule``. :returns: The updated minimum bandwidth rule :rtype: :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._update( _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, qos_rule, qos_policy_id=policy.id, **attrs, ) def create_qos_dscp_marking_rule(self, qos_policy, **attrs): """Create a new QoS DSCP marking rule :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule`, comprised of the properties on the QosDscpMarkingRule class. 
:param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: The results of router creation :rtype: :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._create( _qos_dscp_marking_rule.QoSDSCPMarkingRule, qos_policy_id=policy.id, **attrs, ) def delete_qos_dscp_marking_rule( self, qos_rule, qos_policy, ignore_missing=True ): """Delete a QoS DSCP marking rule :param qos_rule: The value can be either the ID of a minimum bandwidth rule or a :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent minimum bandwidth rule. :returns: ``None`` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) self._delete( _qos_dscp_marking_rule.QoSDSCPMarkingRule, qos_rule, ignore_missing=ignore_missing, qos_policy_id=policy.id, ) def find_qos_dscp_marking_rule( self, qos_rule_id, qos_policy, ignore_missing=True, **query ): """Find a QoS DSCP marking rule :param qos_rule_id: The ID of a QoS DSCP marking rule. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. 
such as query filters. :returns: One :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` or None """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._find( _qos_dscp_marking_rule.QoSDSCPMarkingRule, qos_rule_id, ignore_missing=ignore_missing, qos_policy_id=policy.id, **query, ) def get_qos_dscp_marking_rule(self, qos_rule, qos_policy): """Get a single QoS DSCP marking rule :param qos_rule: The value can be the ID of a minimum bandwidth rule or a :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: One :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._get( _qos_dscp_marking_rule.QoSDSCPMarkingRule, qos_rule, qos_policy_id=policy.id, ) def qos_dscp_marking_rules(self, qos_policy, **query): """Return a generator of QoS DSCP marking rules :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of QoS DSCP marking rule objects :rtype: :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._list( _qos_dscp_marking_rule.QoSDSCPMarkingRule, qos_policy_id=policy.id, **query, ) def update_qos_dscp_marking_rule(self, qos_rule, qos_policy, **attrs): """Update a QoS DSCP marking rule :param qos_rule: Either the id of a minimum bandwidth rule or a :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` instance. 
:param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param attrs: The attributes to update on the QoS DSCP marking rule represented by ``qos_rule``. :returns: The updated QoS DSCP marking rule :rtype: :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._update( _qos_dscp_marking_rule.QoSDSCPMarkingRule, qos_rule, qos_policy_id=policy.id, **attrs, ) def create_qos_minimum_bandwidth_rule(self, qos_policy, **attrs): """Create a new minimum bandwidth rule :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule`, comprised of the properties on the QoSMinimumBandwidthRule class. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: The results of resource creation :rtype: :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._create( _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, qos_policy_id=policy.id, **attrs, ) def delete_qos_minimum_bandwidth_rule( self, qos_rule, qos_policy, ignore_missing=True ): """Delete a minimum bandwidth rule :param qos_rule: The value can be either the ID of a minimum bandwidth rule or a :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. 
When set to ``True``, no exception will be set when attempting to delete a nonexistent minimum bandwidth rule. :returns: ``None`` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) self._delete( _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, qos_rule, ignore_missing=ignore_missing, qos_policy_id=policy.id, ) def find_qos_minimum_bandwidth_rule( self, qos_rule_id, qos_policy, ignore_missing=True, **query ): """Find a minimum bandwidth rule :param qos_rule_id: The ID of a minimum bandwidth rule. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` or None """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._find( _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, qos_rule_id, ignore_missing=ignore_missing, qos_policy_id=policy.id, **query, ) def get_qos_minimum_bandwidth_rule(self, qos_rule, qos_policy): """Get a single minimum bandwidth rule :param qos_rule: The value can be the ID of a minimum bandwidth rule or a :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: One :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._get( _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, qos_rule, qos_policy_id=policy.id, ) def qos_minimum_bandwidth_rules(self, qos_policy, **query): """Return a generator of minimum bandwidth rules :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of minimum bandwidth rule objects :rtype: :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._list( _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, qos_policy_id=policy.id, **query, ) def update_qos_minimum_bandwidth_rule(self, qos_rule, qos_policy, **attrs): """Update a minimum bandwidth rule :param qos_rule: Either the id of a minimum bandwidth rule or a :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param attrs: The attributes to update on the minimum bandwidth rule represented by ``qos_rule``. 
:returns: The updated minimum bandwidth rule :rtype: :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._update( _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, qos_rule, qos_policy_id=policy.id, **attrs, ) def create_qos_minimum_packet_rate_rule(self, qos_policy, **attrs): """Create a new minimum packet rate rule :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule`, comprised of the properties on the QoSMinimumPacketRateRule class. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: The results of resource creation :rtype: :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._create( _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, qos_policy_id=policy.id, **attrs, ) def delete_qos_minimum_packet_rate_rule( self, qos_rule, qos_policy, ignore_missing=True ): """Delete a minimum packet rate rule :param qos_rule: The value can be either the ID of a minimum packet rate rule or a :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent minimum packet rate rule. 
:returns: ``None`` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) self._delete( _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, qos_rule, ignore_missing=ignore_missing, qos_policy_id=policy.id, ) def find_qos_minimum_packet_rate_rule( self, qos_rule_id, qos_policy, ignore_missing=True, **query ): """Find a minimum packet rate rule :param qos_rule_id: The ID of a minimum packet rate rule. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` or None """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._find( _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, qos_rule_id, ignore_missing=ignore_missing, qos_policy_id=policy.id, **query, ) def get_qos_minimum_packet_rate_rule(self, qos_rule, qos_policy): """Get a single minimum packet rate rule :param qos_rule: The value can be the ID of a minimum packet rate rule or a :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: One :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._get( _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, qos_rule, qos_policy_id=policy.id, ) def qos_minimum_packet_rate_rules(self, qos_policy, **query): """Return a generator of minimum packet rate rules :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of minimum packet rate rule objects :rtype: :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._list( _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, qos_policy_id=policy.id, **query, ) def update_qos_minimum_packet_rate_rule( self, qos_rule, qos_policy, **attrs ): """Update a minimum packet rate rule :param qos_rule: Either the id of a minimum packet rate rule or a :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` instance. :param qos_policy: The value can be the ID of the QoS policy that the rule belongs or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param attrs: The attributes to update on the minimum packet rate rule represented by ``qos_rule``. :returns: The updated minimum packet rate rule :rtype: :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._update( _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, qos_rule, qos_policy_id=policy.id, **attrs, ) def create_qos_policy(self, **attrs): """Create a new QoS policy from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.qos_policy.QoSPolicy`, comprised of the properties on the QoSPolicy class. 
:returns: The results of QoS policy creation :rtype: :class:`~openstack.network.v2.qos_policy.QoSPolicy` """ return self._create(_qos_policy.QoSPolicy, **attrs) def delete_qos_policy(self, qos_policy, ignore_missing=True): """Delete a QoS policy :param qos_policy: The value can be either the ID of a QoS policy or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the QoS policy does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent QoS policy. :returns: ``None`` """ self._delete( _qos_policy.QoSPolicy, qos_policy, ignore_missing=ignore_missing ) def find_qos_policy(self, name_or_id, ignore_missing=True, **query): """Find a single QoS policy :param name_or_id: The name or ID of a QoS policy. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.qos_policy.QoSPolicy` or None """ return self._find( _qos_policy.QoSPolicy, name_or_id, ignore_missing=ignore_missing, **query, ) def get_qos_policy(self, qos_policy): """Get a single QoS policy :param qos_policy: The value can be the ID of a QoS policy or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: One :class:`~openstack.network.v2.qos_policy.QoSPolicy` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_qos_policy.QoSPolicy, qos_policy) def qos_policies(self, **query): """Return a generator of QoS policies :param dict query: Optional query parameters to be sent to limit the resources being returned. 
Valid parameters are: * ``description``: The description of a QoS policy. * ``is_shared``: Whether the policy is shared among projects. * ``name``: The name of a QoS policy. * ``project_id``: The ID of the project who owns the network. :returns: A generator of QoS policy objects :rtype: :class:`~openstack.network.v2.qos_policy.QoSPolicy` """ return self._list(_qos_policy.QoSPolicy, **query) def update_qos_policy(self, qos_policy, **attrs): """Update a QoS policy :param qos_policy: Either the id of a QoS policy or a :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param attrs: The attributes to update on the QoS policy represented by ``qos_policy``. :returns: The updated QoS policy :rtype: :class:`~openstack.network.v2.qos_policy.QoSPolicy` """ return self._update(_qos_policy.QoSPolicy, qos_policy, **attrs) def find_qos_rule_type(self, rule_type_name, ignore_missing=True): """Find a single QoS rule type details :param rule_type_name: The name of a QoS rule type. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.network.v2.qos_rule_type.QoSRuleType` or None """ return self._find( _qos_rule_type.QoSRuleType, rule_type_name, ignore_missing=ignore_missing, ) def get_qos_rule_type(self, qos_rule_type): """Get details about single QoS rule type :param qos_rule_type: The value can be the name of a QoS policy rule type or a :class:`~openstack.network.v2.qos_rule_type.QoSRuleType` instance. :returns: One :class:`~openstack.network.v2.qos_rule_type.QoSRuleType` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get(_qos_rule_type.QoSRuleType, qos_rule_type) def qos_rule_types(self, **query): """Return a generator of QoS rule types :param dict query: Optional query parameters to be sent to limit the resources returned. Valid parameters include: * ``type``: The type of the QoS rule type. :returns: A generator of QoS rule type objects :rtype: :class:`~openstack.network.v2.qos_rule_type.QoSRuleType` """ return self._list(_qos_rule_type.QoSRuleType, **query) def delete_quota(self, quota, ignore_missing=True): """Delete a quota (i.e. reset to the default quota) :param quota: The value can be either the ID of a quota or a :class:`~openstack.network.v2.quota.Quota` instance. The ID of a quota is the same as the project ID for the quota. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when quota does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent quota. :returns: ``None`` """ self._delete(_quota.Quota, quota, ignore_missing=ignore_missing) def get_quota(self, quota, details=False): """Get a quota :param quota: The value can be the ID of a quota or a :class:`~openstack.network.v2.quota.Quota` instance. The ID of a quota is the same as the project ID for the quota. :param details: If set to True, details about quota usage will be returned. :returns: One :class:`~openstack.network.v2.quota.Quota` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ if details: quota_obj = self._get_resource(_quota.Quota, quota) quota = self._get( _quota.QuotaDetails, project=quota_obj.id, requires_id=False ) else: quota = self._get(_quota.Quota, quota) return quota def get_quota_default(self, quota): """Get a default quota :param quota: The value can be the ID of a default quota or a :class:`~openstack.network.v2.quota.QuotaDefault` instance. The ID of a default quota is the same as the project ID for the default quota. 
:returns: One :class:`~openstack.network.v2.quota.QuotaDefault` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ quota_obj = self._get_resource(_quota.Quota, quota) return self._get( _quota.QuotaDefault, project=quota_obj.id, requires_id=False ) def quotas(self, **query): """Return a generator of quotas :param dict query: Optional query parameters to be sent to limit the resources being returned. Currently no query parameter is supported. :returns: A generator of quota objects :rtype: :class:`~openstack.network.v2.quota.Quota` """ return self._list(_quota.Quota, **query) def update_quota(self, quota, **attrs): """Update a quota :param quota: Either the ID of a quota or a :class:`~openstack.network.v2.quota.Quota` instance. The ID of a quota is the same as the project ID for the quota. :param attrs: The attributes to update on the quota represented by ``quota``. :returns: The updated quota :rtype: :class:`~openstack.network.v2.quota.Quota` """ return self._update(_quota.Quota, quota, **attrs) def create_rbac_policy(self, **attrs): """Create a new RBAC policy from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.rbac_policy.RBACPolicy`, comprised of the properties on the RBACPolicy class. :return: The results of RBAC policy creation :rtype: :class:`~openstack.network.v2.rbac_policy.RBACPolicy` """ return self._create(_rbac_policy.RBACPolicy, **attrs) def delete_rbac_policy(self, rbac_policy, ignore_missing=True): """Delete a RBAC policy :param rbac_policy: The value can be either the ID of a RBAC policy or a :class:`~openstack.network.v2.rbac_policy.RBACPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the RBAC policy does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent RBAC policy. 
:returns: ``None`` """ self._delete( _rbac_policy.RBACPolicy, rbac_policy, ignore_missing=ignore_missing ) def find_rbac_policy(self, rbac_policy, ignore_missing=True, **query): """Find a single RBAC policy :param rbac_policy: The ID of a RBAC policy. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.rbac_policy.RBACPolicy` or None """ return self._find( _rbac_policy.RBACPolicy, rbac_policy, ignore_missing=ignore_missing, **query, ) def get_rbac_policy(self, rbac_policy): """Get a single RBAC policy :param rbac_policy: The value can be the ID of a RBAC policy or a :class:`~openstack.network.v2.rbac_policy.RBACPolicy` instance. :returns: One :class:`~openstack.network.v2.rbac_policy.RBACPolicy` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_rbac_policy.RBACPolicy, rbac_policy) def rbac_policies(self, **query): """Return a generator of RBAC policies :param dict query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``action``: RBAC policy action * ``object_type``: Type of the object that the RBAC policy affects * ``target_project_id``: ID of the tenant that the RBAC policy affects * ``project_id``: Owner tenant ID :returns: A generator of rbac objects :rtype: :class:`~openstack.network.v2.rbac_policy.RBACPolicy` """ return self._list(_rbac_policy.RBACPolicy, **query) def update_rbac_policy(self, rbac_policy, **attrs): """Update a RBAC policy :param rbac_policy: Either the id of a RBAC policy or a :class:`~openstack.network.v2.rbac_policy.RBACPolicy` instance. 
:param attrs: The attributes to update on the RBAC policy represented by ``rbac_policy``. :returns: The updated RBAC policy :rtype: :class:`~openstack.network.v2.rbac_policy.RBACPolicy` """ return self._update(_rbac_policy.RBACPolicy, rbac_policy, **attrs) def create_router(self, **attrs): """Create a new router from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.router.Router`, comprised of the properties on the Router class. :returns: The results of router creation :rtype: :class:`~openstack.network.v2.router.Router` """ return self._create(_router.Router, **attrs) def delete_router(self, router, ignore_missing=True, if_revision=None): """Delete a router :param router: The value can be either the ID of a router or a :class:`~openstack.network.v2.router.Router` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the router does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent router. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :returns: ``None`` """ self._delete( _router.Router, router, ignore_missing=ignore_missing, if_revision=if_revision, ) def find_router(self, name_or_id, ignore_missing=True, **query): """Find a single router :param name_or_id: The name or ID of a router. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. 
:returns: One :class:`~openstack.network.v2.router.Router` or None """ return self._find( _router.Router, name_or_id, ignore_missing=ignore_missing, **query ) def get_router(self, router): """Get a single router :param router: The value can be the ID of a router or a :class:`~openstack.network.v2.router.Router` instance. :returns: One :class:`~openstack.network.v2.router.Router` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_router.Router, router) def routers(self, **query): """Return a generator of routers :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``description``: The description of a router. * ``flavor_id``: The ID of the flavor. * ``is_admin_state_up``: Router administrative state is up or not * ``is_distributed``: The distributed state of a router * ``is_ha``: The highly-available state of a router * ``name``: Router name * ``project_id``: The ID of the project this router is associated with. * ``status``: The status of the router. :returns: A generator of router objects :rtype: :class:`~openstack.network.v2.router.Router` """ return self._list(_router.Router, **query) def update_router(self, router, if_revision=None, **attrs): """Update a router :param router: Either the id of a router or a :class:`~openstack.network.v2.router.Router` instance. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :param attrs: The attributes to update on the router represented by ``router``. 
:returns: The updated router :rtype: :class:`~openstack.network.v2.router.Router` """ return self._update( _router.Router, router, if_revision=if_revision, **attrs ) def add_interface_to_router(self, router, subnet_id=None, port_id=None): """Add Interface to a router :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param subnet_id: ID of the subnet :param port_id: ID of the port :returns: Router with updated interface :rtype: :class:`~openstack.network.v2.router.Router` """ body = {} if port_id: body = {'port_id': port_id} else: body = {'subnet_id': subnet_id} router = self._get_resource(_router.Router, router) return router.add_interface(self, **body) def remove_interface_from_router( self, router, subnet_id=None, port_id=None ): """Remove Interface from a router :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param subnet: ID of the subnet :param port: ID of the port :returns: Router with updated interface :rtype: :class:`~openstack.network.v2.router.Router` """ body = {} if port_id: body = {'port_id': port_id} else: body = {'subnet_id': subnet_id} router = self._get_resource(_router.Router, router) return router.remove_interface(self, **body) def add_extra_routes_to_router(self, router, body): """Add extra routes to a router :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param body: The request body as documented in the api-ref. :returns: Router with updated extra routes :rtype: :class:`~openstack.network.v2.router.Router` """ router = self._get_resource(_router.Router, router) return router.add_extra_routes(self, body=body) def remove_extra_routes_from_router(self, router, body): """Remove extra routes from a router :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param body: The request body as documented in the api-ref. 
:returns: Router with updated extra routes :rtype: :class:`~openstack.network.v2.router.Router` """ router = self._get_resource(_router.Router, router) return router.remove_extra_routes(self, body=body) def add_gateway_to_router(self, router, **body): """Add Gateway to a router :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param body: Body with the gateway information :returns: Router with updated interface :rtype: :class:`~openstack.network.v2.router.Router` """ router = self._get_resource(_router.Router, router) return router.add_gateway(self, **body) def remove_gateway_from_router(self, router, **body): """Remove Gateway from a router :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param body: Body with the gateway information :returns: Router with updated interface :rtype: :class:`~openstack.network.v2.router.Router` """ router = self._get_resource(_router.Router, router) return router.remove_gateway(self, **body) def add_external_gateways(self, router, body): """Add router external gateways :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param body: Body containing the external_gateways parameter. :returns: Router with added gateways :rtype: :class:`~openstack.network.v2.router.Router` """ router = self._get_resource(_router.Router, router) return router.add_external_gateways(self, body) def update_external_gateways(self, router, body): """Update router external gateways :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param body: Body containing the external_gateways parameter. 
:returns: Router with updated gateways :rtype: :class:`~openstack.network.v2.router.Router` """ router = self._get_resource(_router.Router, router) return router.update_external_gateways(self, body) def remove_external_gateways(self, router, body): """Remove router external gateways :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param body: Body containing the external_gateways parameter. :returns: Router without the removed gateways :rtype: :class:`~openstack.network.v2.router.Router` """ router = self._get_resource(_router.Router, router) return router.remove_external_gateways(self, body) def routers_hosting_l3_agents(self, router, **query): """Return a generator of L3 agent hosting a router :param router: Either the router id or an instance of :class:`~openstack.network.v2.router.Router` :param kwargs query: Optional query parameters to be sent to limit the resources returned :returns: A generator of Router L3 Agents :rtype: :class:`~openstack.network.v2.router.RouterL3Agents` """ router = self._get_resource(_router.Router, router) return self._list(_agent.RouterL3Agent, router_id=router.id, **query) def agent_hosted_routers(self, agent, **query): """Return a generator of routers hosted by a L3 agent :param agent: Either the agent id of an instance of :class:`~openstack.network.v2.network_agent.Agent` :param kwargs query: Optional query parameters to be sent to limit the resources returned :returns: A generator of routers :rtype: :class:`~openstack.network.v2.agent.L3AgentRouters` """ agent = self._get_resource(_agent.Agent, agent) return self._list(_router.L3AgentRouter, agent_id=agent.id, **query) def add_router_to_agent(self, agent, router): """Add router to L3 agent :param agent: Either the id of an agent :class:`~openstack.network.v2.agent.Agent` instance :param router: A router instance :returns: Agent with attached router :rtype: :class:`~openstack.network.v2.agent.Agent` """ agent = 
self._get_resource(_agent.Agent, agent) router = self._get_resource(_router.Router, router) return agent.add_router_to_agent(self, router.id) def remove_router_from_agent(self, agent, router): """Remove router from L3 agent :param agent: Either the id of an agent or an :class:`~openstack.network.v2.agent.Agent` instance :param router: A router instance :returns: Agent with removed router :rtype: :class:`~openstack.network.v2.agent.Agent` """ agent = self._get_resource(_agent.Agent, agent) router = self._get_resource(_router.Router, router) return agent.remove_router_from_agent(self, router.id) def create_ndp_proxy(self, **attrs): """Create a new ndp proxy from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.ndp_proxy.NDPProxxy`, comprised of the properties on the NDPProxy class. :returns: The results of ndp proxy creation :rtype: :class:`~openstack.network.v2.ndp_proxy.NDPProxxy` """ return self._create(_ndp_proxy.NDPProxy, **attrs) def get_ndp_proxy(self, ndp_proxy): """Get a single ndp proxy :param ndp_proxy: The value can be the ID of a ndp proxy or a :class:`~openstack.network.v2.ndp_proxy.NDPProxy` instance. :returns: One :class:`~openstack.network.v2.ndp_proxy.NDPProxy` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_ndp_proxy.NDPProxy, ndp_proxy) def find_ndp_proxy(self, ndp_proxy_id, ignore_missing=True, **query): """Find a single ndp proxy :param ndp_proxy_id: The ID of a ndp proxy. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. 
:returns: One :class:`~openstack.network.v2.ndp_proxy.NDPProxy` or None """ return self._find( _ndp_proxy.NDPProxy, ndp_proxy_id, ignore_missing=ignore_missing, **query, ) def delete_ndp_proxy(self, ndp_proxy, ignore_missing=True): """Delete a ndp proxy :param ndp_proxy: The value can be the ID of a ndp proxy or a :class:`~openstack.network.v2.ndp_proxy.NDPProxy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the router does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent ndp proxy. :returns: ``None`` """ self._delete( _ndp_proxy.NDPProxy, ndp_proxy, ignore_missing=ignore_missing ) def ndp_proxies(self, **query): """Return a generator of ndp proxies :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``router_id``: The ID fo the router * ``port_id``: The ID of internal port. * ``ip_address``: The internal IP address :returns: A generator of port forwarding objects :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` """ return self._list(_ndp_proxy.NDPProxy, paginated=False, **query) def update_ndp_proxy(self, ndp_proxy, **attrs): """Update a ndp proxy :param ndp_proxy: The value can be the ID of a ndp proxy or a :class:`~openstack.network.v2.ndp_proxy.NDPProxy` instance. :param attrs: The attributes to update on the ip represented by ``value``. :returns: The updated ndp_proxy :rtype: :class:`~openstack.network.v2.ndp_proxy.NDPProxy` """ return self._update(_ndp_proxy.NDPProxy, ndp_proxy, **attrs) def create_firewall_group(self, **attrs): """Create a new firewall group from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.firewall_group.FirewallGroup`, comprised of the properties on the FirewallGroup class. 
:returns: The results of firewall group creation :rtype: :class:`~openstack.network.v2.firewall_group.FirewallGroup` """ return self._create(_firewall_group.FirewallGroup, **attrs) def delete_firewall_group(self, firewall_group, ignore_missing=True): """Delete a firewall group :param firewall_group: The value can be either the ID of a firewall group or a :class:`~openstack.network.v2.firewall_group.FirewallGroup` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the firewall group does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent firewall group. :returns: ``None`` """ self._delete( _firewall_group.FirewallGroup, firewall_group, ignore_missing=ignore_missing, ) def find_firewall_group(self, name_or_id, ignore_missing=True, **query): """Find a single firewall group :param name_or_id: The name or ID of a firewall group. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.firewall_group.FirewallGroup` or None """ return self._find( _firewall_group.FirewallGroup, name_or_id, ignore_missing=ignore_missing, **query, ) def get_firewall_group(self, firewall_group): """Get a single firewall group :param firewall_group: The value can be the ID of a firewall group or a :class:`~openstack.network.v2.firewall_group.FirewallGroup` instance. :returns: One :class:`~openstack.network.v2.firewall_group.FirewallGroup` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get(_firewall_group.FirewallGroup, firewall_group) def firewall_groups(self, **query): """Return a generator of firewall_groups :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``description``: Firewall group description * ``egress_policy_id``: The ID of egress firewall policy * ``ingress_policy_id``: The ID of ingress firewall policy * ``name``: The name of a firewall group * ``shared``: Indicates whether this firewall group is shared across all projects. * ``status``: The status of the firewall group. Valid values are ACTIVE, INACTIVE, ERROR, PENDING_UPDATE, or PENDING_DELETE. * ``ports``: A list of the IDs of the ports associated with the firewall group. * ``project_id``: The ID of the project this firewall group is associated with. :returns: A generator of firewall group objects """ return self._list(_firewall_group.FirewallGroup, **query) def update_firewall_group(self, firewall_group, **attrs): """Update a firewall group :param firewall_group: Either the id of a firewall group or a :class:`~openstack.network.v2.firewall_group.FirewallGroup` instance. :param attrs: The attributes to update on the firewall group represented by ``firewall_group``. :returns: The updated firewall group :rtype: :class:`~openstack.network.v2.firewall_group.FirewallGroup` """ return self._update( _firewall_group.FirewallGroup, firewall_group, **attrs ) def create_firewall_policy(self, **attrs): """Create a new firewall policy from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.firewall_policy.FirewallPolicy`, comprised of the properties on the FirewallPolicy class. 
:returns: The results of firewall policy creation :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` """ return self._create(_firewall_policy.FirewallPolicy, **attrs) def delete_firewall_policy(self, firewall_policy, ignore_missing=True): """Delete a firewall policy :param firewall_policy: The value can be either the ID of a firewall policy or a :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the firewall policy does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent firewall policy. :returns: ``None`` """ self._delete( _firewall_policy.FirewallPolicy, firewall_policy, ignore_missing=ignore_missing, ) def find_firewall_policy(self, name_or_id, ignore_missing=True, **query): """Find a single firewall policy :param name_or_id: The name or ID of a firewall policy. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` or None """ return self._find( _firewall_policy.FirewallPolicy, name_or_id, ignore_missing=ignore_missing, **query, ) def get_firewall_policy(self, firewall_policy): """Get a single firewall policy :param firewall_policy: The value can be the ID of a firewall policy or a :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` instance. :returns: One :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get(_firewall_policy.FirewallPolicy, firewall_policy) def firewall_policies(self, **query): """Return a generator of firewall_policies :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``description``: Firewall policy description * ``firewall_rule``: A list of the IDs of the firewall rules associated with the firewall policy. * ``name``: The name of a firewall policy * ``shared``: Indicates whether this firewall policy is shared across all projects. * ``project_id``: The ID of the project that owns the resource. :returns: A generator of firewall policy objects """ return self._list(_firewall_policy.FirewallPolicy, **query) def update_firewall_policy(self, firewall_policy, **attrs): """Update a firewall policy :param firewall_policy: Either the id of a firewall policy or a :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` instance. :param attrs: The attributes to update on the firewall policy represented by ``firewall_policy``. :returns: The updated firewall policy :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` """ return self._update( _firewall_policy.FirewallPolicy, firewall_policy, **attrs ) def insert_rule_into_policy( self, firewall_policy_id, firewall_rule_id, insert_after=None, insert_before=None, ): """Insert a firewall_rule into a firewall_policy in order :param firewall_policy_id: The ID of the firewall policy. :param firewall_rule_id: The ID of the firewall rule. :param insert_after: The ID of the firewall rule to insert the new rule after. It will be worked only when insert_before is none. :param insert_before: The ID of the firewall rule to insert the new rule before. 
:returns: The updated firewall policy :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` """ body = { 'firewall_rule_id': firewall_rule_id, 'insert_after': insert_after, 'insert_before': insert_before, } policy = self._get_resource( _firewall_policy.FirewallPolicy, firewall_policy_id ) return policy.insert_rule(self, **body) def remove_rule_from_policy(self, firewall_policy_id, firewall_rule_id): """Remove a firewall_rule from a firewall_policy. :param firewall_policy_id: The ID of the firewall policy. :param firewall_rule_id: The ID of the firewall rule. :returns: The updated firewall policy :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` """ body = {'firewall_rule_id': firewall_rule_id} policy = self._get_resource( _firewall_policy.FirewallPolicy, firewall_policy_id ) return policy.remove_rule(self, **body) def create_firewall_rule(self, **attrs): """Create a new firewall rule from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.firewall_rule.FirewallRule`, comprised of the properties on the FirewallRule class. :returns: The results of firewall rule creation :rtype: :class:`~openstack.network.v2.firewall_rule.FirewallRule` """ return self._create(_firewall_rule.FirewallRule, **attrs) def delete_firewall_rule(self, firewall_rule, ignore_missing=True): """Delete a firewall rule :param firewall_rule: The value can be either the ID of a firewall rule or a :class:`~openstack.network.v2.firewall_rule.FirewallRule` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the firewall rule does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent firewall rule. 
:returns: ``None`` """ self._delete( _firewall_rule.FirewallRule, firewall_rule, ignore_missing=ignore_missing, ) def find_firewall_rule(self, name_or_id, ignore_missing=True, **query): """Find a single firewall rule :param name_or_id: The name or ID of a firewall rule. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.firewall_rule.FirewallRule` or None """ return self._find( _firewall_rule.FirewallRule, name_or_id, ignore_missing=ignore_missing, **query, ) def get_firewall_rule(self, firewall_rule): """Get a single firewall rule :param firewall_rule: The value can be the ID of a firewall rule or a :class:`~openstack.network.v2.firewall_rule.FirewallRule` instance. :returns: One :class:`~openstack.network.v2.firewall_rule.FirewallRule` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_firewall_rule.FirewallRule, firewall_rule) def firewall_rules(self, **query): """Return a generator of firewall_rules :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``action``: The action that the API performs on traffic that matches the firewall rule. * ``description``: Firewall rule description * ``name``: The name of a firewall group * ``destination_ip_address``: The destination IPv4 or IPv6 address or CIDR for the firewall rule. * ``destination_port``: The destination port or port range for the firewall rule. * ``enabled``: Facilitates selectively turning off rules. * ``shared``: Indicates whether this firewall group is shared across all projects. * ``ip_version``: The IP protocol version for the firewall rule. 
* ``protocol``: The IP protocol for the firewall rule. * ``source_ip_address``: The source IPv4 or IPv6 address or CIDR for the firewall rule. * ``source_port``: The source port or port range for the firewall rule. * ``project_id``: The ID of the project this firewall group is associated with. :returns: A generator of firewall rule objects """ return self._list(_firewall_rule.FirewallRule, **query) def update_firewall_rule(self, firewall_rule, **attrs): """Update a firewall rule :param firewall_rule: Either the id of a firewall rule or a :class:`~openstack.network.v2.firewall_rule.FirewallRule` instance. :param attrs: The attributes to update on the firewall rule represented by ``firewall_rule``. :returns: The updated firewall rule :rtype: :class:`~openstack.network.v2.firewall_rule.FirewallRule` """ return self._update( _firewall_rule.FirewallRule, firewall_rule, **attrs ) def create_security_group(self, **attrs): """Create a new security group from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.security_group.SecurityGroup`, comprised of the properties on the SecurityGroup class. :returns: The results of security group creation :rtype: :class:`~openstack.network.v2.security_group.SecurityGroup` """ return self._create(_security_group.SecurityGroup, **attrs) def delete_security_group( self, security_group, ignore_missing=True, if_revision=None ): """Delete a security group :param security_group: The value can be either the ID of a security group or a :class:`~openstack.network.v2.security_group.SecurityGroup` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the security group does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent security group. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. 
:returns: ``None`` """ self._delete( _security_group.SecurityGroup, security_group, ignore_missing=ignore_missing, if_revision=if_revision, ) def find_security_group(self, name_or_id, ignore_missing=True, **query): """Find a single security group :param name_or_id: The name or ID of a security group. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.security_group.SecurityGroup` or None """ return self._find( _security_group.SecurityGroup, name_or_id, ignore_missing=ignore_missing, **query, ) def get_security_group(self, security_group): """Get a single security group :param security_group: The value can be the ID of a security group or a :class:`~openstack.network.v2.security_group.SecurityGroup` instance. :returns: One :class:`~openstack.network.v2.security_group.SecurityGroup` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_security_group.SecurityGroup, security_group) def security_groups(self, **query): """Return a generator of security groups :param dict query: Optional query parameters to be sent to limit the resources being returned. Valid parameters are: * ``description``: Security group description * ``ìd``: The id of a security group, or list of security group ids * ``name``: The name of a security group * ``project_id``: The ID of the project this security group is associated with. 
:returns: A generator of security group objects :rtype: :class:`~openstack.network.v2.security_group.SecurityGroup` """ return self._list(_security_group.SecurityGroup, **query) def update_security_group(self, security_group, if_revision=None, **attrs): """Update a security group :param security_group: Either the id of a security group or a :class:`~openstack.network.v2.security_group.SecurityGroup` instance. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :param attrs: The attributes to update on the security group represented by ``security_group``. :returns: The updated security group :rtype: :class:`~openstack.network.v2.security_group.SecurityGroup` """ return self._update( _security_group.SecurityGroup, security_group, if_revision=if_revision, **attrs, ) def create_security_group_rule(self, **attrs): """Create a new security group rule from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule`, comprised of the properties on the SecurityGroupRule class. :returns: The results of security group rule creation :rtype: :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` """ return self._create(_security_group_rule.SecurityGroupRule, **attrs) def create_security_group_rules(self, data): """Create new security group rules from the list of attributes :param list data: List of dicts of attributes which will be used to create a :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule`, comprised of the properties on the SecurityGroupRule class. 
:returns: A generator of security group rule objects :rtype: :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` """ return self._bulk_create(_security_group_rule.SecurityGroupRule, data) def delete_security_group_rule( self, security_group_rule, ignore_missing=True, if_revision=None ): """Delete a security group rule :param security_group_rule: The value can be either the ID of a security group rule or a :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the security group rule does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent security group rule. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :returns: ``None`` """ self._delete( _security_group_rule.SecurityGroupRule, security_group_rule, ignore_missing=ignore_missing, if_revision=if_revision, ) def find_security_group_rule( self, name_or_id, ignore_missing=True, **query ): """Find a single security group rule :param str name_or_id: The ID of a security group rule. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. 
:returns: One :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` or None """ return self._find( _security_group_rule.SecurityGroupRule, name_or_id, ignore_missing=ignore_missing, **query, ) def get_security_group_rule(self, security_group_rule): """Get a single security group rule :param security_group_rule: The value can be the ID of a security group rule or a :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` instance. :returns: :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _security_group_rule.SecurityGroupRule, security_group_rule ) def security_group_rules(self, **query): """Return a generator of security group rules :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``description``: The security group rule description * ``direction``: Security group rule direction * ``ether_type``: Must be IPv4 or IPv6, and addresses represented in CIDR must match the ingress or egress rule. * ``project_id``: The ID of the project this security group rule is associated with. * ``protocol``: Security group rule protocol * ``remote_group_id``: ID of a remote security group * ``security_group_id``: ID of security group that owns the rules :returns: A generator of security group rule objects :rtype: :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` """ return self._list(_security_group_rule.SecurityGroupRule, **query) def create_default_security_group_rule(self, **attrs): """Create a new default security group rule from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.default_security_group_rule. DefaultSecurityGroupRule`, comprised of the properties on the DefaultSecurityGroupRule class. 
:returns: The results of default security group rule creation :rtype: :class:`~openstack.network.v2.default_security_group_rule. DefaultSecurityGroupRule` """ return self._create( _default_security_group_rule.DefaultSecurityGroupRule, **attrs ) def delete_default_security_group_rule( self, default_security_group_rule, ignore_missing=True, ): """Delete a default security group rule :param default_security_group_rule: The value can be either the ID of a default security group rule or a :class:`~openstack.network.v2.default_security_group_rule. DefaultSecurityGroupRule` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the defaul security group rule does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent default security group rule. :returns: ``None`` """ self._delete( _default_security_group_rule.DefaultSecurityGroupRule, default_security_group_rule, ignore_missing=ignore_missing, ) def find_default_security_group_rule( self, name_or_id, ignore_missing=True, **query ): """Find a single default security group rule :param str name_or_id: The ID of a default security group rule. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.default_security_group_rule. 
DefaultSecurityGroupRule` or None """ return self._find( _default_security_group_rule.DefaultSecurityGroupRule, name_or_id, ignore_missing=ignore_missing, **query, ) def get_default_security_group_rule(self, default_security_group_rule): """Get a single default security group rule :param default_security_group_rule: The value can be the ID of a default security group rule or a :class:`~openstack.network.v2.default_security_group_rule. DefaultSecurityGroupRule` instance. :returns: :class:`~openstack.network.v2.default_security_group_rule. DefaultSecurityGroupRule` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _default_security_group_rule.DefaultSecurityGroupRule, default_security_group_rule, ) def default_security_group_rules(self, **query): """Return a generator of default security group rules :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``description``: The default security group rule description * ``direction``: Default security group rule direction * ``ether_type``: Must be IPv4 or IPv6, and addresses represented in CIDR must match the ingress or egress rule. * ``protocol``: Default security group rule protocol * ``remote_group_id``: ID of a remote security group :returns: A generator of default security group rule objects :rtype: :class:`~openstack.network.v2.default_security_group_rule. DefaultSecurityGroupRule` """ return self._list( _default_security_group_rule.DefaultSecurityGroupRule, **query ) def create_segment(self, **attrs): """Create a new segment from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.segment.Segment`, comprised of the properties on the Segment class. 
:returns: The results of segment creation :rtype: :class:`~openstack.network.v2.segment.Segment` """ return self._create(_segment.Segment, **attrs) def delete_segment(self, segment, ignore_missing=True): """Delete a segment :param segment: The value can be either the ID of a segment or a :class:`~openstack.network.v2.segment.Segment` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the segment does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent segment. :returns: ``None`` """ self._delete(_segment.Segment, segment, ignore_missing=ignore_missing) def find_segment(self, name_or_id, ignore_missing=True, **query): """Find a single segment :param name_or_id: The name or ID of a segment. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.segment.Segment` or None """ return self._find( _segment.Segment, name_or_id, ignore_missing=ignore_missing, **query, ) def get_segment(self, segment): """Get a single segment :param segment: The value can be the ID of a segment or a :class:`~openstack.network.v2.segment.Segment` instance. :returns: One :class:`~openstack.network.v2.segment.Segment` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_segment.Segment, segment) def segments(self, **query): """Return a generator of segments :param kwargs query: Optional query parameters to be sent to limit the resources being returned. 
Available parameters include: * ``description``: The segment description * ``name``: Name of the segments * ``network_id``: ID of the network that owns the segments * ``network_type``: Network type for the segments * ``physical_network``: Physical network name for the segments * ``segmentation_id``: Segmentation ID for the segments :returns: A generator of segment objects :rtype: :class:`~openstack.network.v2.segment.Segment` """ return self._list(_segment.Segment, **query) def update_segment(self, segment, **attrs): """Update a segment :param segment: Either the id of a segment or a :class:`~openstack.network.v2.segment.Segment` instance. :param attrs: The attributes to update on the segment represented by ``segment``. :returns: The update segment :rtype: :class:`~openstack.network.v2.segment.Segment` """ return self._update(_segment.Segment, segment, **attrs) def service_providers(self, **query): """Return a generator of service providers :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of service provider objects :rtype: :class:`~openstack.network.v2.service_provider.ServiceProvider` """ return self._list(_service_provider.ServiceProvider, **query) def create_service_profile(self, **attrs): """Create a new network service flavor profile from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.service_profile.ServiceProfile`, comprised of the properties on the ServiceProfile class. :returns: The results of service profile creation :rtype: :class:`~openstack.network.v2.service_profile.ServiceProfile` """ return self._create(_service_profile.ServiceProfile, **attrs) def delete_service_profile(self, service_profile, ignore_missing=True): """Delete a network service flavor profile :param service_profile: The value can be either the ID of a service profile or a :class:`~openstack.network.v2.service_profile.ServiceProfile` instance. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the service profile does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent service profile. :returns: ``None`` """ self._delete( _service_profile.ServiceProfile, service_profile, ignore_missing=ignore_missing, ) def find_service_profile(self, name_or_id, ignore_missing=True, **query): """Find a single network service flavor profile :param name_or_id: The name or ID of a service profile. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.service_profile.ServiceProfile` or None """ return self._find( _service_profile.ServiceProfile, name_or_id, ignore_missing=ignore_missing, **query, ) def get_service_profile(self, service_profile): """Get a single network service flavor profile :param service_profile: The value can be the ID of a service_profile or a :class:`~openstack.network.v2.service_profile.ServiceProfile` instance. :returns: One :class:`~openstack.network.v2.service_profile.ServiceProfile` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_service_profile.ServiceProfile, service_profile) def service_profiles(self, **query): """Return a generator of network service flavor profiles :param dict query: Optional query parameters to be sent to limit the resources returned. 
Available parameters inclue: * ``description``: The description of the service flavor profile * ``driver``: Provider driver for the service flavor profile * ``is_enabled``: Whether the profile is enabled * ``project_id``: The owner project ID :returns: A generator of service profile objects :rtype: :class:`~openstack.network.v2.service_profile.ServiceProfile` """ return self._list(_service_profile.ServiceProfile, **query) def update_service_profile(self, service_profile, **attrs): """Update a network flavor service profile :param service_profile: Either the id of a service profile or a :class:`~openstack.network.v2.service_profile.ServiceProfile` instance. :param attrs: The attributes to update on the service profile represented by ``service_profile``. :returns: The updated service profile :rtype: :class:`~openstack.network.v2.service_profile.ServiceProfile` """ return self._update( _service_profile.ServiceProfile, service_profile, **attrs ) def create_subnet(self, **attrs): """Create a new subnet from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.subnet.Subnet`, comprised of the properties on the Subnet class. :returns: The results of subnet creation :rtype: :class:`~openstack.network.v2.subnet.Subnet` """ return self._create(_subnet.Subnet, **attrs) def delete_subnet(self, subnet, ignore_missing=True, if_revision=None): """Delete a subnet :param subnet: The value can be either the ID of a subnet or a :class:`~openstack.network.v2.subnet.Subnet` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the subnet does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent subnet. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. 
:returns: ``None`` """ self._delete( _subnet.Subnet, subnet, ignore_missing=ignore_missing, if_revision=if_revision, ) def find_subnet(self, name_or_id, ignore_missing=True, **query): """Find a single subnet :param name_or_id: The name or ID of a subnet. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.subnet.Subnet` or None """ return self._find( _subnet.Subnet, name_or_id, ignore_missing=ignore_missing, **query ) def get_subnet(self, subnet): """Get a single subnet :param subnet: The value can be the ID of a subnet or a :class:`~openstack.network.v2.subnet.Subnet` instance. :returns: One :class:`~openstack.network.v2.subnet.Subnet` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_subnet.Subnet, subnet) def subnets(self, **query): """Return a generator of subnets :param dict query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``cidr``: Subnet CIDR * ``description``: The subnet description * ``gateway_ip``: Subnet gateway IP address * ``ip_version``: Subnet IP address version * ``ipv6_address_mode``: The IPv6 address mode * ``ipv6_ra_mode``: The IPv6 router advertisement mode * ``is_dhcp_enabled``: Subnet has DHCP enabled (boolean) * ``name``: Subnet name * ``network_id``: ID of network that owns the subnets * ``project_id``: Owner tenant ID * ``subnet_pool_id``: The subnet pool ID from which to obtain a CIDR. 
:returns: A generator of subnet objects :rtype: :class:`~openstack.network.v2.subnet.Subnet` """ return self._list(_subnet.Subnet, **query) def update_subnet(self, subnet, if_revision=None, **attrs): """Update a subnet :param subnet: Either the id of a subnet or a :class:`~openstack.network.v2.subnet.Subnet` instance. :param int if_revision: Revision to put in If-Match header of update request to perform compare-and-swap update. :param attrs: The attributes to update on the subnet represented by ``subnet``. :returns: The updated subnet :rtype: :class:`~openstack.network.v2.subnet.Subnet` """ return self._update( _subnet.Subnet, subnet, if_revision=if_revision, **attrs ) def create_subnet_pool(self, **attrs): """Create a new subnet pool from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.subnet_pool.SubnetPool`, comprised of the properties on the SubnetPool class. :returns: The results of subnet pool creation :rtype: :class:`~openstack.network.v2.subnet_pool.SubnetPool` """ return self._create(_subnet_pool.SubnetPool, **attrs) def delete_subnet_pool(self, subnet_pool, ignore_missing=True): """Delete a subnet pool :param subnet_pool: The value can be either the ID of a subnet pool or a :class:`~openstack.network.v2.subnet_pool.SubnetPool` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the subnet pool does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent subnet pool. :returns: ``None`` """ self._delete( _subnet_pool.SubnetPool, subnet_pool, ignore_missing=ignore_missing ) def find_subnet_pool(self, name_or_id, ignore_missing=True, **query): """Find a single subnet pool :param name_or_id: The name or ID of a subnet pool. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. 
When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.subnet_pool.SubnetPool` or None """ return self._find( _subnet_pool.SubnetPool, name_or_id, ignore_missing=ignore_missing, **query, ) def get_subnet_pool(self, subnet_pool): """Get a single subnet pool :param subnet_pool: The value can be the ID of a subnet pool or a :class:`~openstack.network.v2.subnet_pool.SubnetPool` instance. :returns: One :class:`~openstack.network.v2.subnet_pool.SubnetPool` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_subnet_pool.SubnetPool, subnet_pool) def subnet_pools(self, **query): """Return a generator of subnet pools :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``address_scope_id``: Subnet pool address scope ID * ``description``: The subnet pool description * ``ip_version``: The IP address family * ``is_default``: Subnet pool is the default (boolean) * ``is_shared``: Subnet pool is shared (boolean) * ``name``: Subnet pool name * ``project_id``: Owner tenant ID :returns: A generator of subnet pool objects :rtype: :class:`~openstack.network.v2.subnet_pool.SubnetPool` """ return self._list(_subnet_pool.SubnetPool, **query) def update_subnet_pool(self, subnet_pool, **attrs): """Update a subnet pool :param subnet_pool: Either the ID of a subnet pool or a :class:`~openstack.network.v2.subnet_pool.SubnetPool` instance. :param attrs: The attributes to update on the subnet pool represented by ``subnet_pool``. 
:returns: The updated subnet pool :rtype: :class:`~openstack.network.v2.subnet_pool.SubnetPool` """ return self._update(_subnet_pool.SubnetPool, subnet_pool, **attrs) @staticmethod def _check_tag_support(resource): try: # Check 'tags' attribute exists resource.tags except AttributeError: raise exceptions.InvalidRequest( '%s resource does not support tag' % resource.__class__.__name__ ) def set_tags(self, resource, tags): """Replace tags of a specified resource with specified tags :param resource: :class:`~openstack.resource.Resource` instance. :param tags: New tags to be set. :type tags: "list" :returns: The updated resource :rtype: :class:`~openstack.resource.Resource` """ self._check_tag_support(resource) return resource.set_tags(self, tags) def create_trunk(self, **attrs): """Create a new trunk from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.trunk.Trunk`, comprised of the properties on the Trunk class. :returns: The results of trunk creation :rtype: :class:`~openstack.network.v2.trunk.Trunk` """ return self._create(_trunk.Trunk, **attrs) def delete_trunk(self, trunk, ignore_missing=True): """Delete a trunk :param trunk: The value can be either the ID of trunk or a :class:`openstack.network.v2.trunk.Trunk` instance :returns: ``None`` """ self._delete(_trunk.Trunk, trunk, ignore_missing=ignore_missing) def find_trunk(self, name_or_id, ignore_missing=True, **query): """Find a single trunk :param name_or_id: The name or ID of a trunk. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. 
:returns: One :class:`~openstack.network.v2.trunk.Trunk` or None """ return self._find( _trunk.Trunk, name_or_id, ignore_missing=ignore_missing, **query ) def get_trunk(self, trunk): """Get a single trunk :param trunk: The value can be the ID of a trunk or a :class:`~openstack.network.v2.trunk.Trunk` instance. :returns: One :class:`~openstack.network.v2.trunk.Trunk` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_trunk.Trunk, trunk) def trunks(self, **query): """Return a generator of trunks :param dict query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of trunk objects :rtype: :class:`~openstack.network.v2.trunk.trunk` """ return self._list(_trunk.Trunk, **query) def update_trunk(self, trunk, **attrs): """Update a trunk :param trunk: Either the id of a trunk or a :class:`~openstack.network.v2.trunk.Trunk` instance. :param attrs: The attributes to update on the trunk represented by ``trunk``. :returns: The updated trunk :rtype: :class:`~openstack.network.v2.trunk.Trunk` """ return self._update(_trunk.Trunk, trunk, **attrs) def add_trunk_subports(self, trunk, subports): """Set sub_ports on trunk :param trunk: The value can be the ID of a trunk or a :class:`~openstack.network.v2.trunk.Trunk` instance. :param subports: New subports to be set. :type subports: "list" :returns: The updated trunk :rtype: :class:`~openstack.network.v2.trunk.Trunk` """ trunk = self._get_resource(_trunk.Trunk, trunk) return trunk.add_subports(self, subports) def delete_trunk_subports(self, trunk, subports): """Remove sub_ports from trunk :param trunk: The value can be the ID of a trunk or a :class:`~openstack.network.v2.trunk.Trunk` instance. :param subports: Subports to be removed. 
:type subports: "list" :returns: The updated trunk :rtype: :class:`~openstack.network.v2.trunk.Trunk` """ trunk = self._get_resource(_trunk.Trunk, trunk) return trunk.delete_subports(self, subports) def get_trunk_subports(self, trunk): """Get sub_ports configured on trunk :param trunk: The value can be the ID of a trunk or a :class:`~openstack.network.v2.trunk.Trunk` instance. :returns: Trunk sub_ports :rtype: "list" """ trunk = self._get_resource(_trunk.Trunk, trunk) return trunk.get_subports(self) # ========== VPNaas ========== # ========== VPN Endpoint group ========== def create_vpn_endpoint_group(self, **attrs): """Create a new vpn endpoint group from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup`, comprised of the properties on the VpnEndpointGroup class. :returns: The results of vpn endpoint group creation. :rtype: :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` """ return self._create(_vpn_endpoint_group.VpnEndpointGroup, **attrs) def delete_vpn_endpoint_group( self, vpn_endpoint_group, ignore_missing=True ): """Delete a vpn service :param vpn_endpoint_group: The value can be either the ID of a vpn service or a :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the vpn service does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent vpn service. :returns: ``None`` """ self._delete( _vpn_endpoint_group.VpnEndpointGroup, vpn_endpoint_group, ignore_missing=ignore_missing, ) def find_vpn_endpoint_group( self, name_or_id, ignore_missing=True, **query ): """Find a single vpn service :param name_or_id: The name or ID of a vpn service. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` or None """ return self._find( _vpn_endpoint_group.VpnEndpointGroup, name_or_id, ignore_missing=ignore_missing, **query, ) def get_vpn_endpoint_group(self, vpn_endpoint_group): """Get a single vpn service :param vpn_endpoint_group: The value can be the ID of a vpn service or a :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` instance. :returns: One :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _vpn_endpoint_group.VpnEndpointGroup, vpn_endpoint_group ) def vpn_endpoint_groups(self, **query): """Return a generator of vpn services :param dict query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of vpn service objects :rtype: :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` """ return self._list(_vpn_endpoint_group.VpnEndpointGroup, **query) def update_vpn_endpoint_group(self, vpn_endpoint_group, **attrs): """Update a vpn service :param vpn_endpoint_group: Either the id of a vpn service or a :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` instance. :param attrs: The attributes to update on the VPN service represented by ``vpn_endpoint_group``. 
:returns: The updated vpnservice :rtype: :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` """ return self._update( _vpn_endpoint_group.VpnEndpointGroup, vpn_endpoint_group, **attrs ) # ========== IPsec Site Connection ========== def create_vpn_ipsec_site_connection(self, **attrs): """Create a new IPsec site connection from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection`, comprised of the properties on the IPSecSiteConnection class. :returns: The results of IPsec site connection creation :rtype: :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` """ return self._create( _ipsec_site_connection.VpnIPSecSiteConnection, **attrs ) def find_vpn_ipsec_site_connection( self, name_or_id, ignore_missing=True, **query ): """Find a single IPsec site connection :param name_or_id: The name or ID of an IPsec site connection. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods such as query filters. :returns: One :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` or None """ return self._find( _ipsec_site_connection.VpnIPSecSiteConnection, name_or_id, ignore_missing=ignore_missing, **query, ) def get_vpn_ipsec_site_connection(self, ipsec_site_connection): """Get a single IPsec site connection :param ipsec_site_connection: The value can be the ID of an IPsec site connection or a :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` instance. 
        :returns: One
            :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        return self._get(
            _ipsec_site_connection.VpnIPSecSiteConnection,
            ipsec_site_connection,
        )

    def vpn_ipsec_site_connections(self, **query):
        """Return a generator of IPsec site connections

        :param dict query: Optional query parameters to be sent to limit
            the resources being returned.

        :returns: A generator of IPsec site connection objects
        :rtype:
            :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection`
        """
        return self._list(
            _ipsec_site_connection.VpnIPSecSiteConnection, **query
        )

    def update_vpn_ipsec_site_connection(self, ipsec_site_connection, **attrs):
        """Update a IPsec site connection

        :param ipsec_site_connection: Either the id of an IPsec site
            connection or a
            :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection`
            instance.
        :param attrs: The attributes to update on the IPsec site connection
            represented by ``ipsec_site_connection``.

        :returns: The updated IPsec site connection
        :rtype:
            :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection`
        """
        return self._update(
            _ipsec_site_connection.VpnIPSecSiteConnection,
            ipsec_site_connection,
            **attrs,
        )

    def delete_vpn_ipsec_site_connection(
        self, ipsec_site_connection, ignore_missing=True
    ):
        """Delete a IPsec site connection

        :param ipsec_site_connection: The value can be either the ID of an
            IPsec site connection, or a
            :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection`
            instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the IPsec site connection does not exist. When set to
            ``True``, no exception will be set when attempting to delete a
            nonexistent IPsec site connection.
:returns: ``None`` """ self._delete( _ipsec_site_connection.VpnIPSecSiteConnection, ipsec_site_connection, ignore_missing=ignore_missing, ) # ========== IKEPolicy ========== def create_vpn_ike_policy(self, **attrs): """Create a new ike policy from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy`, comprised of the properties on the VpnIkePolicy class. :returns: The results of ike policy creation :rtype: :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` """ return self._create(_ike_policy.VpnIkePolicy, **attrs) def find_vpn_ike_policy(self, name_or_id, ignore_missing=True, **query): """Find a single ike policy :param name_or_id: The name or ID of an IKE policy. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods such as query filters. :returns: One :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` or None. """ return self._find( _ike_policy.VpnIkePolicy, name_or_id, ignore_missing=ignore_missing, **query, ) def get_vpn_ike_policy(self, ike_policy): """Get a single ike policy :param ike_policy: The value can be the ID of an IKE policy or a :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` instance. :returns: One :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` :rtype: :class:`~openstack.network.v2.ike_policy.VpnIkePolicy` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_ike_policy.VpnIkePolicy, ike_policy) def vpn_ike_policies(self, **query): """Return a generator of IKE policies :param dict query: Optional query parameters to be sent to limit the resources being returned. 
        :returns: A generator of ike policy objects
        :rtype: :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy`
        """
        return self._list(_ike_policy.VpnIkePolicy, **query)

    def update_vpn_ike_policy(self, ike_policy, **attrs):
        """Update an IKE policy

        :param ike_policy: Either the ID of an IKE policy or a
            :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy`
            instance.
        :param attrs: The attributes to update on the ike policy
            represented by ``ike_policy``.

        :returns: The updated ike policy
        :rtype: :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy`
        """
        return self._update(_ike_policy.VpnIkePolicy, ike_policy, **attrs)

    def delete_vpn_ike_policy(self, ike_policy, ignore_missing=True):
        """Delete an IKE policy

        :param ike_policy: The value can be either the ID of an ike policy,
            or a :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy`
            instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the ike policy does not exist. When set to ``True``, no
            exception will be set when attempting to delete a nonexistent
            ike policy.

        :returns: ``None``
        """
        self._delete(
            _ike_policy.VpnIkePolicy, ike_policy, ignore_missing=ignore_missing
        )

    # ========== IPSecPolicy ==========

    def create_vpn_ipsec_policy(self, **attrs):
        """Create a new IPsec policy from attributes

        :param attrs: Keyword arguments which will be used to create a
            :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy`,
            comprised of the properties on the VpnIpsecPolicy class.

        :returns: The results of IPsec policy creation
        :rtype:
            :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy`
        """
        return self._create(_ipsec_policy.VpnIpsecPolicy, **attrs)

    def find_vpn_ipsec_policy(self, name_or_id, ignore_missing=True, **query):
        """Find a single IPsec policy

        :param name_or_id: The name or ID of an IPsec policy.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the resource does not exist. When set to ``True``, None will
            be returned when attempting to find a nonexistent resource.
        :param dict query: Any additional parameters to be passed into
            underlying methods such as query filters.

        :returns: One
            :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy`
            or None.
        """
        return self._find(
            _ipsec_policy.VpnIpsecPolicy,
            name_or_id,
            ignore_missing=ignore_missing,
            **query,
        )

    def get_vpn_ipsec_policy(self, ipsec_policy):
        """Get a single IPsec policy

        :param ipsec_policy: The value can be the ID of an IPsec policy or a
            :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy`
            instance.

        :returns: One
            :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy`
        :rtype: :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        return self._get(_ipsec_policy.VpnIpsecPolicy, ipsec_policy)

    def vpn_ipsec_policies(self, **query):
        """Return a generator of IPsec policies

        :param dict query: Optional query parameters to be sent to limit
            the resources being returned.

        :returns: A generator of IPsec policy objects
        :rtype:
            :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy`
        """
        return self._list(_ipsec_policy.VpnIpsecPolicy, **query)

    def update_vpn_ipsec_policy(self, ipsec_policy, **attrs):
        """Update an IPsec policy

        :param ipsec_policy: Either the id of an IPsec policy or a
            :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy`
            instance.
        :param attrs: The attributes to update on the IPsec policy
            represented by ``ipsec_policy``.
:returns: The updated IPsec policy :rtype: :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy` """ return self._update( _ipsec_policy.VpnIpsecPolicy, ipsec_policy, **attrs ) def delete_vpn_ipsec_policy(self, ipsec_policy, ignore_missing=True): """Delete an IPsec policy :param ipsec_policy: The value can be either the ID of an IPsec policy, or a :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the IPsec policy does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent IPsec policy. :returns: ``None`` """ self._delete( _ipsec_policy.VpnIpsecPolicy, ipsec_policy, ignore_missing=ignore_missing, ) # ========== VPN Service ========== def create_vpn_service(self, **attrs): """Create a new vpn service from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.vpn_service.VpnService`, comprised of the properties on the VpnService class. :returns: The results of vpn service creation :rtype: :class:`~openstack.network.v2.vpn_service.VpnService` """ return self._create(_vpn_service.VpnService, **attrs) def delete_vpn_service(self, vpn_service, ignore_missing=True): """Delete a vpn service :param vpn_service: The value can be either the ID of a vpn service or a :class:`~openstack.network.v2.vpn_service.VpnService` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the vpn service does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent vpn service. :returns: ``None`` """ self._delete( _vpn_service.VpnService, vpn_service, ignore_missing=ignore_missing ) def find_vpn_service(self, name_or_id, ignore_missing=True, **query): """Find a single vpn service :param name_or_id: The name or ID of a vpn service. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.vpn_service.VpnService` or None """ return self._find( _vpn_service.VpnService, name_or_id, ignore_missing=ignore_missing, **query, ) def get_vpn_service(self, vpn_service): """Get a single vpn service :param vpn_service: The value can be the ID of a vpn service or a :class:`~openstack.network.v2.vpn_service.VpnService` instance. :returns: One :class:`~openstack.network.v2.vpn_service.VpnService` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_vpn_service.VpnService, vpn_service) def vpn_services(self, **query): """Return a generator of vpn services :param dict query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of vpn service objects :rtype: :class:`~openstack.network.v2.vpn_service.VpnService` """ return self._list(_vpn_service.VpnService, **query) def update_vpn_service(self, vpn_service, **attrs): """Update a vpn service :param vpn_service: Either the id of a vpn service or a :class:`~openstack.network.v2.vpn_service.VpnService` instance. :param attrs: The attributes to update on the VPN service represented by ``vpn_service``. :returns: The updated vpnservice :rtype: :class:`~openstack.network.v2.vpn_service.VpnService` """ return self._update(_vpn_service.VpnService, vpn_service, **attrs) def create_floating_ip_port_forwarding(self, floating_ip, **attrs): """Create a new floating ip port forwarding from attributes :param floating_ip: The value can be either the ID of a floating ip or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. 
        :param attrs: Keyword arguments which will be used to create a
            :class:`~openstack.network.v2.port_forwarding.PortForwarding`,
            comprised of the properties on the PortForwarding class.

        :returns: The results of port forwarding creation
        :rtype:
            :class:`~openstack.network.v2.port_forwarding.PortForwarding`
        """
        floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip)
        return self._create(
            _port_forwarding.PortForwarding,
            floatingip_id=floatingip.id,
            **attrs,
        )

    def delete_floating_ip_port_forwarding(
        self, floating_ip, port_forwarding, ignore_missing=True
    ):
        """Delete a floating IP port forwarding.

        :param floating_ip: The value can be either the ID of a floating ip
            or a :class:`~openstack.network.v2.floating_ip.FloatingIP`
            instance.
        :param port_forwarding: The value can be either the ID of a port
            forwarding or a
            :class:`~openstack.network.v2.port_forwarding.PortForwarding`
            instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the floating ip does not exist. When set to ``True``, no
            exception will be set when attempting to delete a nonexistent ip.

        :returns: ``None``
        """
        floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip)
        self._delete(
            _port_forwarding.PortForwarding,
            port_forwarding,
            ignore_missing=ignore_missing,
            floatingip_id=floatingip.id,
        )

    def find_floating_ip_port_forwarding(
        self, floating_ip, port_forwarding_id, ignore_missing=True, **query
    ):
        """Find a floating ip port forwarding

        :param floating_ip: The value can be the ID of the Floating IP that
            the port forwarding belongs or a
            :class:`~openstack.network.v2.floating_ip.FloatingIP` instance.
        :param port_forwarding_id: The ID of a port forwarding.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the resource does not exist. When set to ``True``, None will
            be returned when attempting to find a nonexistent resource.
:param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.port_forwarding.PortForwarding` or None """ floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip) return self._find( _port_forwarding.PortForwarding, port_forwarding_id, ignore_missing=ignore_missing, floatingip_id=floatingip.id, **query, ) def get_floating_ip_port_forwarding(self, floating_ip, port_forwarding): """Get a floating ip port forwarding :param floating_ip: The value can be the ID of the Floating IP that the port forwarding belongs or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. :param port_forwarding: The value can be the ID of a port forwarding or a :class:`~openstack.network.v2.port_forwarding.PortForwarding` instance. :returns: One :class:`~openstack.network.v2.port_forwarding.PortForwarding` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip) return self._get( _port_forwarding.PortForwarding, port_forwarding, floatingip_id=floatingip.id, ) def floating_ip_port_forwardings(self, floating_ip, **query): """Return a generator of floating ip port forwarding :param floating_ip: The value can be the ID of the Floating IP that the port forwarding belongs or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. :param kwargs **query: Optional query parameters to be sent to limit the resources being returned. 
:returns: A generator of floating ip port forwarding objects :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` """ floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip) return self._list( _port_forwarding.PortForwarding, floatingip_id=floatingip.id, **query, ) def update_floating_ip_port_forwarding( self, floating_ip, port_forwarding, **attrs ): """Update a floating ip port forwarding :param floating_ip: The value can be the ID of the Floating IP that the port forwarding belongs or a :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. :param port_forwarding: Either the id of a floating ip port forwarding or a :class:`~openstack.network.v2.port_forwarding.PortForwarding`instance. :param attrs: The attributes to update on the floating ip port forwarding represented by ``floating_ip``. :returns: The updated floating ip port forwarding :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` """ floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip) return self._update( _port_forwarding.PortForwarding, port_forwarding, floatingip_id=floatingip.id, **attrs, ) def create_conntrack_helper(self, router, **attrs): """Create a new L3 conntrack helper from attributes :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper`, comprised of the properties on the ConntrackHelper class. 
:returns: The results of conntrack helper creation :rtype: :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper` """ router = self._get_resource(_router.Router, router) return self._create( _l3_conntrack_helper.ConntrackHelper, router_id=router.id, **attrs ) def conntrack_helpers(self, router, **query): """Return a generator of conntrack helpers :param router: Either the router ID or an instance of :class:`~openstack.network.v2.router.Router` :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of conntrack helper objects :rtype: :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper` """ router = self._get_resource(_router.Router, router) return self._list( _l3_conntrack_helper.ConntrackHelper, router_id=router.id, **query ) def get_conntrack_helper(self, conntrack_helper, router): """Get a single L3 conntrack helper :param conntrack_helper: The value can be the ID of a L3 conntrack helper or a :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper`, instance. :param router: The value can be the ID of a Router or a :class:`~openstack.network.v2.router.Router` instance. :returns: One :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ router = self._get_resource(_router.Router, router) return self._get( _l3_conntrack_helper.ConntrackHelper, conntrack_helper, router_id=router.id, ) def update_conntrack_helper(self, conntrack_helper, router, **attrs): """Update a L3 conntrack_helper :param conntrack_helper: The value can be the ID of a L3 conntrack helper or a :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper`, instance. :param router: The value can be the ID of a Router or a :class:`~openstack.network.v2.router.Router` instance. :param attrs: The attributes to update on the L3 conntrack helper represented by ``conntrack_helper``. 
        :returns: The updated conntrack helper
        :rtype:
            :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper`
        """
        router = self._get_resource(_router.Router, router)
        return self._update(
            _l3_conntrack_helper.ConntrackHelper,
            conntrack_helper,
            router_id=router.id,
            **attrs,
        )

    def delete_conntrack_helper(
        self, conntrack_helper, router, ignore_missing=True
    ):
        """Delete a L3 conntrack_helper

        :param conntrack_helper: The value can be the ID of a L3 conntrack
            helper or a
            :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper`,
            instance.
        :param router: The value can be the ID of a Router or a
            :class:`~openstack.network.v2.router.Router` instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the conntrack helper does not exist. When set to ``True``,
            no exception will be set when attempting to delete a nonexistent
            conntrack helper.

        :returns: ``None``
        """
        router = self._get_resource(_router.Router, router)
        self._delete(
            _l3_conntrack_helper.ConntrackHelper,
            conntrack_helper,
            router_id=router.id,
            ignore_missing=ignore_missing,
        )

    def create_tap_flow(self, **attrs):
        """Create a new Tap Flow from attributes"""
        return self._create(_tap_flow.TapFlow, **attrs)

    def delete_tap_flow(self, tap_flow, ignore_missing=True):
        """Delete a Tap Flow"""
        self._delete(
            _tap_flow.TapFlow, tap_flow, ignore_missing=ignore_missing
        )

    def find_tap_flow(self, name_or_id, ignore_missing=True, **query):
        """Find a single Tap Flow"""
        return self._find(
            _tap_flow.TapFlow,
            name_or_id,
            ignore_missing=ignore_missing,
            **query,
        )

    def get_tap_flow(self, tap_flow):
        """Get a single Tap Flow"""
        return self._get(_tap_flow.TapFlow, tap_flow)

    def update_tap_flow(self, tap_flow, **attrs):
        """Update a Tap Flow"""
        return self._update(_tap_flow.TapFlow, tap_flow, **attrs)

    def tap_flows(self, **query):
        """Return a generator of Tap Flows"""
        return self._list(_tap_flow.TapFlow, **query)

    def create_tap_mirror(self, **attrs):
        """Create a new Tap Mirror from
        attributes"""
        return self._create(_tap_mirror.TapMirror, **attrs)

    def delete_tap_mirror(self, tap_mirror, ignore_missing=True):
        """Delete a Tap Mirror"""
        self._delete(
            _tap_mirror.TapMirror, tap_mirror, ignore_missing=ignore_missing
        )

    def find_tap_mirror(self, name_or_id, ignore_missing=True, **query):
        """Find a single Tap Mirror"""
        return self._find(
            _tap_mirror.TapMirror,
            name_or_id,
            ignore_missing=ignore_missing,
            **query,
        )

    def get_tap_mirror(self, tap_mirror):
        """Get a single Tap Mirror"""
        return self._get(_tap_mirror.TapMirror, tap_mirror)

    def update_tap_mirror(self, tap_mirror, **attrs):
        """Update a Tap Mirror"""
        return self._update(_tap_mirror.TapMirror, tap_mirror, **attrs)

    def tap_mirrors(self, **query):
        """Return a generator of Tap Mirrors"""
        return self._list(_tap_mirror.TapMirror, **query)

    def create_tap_service(self, **attrs):
        """Create a new Tap Service from attributes"""
        return self._create(_tap_service.TapService, **attrs)

    def delete_tap_service(self, tap_service, ignore_missing=True):
        """Delete a Tap Service"""
        self._delete(
            _tap_service.TapService, tap_service, ignore_missing=ignore_missing
        )

    def find_tap_service(self, name_or_id, ignore_missing=True, **query):
        """Find a single Tap Service"""
        return self._find(
            _tap_service.TapService,
            name_or_id,
            ignore_missing=ignore_missing,
            **query,
        )

    def get_tap_service(self, tap_service):
        """Get a single Tap Service"""
        return self._get(_tap_service.TapService, tap_service)

    def update_tap_service(self, tap_service, **attrs):
        """Update a Tap Service"""
        return self._update(_tap_service.TapService, tap_service, **attrs)

    def tap_services(self, **query):
        """Return a generator of Tap Services"""
        return self._list(_tap_service.TapService, **query)

    def create_sfc_flow_classifier(self, **attrs):
        """Create a new Flow Classifier from attributes

        :param attrs: Keyword arguments which will be used to create a
            :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier`,
            comprised of the properties on the
SfcFlowClassifier class. :returns: The results of SFC Flow Classifier creation :rtype: :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` """ return self._create(_sfc_flow_classifier.SfcFlowClassifier, **attrs) def delete_sfc_flow_classifier(self, flow_classifier, ignore_missing=True): """Delete a Flow Classifier :param flow_classifier: The value can be either the ID of a flow classifier or a :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the flow classifier does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent flow classifier. :returns: ``None`` """ self._delete( _sfc_flow_classifier.SfcFlowClassifier, flow_classifier, ignore_missing=ignore_missing, ) def find_sfc_flow_classifier( self, name_or_id, ignore_missing=True, **query ): """Find a single Flow Classifier :param str name_or_id: The name or ID of an SFC flow classifier. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.sfc_flow_classifier. SfcFlowClassifier` or None """ return self._find( _sfc_flow_classifier.SfcFlowClassifier, name_or_id, ignore_missing=ignore_missing, **query, ) def get_sfc_flow_classifier(self, flow_classifier): """Get a single Flow Classifier :param flow_classifier: The value can be the ID of an SFC flow classifier or a :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` instance. 
:returns: :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _sfc_flow_classifier.SfcFlowClassifier, flow_classifier ) def update_sfc_flow_classifier(self, flow_classifier, **attrs): """Update a Flow Classifier :param flow_classifier: The value can be the ID of a Flow Classifier :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier`, instance. :param attrs: The attributes to update on the Flow Classifier :returns: The updated Flow Classifier. :rtype: :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` """ return self._update( _sfc_flow_classifier.SfcFlowClassifier, flow_classifier, **attrs ) def sfc_flow_classifiers(self, **query): """Return a generator of Flow Classifiers :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``name``: The name of the flow classifier. * ``description``: The flow classifier description * ``ethertype``: Must be IPv4 or IPv6. * ``protocol``: Flow classifier protocol :returns: A generator of SFC Flow classifier objects :rtype: :class:`~openstack.network.v2.sfc_flow_classifier. SfcFlowClassifier` """ return self._list(_sfc_flow_classifier.SfcFlowClassifier, **query) def create_sfc_port_chain(self, **attrs): """Create a new Port Chain from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain`, comprised of the properties on the SfcPortchain class. 
        :returns: The results of SFC Port Chain creation
        :rtype: :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain`
        """
        return self._create(_sfc_port_chain.SfcPortChain, **attrs)

    def delete_sfc_port_chain(self, port_chain, ignore_missing=True):
        """Delete a Port Chain

        :param port_chain: The value can be either the ID of a port chain or
            a :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain`
            instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the port chain does not exist. When set to ``True``, no
            exception will be set when attempting to delete a nonexistent
            port chain.

        :returns: ``None``
        """
        self._delete(
            _sfc_port_chain.SfcPortChain,
            port_chain,
            ignore_missing=ignore_missing,
        )

    def find_sfc_port_chain(self, name_or_id, ignore_missing=True, **query):
        """Find a single Port Chain

        :param str name_or_id: The name or ID of an SFC port chain.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the resource does not exist. When set to ``True``, None will
            be returned when attempting to find a nonexistent resource.
        :param dict query: Any additional parameters to be passed into
            underlying methods. such as query filters.

        :returns: One :class:`~openstack.network.v2.sfc_port_chain.
            SfcPortChain` or None
        """
        return self._find(
            _sfc_port_chain.SfcPortChain,
            name_or_id,
            ignore_missing=ignore_missing,
            **query,
        )

    def get_sfc_port_chain(self, port_chain):
        """Get a single Port Chain

        :param port_chain: The value can be the ID of an SFC port chain or a
            :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain`
            instance.

        :returns: :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        return self._get(_sfc_port_chain.SfcPortChain, port_chain)

    def update_sfc_port_chain(self, port_chain, **attrs):
        """Update a Port Chain

        :param port_chain: The value can be the ID of a Port Chain or a
            :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain`
            instance.
        :param attrs: The attributes to update on the Port Chain

        :returns: The updated Port Chain.
        :rtype: :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain`
        """
        return self._update(_sfc_port_chain.SfcPortChain, port_chain, **attrs)

    def sfc_port_chains(self, **query):
        """Return a generator of Port Chains

        :param kwargs query: Optional query parameters to be sent to limit
            the resources being returned. Available parameters include:

            * ``name``: The name of the port chain
            * ``description``: The port chain description

        :returns: A generator of SFC port chain objects
        :rtype: :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain`
        """
        return self._list(_sfc_port_chain.SfcPortChain, **query)

    def create_sfc_port_pair(self, **attrs):
        """Create a new Port Pair from attributes

        :param attrs: Keyword arguments which will be used to create a
            :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair`,
            comprised of the properties on the SfcPortPair class.

        :returns: The results of SFC Port Pair creation
        :rtype: :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair`
        """
        return self._create(_sfc_port_pair.SfcPortPair, **attrs)

    def delete_sfc_port_pair(self, port_pair, ignore_missing=True):
        """Delete a Port Pair

        :param port_pair: The value can be either the ID of a port pair or a
            :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair`
            instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the port pair does not exist. When set to ``True``, no
            exception will be set when attempting to delete a nonexistent
            port pair.
:returns: ``None`` """ self._delete( _sfc_port_pair.SfcPortPair, port_pair, ignore_missing=ignore_missing, ) def find_sfc_port_pair(self, name_or_id, ignore_missing=True, **query): """Find a single Port Pair :param str name_or_id: The name or ID of an SFC port pair. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` or None """ return self._find( _sfc_port_pair.SfcPortPair, name_or_id, ignore_missing=ignore_missing, **query, ) def get_sfc_port_pair(self, port_pair): """Get a signle Port Pair :param port_pair: The value can be the ID of an SFC port pair or a :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` instance. :returns: :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_sfc_port_pair.SfcPortPair, port_pair) def update_sfc_port_pair(self, port_pair, **attrs): """Update a Port Pair :param port_pair: The value can be the ID of a Port Pair :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair`, instance. :param attrs: The attributes to update on the Port Pair :returns: The updated Port Pair. :rtype: :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` """ return self._update(_sfc_port_pair.SfcPortPair, port_pair, **attrs) def sfc_port_pairs(self, **query): """Return a generator of Port Pairs :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``name``: The name of the port pair. * ``description``: The port pair description. 
:returns: A generator of SFC port pair objects :rtype: :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` """ return self._list(_sfc_port_pair.SfcPortPair, **query) def create_sfc_port_pair_group(self, **attrs): """Create a new Port Pair Group from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup`, comprised of the properties on the SfcPortPairGroup class. :returns: The results of SFC Port Pair Group creation :rtype: :class:`~openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup` """ return self._create(_sfc_port_pair_group.SfcPortPairGroup, **attrs) def delete_sfc_port_pair_group(self, port_pair_group, ignore_missing=True): """Delete a Port Pair Group :param port_pair_group: The value can be either the ID of a port pair group or a :class:`~openstack.network.v2.sfc_port_pair_group. SfcPortPairGroup` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the port pair group does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent port pair group. :returns: ``None`` """ self._delete( _sfc_port_pair_group.SfcPortPairGroup, port_pair_group, ignore_missing=ignore_missing, ) def find_sfc_port_pair_group( self, name_or_id, ignore_missing=True, **query ): """Find a single Port Pair Group :param str name_or_id: The name or ID of an SFC port pair group. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.sfc_port_pair_group. 
SfcPortPairGroup` or None """ return self._find( _sfc_port_pair_group.SfcPortPairGroup, name_or_id, ignore_missing=ignore_missing, **query, ) def get_sfc_port_pair_group(self, port_pair_group): """Get a signle Port Pair Group :param port_pair_group: The value can be the ID of an SFC port pair group or a :class:`~openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup` instance. :returns: :class:`~openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get( _sfc_port_pair_group.SfcPortPairGroup, port_pair_group ) def update_sfc_port_pair_group(self, port_pair_group, **attrs): """Update a Port Pair Group :param port_pair_group: The value can be the ID of a Port Pair Group :class:`~openstack.network.v2.sfc_port_pair.SfcPortPairGroup`, instance. :param attrs: The attributes to update on the Port Pair Group :returns: The updated Port Pair Group. :rtype: :class:`~openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup` """ return self._update( _sfc_port_pair_group.SfcPortPairGroup, port_pair_group, **attrs ) def sfc_port_pair_groups(self, **query): """Return a generator of Port Pair Groups :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``name``: The name of the port pair. * ``description``: The port pair description. :returns: A generator of SFC port pair group objects :rtype: :class:`~openstack.network.v2.sfc_port_pair_group. SfcPortPairGroup` """ return self._list(_sfc_port_pair_group.SfcPortPairGroup, **query) def create_sfc_service_graph(self, **attrs): """Create a new Service Graph from attributes :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph`, comprised of the properties on the SfcServiceGraph class. 
:returns: The results of SFC Service Graph creation :rtype: :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` """ return self._create(_sfc_sservice_graph.SfcServiceGraph, **attrs) def delete_sfc_service_graph(self, service_graph, ignore_missing=True): """Delete a Service Graph :param service_graph: The value can be either the ID of a service graph or a :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the service graph does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent service graph. :returns: ``None`` """ self._delete( _sfc_sservice_graph.SfcServiceGraph, service_graph, ignore_missing=ignore_missing, ) def find_sfc_service_graph(self, name_or_id, ignore_missing=True, **query): """Find a single Service Graph :param str name_or_id: The name or ID of an SFC service graph. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.sfc_service_graph. SfcServiceGraph` or None """ return self._find( _sfc_sservice_graph.SfcServiceGraph, name_or_id, ignore_missing=ignore_missing, **query, ) def get_sfc_service_graph(self, service_graph): """Get a signle Service Graph :param service_graph: The value can be the ID of an SFC service graph or a :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` instance. :returns: :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
""" return self._get(_sfc_sservice_graph.SfcServiceGraph, service_graph) def update_sfc_service_graph(self, service_graph, **attrs): """Update a Service Graph :param service_graph: The value can be the ID of a Service Graph :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph`, instance. :param attrs: The attributes to update on the Service Graph :returns: The updated Service Graph. :rtype: :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` """ return self._update( _sfc_sservice_graph.SfcServiceGraph, service_graph, **attrs ) def sfc_service_graphs(self, **query): """Return a generator of Service Graphs :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``name``: The name of the port pair. * ``description``: The port pair description. :returns: A generator of SFC service graph objects :rtype: :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` """ return self._list(_sfc_sservice_graph.SfcServiceGraph, **query) def _get_cleanup_dependencies(self): return {'network': {'before': ['identity']}} def _service_cleanup( self, dry_run=True, client_status_queue=None, identified_resources=None, filters=None, resource_evaluation_fn=None, skip_resources=None, ): project_id = self.get_project_id() # check if the VPN service plugin is configured vpn_plugin = list(self.service_providers(service_type="VPN")) if vpn_plugin: if not self.should_skip_resource_cleanup( "vpn_ipsec_site_connection", skip_resources ): for obj in self.vpn_ipsec_site_connections(): self._service_cleanup_del_res( self.delete_vpn_ipsec_site_connection, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not self.should_skip_resource_cleanup( "vpn_service", skip_resources ): for obj in self.vpn_services(): self._service_cleanup_del_res( self.delete_vpn_service, obj, 
dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not self.should_skip_resource_cleanup( "vpn_endpoint_group", skip_resources ): for obj in self.vpn_endpoint_groups(): self._service_cleanup_del_res( self.delete_vpn_endpoint_group, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not self.should_skip_resource_cleanup( "vpn_ike_policy", skip_resources ): for obj in self.vpn_ike_policies(): self._service_cleanup_del_res( self.delete_vpn_ike_policy, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not self.should_skip_resource_cleanup( "vpn_ipsec_policy", skip_resources ): for obj in self.vpn_ipsec_policies(): self._service_cleanup_del_res( self.delete_vpn_ipsec_policy, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not self.should_skip_resource_cleanup( "floating_ip", skip_resources ): # Delete floating_ips in the project if no filters defined OR all # filters are matching and port_id is empty for obj in self.ips(project_id=project_id): self._service_cleanup_del_res( self.delete_ip, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=fip_cleanup_evaluation, ) if not self.should_skip_resource_cleanup( "security_group", skip_resources ): # Delete (try to delete) all security groups in the project # Let's hope we can't drop SG in use for obj in self.security_groups(project_id=project_id): if obj.name != 'default': self._service_cleanup_del_res( self.delete_security_group, obj, 
dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not ( self.should_skip_resource_cleanup("network", skip_resources) or self.should_skip_resource_cleanup("router", skip_resources) or self.should_skip_resource_cleanup("port", skip_resources) or self.should_skip_resource_cleanup("subnet", skip_resources) ): # Networks are crazy, try to delete router+net+subnet # if there are no "other" ports allocated on the net for net in self.networks(project_id=project_id): network_has_ports_allocated = False router_if = list() for port in self.ports( project_id=project_id, network_id=net.id ): self.log.debug('Looking at port %s' % port) if port.device_owner in [ 'network:router_interface', 'network:router_interface_distributed', 'network:ha_router_replicated_interface', ]: router_if.append(port) elif port.device_owner == 'network:dhcp': # we don't treat DHCP as a real port continue elif port.device_owner is None or port.device_owner == '': # Nobody owns the port - go with it continue elif ( identified_resources and port.device_id not in identified_resources ): # It seems some no other service identified this resource # to be deleted. 
We can assume it doesn't count network_has_ports_allocated = True if network_has_ports_allocated: # If some ports are on net - we cannot delete it continue self.log.debug('Network %s should be deleted' % net) # __Check__ if we need to drop network according to filters network_must_be_deleted = self._service_cleanup_del_res( self.delete_network, net, dry_run=True, client_status_queue=None, identified_resources=None, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not network_must_be_deleted: # If not - check another net continue # otherwise disconnect router, drop net, subnet, router # Disconnect for port in router_if: if client_status_queue: client_status_queue.put(port) router = self.get_router(port.device_id) if not dry_run: # Router interfaces cannot be deleted when the router has # static routes, so remove those first if len(router.routes) > 0: try: self.remove_extra_routes_from_router( router, {"router": {"routes": router.routes}}, ) except exceptions.SDKException: self.log.error( f"Cannot delete routes {router.routes} from router {router}" ) try: self.remove_interface_from_router( router=port.device_id, port_id=port.id ) except exceptions.SDKException: self.log.error('Cannot delete object %s' % obj) # router disconnected, drop it self._service_cleanup_del_res( self.delete_router, router, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=None, resource_evaluation_fn=None, ) # Drop ports not belonging to anybody for port in self.ports( project_id=project_id, network_id=net.id ): if port.device_owner is None or port.device_owner == '': self._service_cleanup_del_res( self.delete_port, port, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=None, resource_evaluation_fn=None, ) # Drop all subnets in the net (no further conditions) for obj in self.subnets( project_id=project_id, network_id=net.id ): self._service_cleanup_del_res( 
self.delete_subnet, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=None, resource_evaluation_fn=None, ) # And now the network itself (we are here definitely only if we # need that) self._service_cleanup_del_res( self.delete_network, net, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=None, resource_evaluation_fn=None, ) else: self.log.debug( "Skipping cleanup of networks, routers, ports and subnets " "as those resources require all of them to be cleaned up" "together, but at least one should be kept" ) if not self.should_skip_resource_cleanup("router", skip_resources): # It might happen, that we have routers not attached to anything for obj in self.routers(): ports = list(self.ports(device_id=obj.id)) if len(ports) == 0: self._service_cleanup_del_res( self.delete_router, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=None, resource_evaluation_fn=None, ) def fip_cleanup_evaluation(obj, identified_resources=None, filters=None): """Determine whether Floating IP should be deleted :param Resource obj: Floating IP object :param dict identified_resources: Optional dictionary with resources identified by other services for deletion. :param dict filters: dictionary with parameters """ if filters is not None and ( obj.port_id is not None and identified_resources and obj.port_id not in identified_resources ): # If filters are set, but port is not empty and will not be empty - # skip return False else: return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/address_group.py0000664000175000017500000000610600000000000023454 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class AddressGroup(resource.Resource): """Address group extension.""" resource_key = 'address_group' resources_key = 'address_groups' base_path = '/address-groups' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _allow_unknown_attrs_in_body = True _query_mapping = resource.QueryParameters( 'sort_key', 'sort_dir', 'name', 'description', 'project_id', ) # Properties #: The ID of the address group. id = resource.Body('id') #: The address group name. name = resource.Body('name') #: The address group name. description = resource.Body('description') #: The ID of the project that owns the address group. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The IP addresses of the address group. addresses = resource.Body('addresses', type=list) def _put(self, session, url, body): resp = session.put(url, json=body) exceptions.raise_from_response(resp) return resp def add_addresses(self, session, addresses): """Add addresses into the address group. :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param list addresses: The list of address strings. :returns: The response as a AddressGroup object with updated addresses :raises: :class:`~openstack.exceptions.SDKException` on error. 
""" url = utils.urljoin(self.base_path, self.id, 'add_addresses') resp = self._put(session, url, {'addresses': addresses}) self._translate_response(resp) return self def remove_addresses(self, session, addresses): """Remove addresses from the address group. :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param list addresses: The list of address strings. :returns: The response as a AddressGroup object with updated addresses :raises: :class:`~openstack.exceptions.SDKException` on error. """ url = utils.urljoin(self.base_path, self.id, 'remove_addresses') resp = self._put(session, url, {'addresses': addresses}) self._translate_response(resp) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/address_scope.py0000664000175000017500000000324000000000000023425 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class AddressScope(resource.Resource): """Address scope extension.""" resource_key = 'address_scope' resources_key = 'address_scopes' base_path = '/address-scopes' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'name', 'ip_version', 'project_id', 'sort_key', 'sort_dir', is_shared='shared', ) # Properties #: The address scope name. name = resource.Body('name') #: The ID of the project that owns the address scope. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The IP address family of the address scope. #: *Type: int* ip_version = resource.Body('ip_version', type=int) #: Indicates whether this address scope is shared across all projects. #: *Type: bool* is_shared = resource.Body('shared', type=bool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/agent.py0000664000175000017500000001254500000000000021715 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import resource from openstack import utils class Agent(resource.Resource): """Neutron agent extension.""" resource_key = 'agent' resources_key = 'agents' base_path = '/agents' _allow_unknown_attrs_in_body = True # capabilities allow_create = False allow_fetch = True allow_commit = True allow_delete = True allow_list = True # NOTE: We skip query for JSON fields and datetime fields _query_mapping = resource.QueryParameters( 'agent_type', 'availability_zone', 'binary', 'description', 'host', 'topic', is_admin_state_up='admin_state_up', is_alive='alive', ) # Properties #: The type of network agent. agent_type = resource.Body('agent_type') #: Availability zone for the network agent. availability_zone = resource.Body('availability_zone') #: The name of the network agent's application binary. binary = resource.Body('binary') #: Network agent configuration data specific to the agent_type. configuration = resource.Body('configurations') #: Timestamp when the network agent was created. created_at = resource.Body('created_at') #: The network agent description. description = resource.Body('description') #: Timestamp when the network agent's heartbeat was last seen. last_heartbeat_at = resource.Body('heartbeat_timestamp') #: The host the agent is running on. host = resource.Body('host') #: The administrative state of the network agent, which is up #: ``True`` or down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: Whether or not the network agent is alive. #: *Type: bool* is_alive = resource.Body('alive', type=bool) #: Whether or not the agent is succesffully synced towards placement. #: Agents supporting the guaranteed minimum bandwidth feature share their #: resource view with neutron-server and neutron-server share this view #: with placement, resources_synced represents the success of the latter. #: The value None means no resource view synchronization to Placement was #: attempted. 
true / false values signify the success of the last #: synchronization attempt. #: *Type: bool* resources_synced = resource.Body('resources_synced', type=bool) #: Timestamp when the network agent was last started. started_at = resource.Body('started_at') #: The messaging queue topic the network agent subscribes to. topic = resource.Body('topic') #: The HA state of the L3 agent. This is one of 'active', 'standby' or #: 'fault' for HA routers, or None for other types of routers. ha_state = resource.Body('ha_state') def add_agent_to_network(self, session, network_id): body = {'network_id': network_id} url = utils.urljoin(self.base_path, self.id, 'dhcp-networks') resp = session.post(url, json=body) return resp.json() def remove_agent_from_network(self, session, network_id): body = {'network_id': network_id} url = utils.urljoin( self.base_path, self.id, 'dhcp-networks', network_id ) session.delete(url, json=body) def add_router_to_agent(self, session, router): body = {'router_id': router} url = utils.urljoin(self.base_path, self.id, 'l3-routers') resp = session.post(url, json=body) return resp.json() def remove_router_from_agent(self, session, router): body = {'router_id': router} url = utils.urljoin(self.base_path, self.id, 'l3-routers', router) session.delete(url, json=body) def get_bgp_speakers_hosted_by_dragent(self, session): """List BGP speakers hosted by a Dynamic Routing Agent :param session: The session to communicate through. 
:type session: :class:`~keystoneauth1.adapter.Adapter` """ url = utils.urljoin(self.base_path, self.id, 'bgp-drinstances') resp = session.get(url) exceptions.raise_from_response(resp) self._body.attributes.update(resp.json()) return resp.json() class NetworkHostingDHCPAgent(Agent): resource_key = 'agent' resources_key = 'agents' resource_name = 'dhcp-agent' base_path = '/networks/%(network_id)s/dhcp-agents' # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True # NOTE: Doesn't support query yet. class RouterL3Agent(Agent): resource_key = 'agent' resources_key = 'agents' base_path = '/routers/%(router_id)s/l3-agents' resource_name = 'l3-agent' # capabilities allow_create = False allow_retrieve = True allow_commit = False allow_delete = False allow_list = True # NOTE: No query parameter is supported ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/auto_allocated_topology.py0000664000175000017500000000355700000000000025536 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class AutoAllocatedTopology(resource.Resource): resource_name = 'auto_allocated_topology' resource_key = 'auto_allocated_topology' base_path = '/auto-allocated-topology' _allow_unknown_attrs_in_body = True # Capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = True allow_list = False # NOTE: this resource doesn't support list or query # Properties #: Project ID #: If project is not specified the topology will be created #: for project user is authenticated against. #: Will return in error if resources have not been configured correctly #: To use this feature auto-allocated-topology, subnet_allocation, #: external-net and router extensions must be enabled and set up. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) class ValidateTopology(AutoAllocatedTopology): base_path = '/auto-allocated-topology/%(project)s?fields=dry-run' #: Validate requirements before running (Does not return topology) #: Will return "Deployment error:" if the resources required have not #: been correctly set up. dry_run = resource.Body('dry_run') project = resource.URI('project') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/availability_zone.py0000664000175000017500000000274000000000000024320 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource as _resource class AvailabilityZone(_resource.Resource): resource_key = 'availability_zone' resources_key = 'availability_zones' base_path = '/availability_zones' _allow_unknown_attrs_in_body = True # capabilities allow_create = False allow_fetch = False allow_commit = False allow_delete = False allow_list = True # NOTE: We don't support query by state yet because there is a mapping # at neutron side difficult to map. _query_mapping = _resource.QueryParameters( name='availability_zone', resource='agent_type', ) # Properties #: Name of the availability zone. name = _resource.Body('name') #: Type of resource for the availability zone, such as ``network``. resource = _resource.Body('resource') #: State of the availability zone, either ``available`` or #: ``unavailable``. state = _resource.Body('state') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/bgp_peer.py0000664000175000017500000000301600000000000022373 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class BgpPeer(resource.Resource): resource_key = 'bgp_peer' resources_key = 'bgp_peers' base_path = '/bgp-peers' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The Id of the BGP Peer id = resource.Body('id') #: The BGP Peer's name. name = resource.Body('name') #: The ID of the project that owns the BGP Peer project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The authentication type for the BGP Peer, can be none or md5. #: none by default. auth_type = resource.Body('auth_type') #: The remote Autonomous System number of the BGP Peer. remote_as = resource.Body('remote_as') #: The ip address of the Peer. peer_ip = resource.Body('peer_ip') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/bgp_speaker.py0000664000175000017500000001512700000000000023100 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import resource from openstack import utils class BgpSpeaker(resource.Resource): resource_key = 'bgp_speaker' resources_key = 'bgp_speakers' base_path = '/bgp-speakers' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The Id of the BGP Speaker id = resource.Body('id') #: The BGP speaker's name. name = resource.Body('name') #: The ID of the project that owns the BGP Speaker. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The IP version (4 or 6) of the BGP Speaker. ip_version = resource.Body('ip_version') #: Whether to enable or disable the advertisement of floating ip host #: routes by the BGP Speaker. True by default. advertise_floating_ip_host_routes = resource.Body( 'advertise_floating_ip_host_routes' ) #: Whether to enable or disable the advertisement of tenant network #: routes by the BGP Speaker. True by default. advertise_tenant_networks = resource.Body('advertise_tenant_networks') #: The local Autonomous System number of the BGP Speaker. local_as = resource.Body('local_as') #: The ID of the network to which the BGP Speaker is associated. networks = resource.Body('networks') def _put(self, session, url, body): resp = session.put(url, json=body) exceptions.raise_from_response(resp) return resp def add_bgp_peer(self, session, peer_id): """Add BGP Peer to a BGP Speaker :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param peer_id: id of the peer to associate with the speaker. :returns: A dictionary as the API Reference describes it. :raises: :class:`~openstack.exceptions.SDKException` on error. 
""" url = utils.urljoin(self.base_path, self.id, 'add_bgp_peer') body = {'bgp_peer_id': peer_id} resp = self._put(session, url, body) return resp.json() def remove_bgp_peer(self, session, peer_id): """Remove BGP Peer from a BGP Speaker :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param peer_id: The ID of the peer to disassociate from the speaker. :raises: :class:`~openstack.exceptions.SDKException` on error. """ url = utils.urljoin(self.base_path, self.id, 'remove_bgp_peer') body = {'bgp_peer_id': peer_id} self._put(session, url, body) def add_gateway_network(self, session, network_id): """Add Network to a BGP Speaker :param: session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param network_id: The ID of the network to associate with the speaker :returns: A dictionary as the API Reference describes it. """ body = {'network_id': network_id} url = utils.urljoin(self.base_path, self.id, 'add_gateway_network') resp = session.put(url, json=body) return resp.json() def remove_gateway_network(self, session, network_id): """Delete Network from a BGP Speaker :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param network_id: The ID of the network to disassociate from the speaker """ body = {'network_id': network_id} url = utils.urljoin(self.base_path, self.id, 'remove_gateway_network') session.put(url, json=body) def get_advertised_routes(self, session): """List routes advertised by a BGP Speaker :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :returns: The response as a list of routes (cidr/nexthop pair advertised by the BGP Speaker. :raises: :class:`~openstack.exceptions.SDKException` on error. 
""" url = utils.urljoin(self.base_path, self.id, 'get_advertised_routes') resp = session.get(url) exceptions.raise_from_response(resp) self._body.attributes.update(resp.json()) return resp.json() def get_bgp_dragents(self, session): """List Dynamic Routing Agents hosting a specific BGP Speaker :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :returns: The response as a list of dragents hosting a specific BGP Speaker. :raises: :class:`~openstack.exceptions.SDKException` on error. """ url = utils.urljoin(self.base_path, self.id, 'bgp-dragents') resp = session.get(url) exceptions.raise_from_response(resp) self._body.attributes.update(resp.json()) return resp.json() def add_bgp_speaker_to_dragent(self, session, bgp_agent_id): """Add BGP Speaker to a Dynamic Routing Agent :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param bgp_agent_id: The id of the dynamic routing agent to which add the speaker. """ body = {'bgp_speaker_id': self.id} url = utils.urljoin('agents', bgp_agent_id, 'bgp-drinstances') session.post(url, json=body) def remove_bgp_speaker_from_dragent(self, session, bgp_agent_id): """Delete BGP Speaker from a Dynamic Routing Agent :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param bgp_agent_id: The id of the dynamic routing agent from which remove the speaker. """ url = utils.urljoin('agents', bgp_agent_id, 'bgp-drinstances', self.id) session.delete(url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/bgpvpn.py0000664000175000017500000000447300000000000022114 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class BgpVpn(resource.Resource): resource_key = 'bgpvpn' resources_key = 'bgpvpns' base_path = '/bgpvpn/bgpvpns' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'name', 'project_id', 'local_pref', 'vni', 'type', 'networks', 'routers', 'ports', # NOTE(seba): (route|import|export) targets only support exact matches # and have therefore been left out ) # Properties #: The Id of the BGPVPN id = resource.Body('id') #: The BGPVPN's name. name = resource.Body('name') #: The ID of the project that owns the BGPVPN project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: List of route distinguisher strings. route_distinguishers = resource.Body('route_distinguishers') #: Route Targets that will be both imported and used for export. route_targets = resource.Body('route_targets') #: Additional Route Targets that will be imported. import_targets = resource.Body('import_targets') #: Additional Route Targets that will be used for export. export_targets = resource.Body('export_targets') #: The default BGP LOCAL_PREF of routes that will be advertised to #: the BGPVPN. local_pref = resource.Body('local_pref') #: The globally-assigned VXLAN vni for the BGP VPN. vni = resource.Body('vni') #: Selection of the type of VPN and the technology behind it. #: Allowed values are l2 or l3. 
type = resource.Body('type') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/bgpvpn_network_association.py0000664000175000017500000000267500000000000026263 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class BgpVpnNetworkAssociation(resource.Resource): resource_key = 'network_association' resources_key = 'network_associations' base_path = '/bgpvpn/bgpvpns/%(bgpvpn_id)s/network_associations' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = False allow_delete = True allow_list = True # Properties #: The Id of the BGPVPN id = resource.Body('id') #: The ID of the BGPVPN who owns Network Association. bgpvpn_id = resource.URI('bgpvpn_id') #: The ID of the project that owns the BGPVPN project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The ID of a Neutron network with which to associate the BGP VPN. 
network_id = resource.Body('network_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/bgpvpn_port_association.py0000664000175000017500000000372300000000000025551 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class BgpVpnPortAssociation(resource.Resource): resource_key = 'port_association' resources_key = 'port_associations' base_path = '/bgpvpn/bgpvpns/%(bgpvpn_id)s/port_associations' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The Id of the BGPVPN id = resource.Body('id') #: The ID of the BGPVPN who owns Network Association. bgpvpn_id = resource.URI('bgpvpn_id') #: The ID of the project that owns the BGPVPN project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The ID of a Neutron Port with which to associate the BGP VPN. port_id = resource.Body('port_id') #: Boolean flag controlling whether or not the fixed IPs of a port will be #: advertised to the BGPVPN (default: true). advertise_fixed_ips = resource.Body('advertise_fixed_ips') #: List of routes, each route being a dict with at least a type key, #: which can be prefix or bgpvpn. 
#: For the prefix type, the IP prefix (v4 or v6) to advertise is specified #: in the prefix key. #: For the bgpvpn type, the bgpvpn_id key specifies the BGPVPN from which #: routes will be readvertised routes = resource.Body('routes') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/bgpvpn_router_association.py0000664000175000017500000000324400000000000026103 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class BgpVpnRouterAssociation(resource.Resource): resource_key = 'router_association' resources_key = 'router_associations' base_path = '/bgpvpn/bgpvpns/%(bgpvpn_id)s/router_associations' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The Id of the BGPVPN id = resource.Body('id') #: The ID of the BGPVPN who owns Network Association. bgpvpn_id = resource.URI('bgpvpn_id') #: The ID of the project that owns the BGPVPN project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The ID of a Neutron router with which to associate the BGP VPN. 
router_id = resource.Body('router_id') #: Boolean flag controlling whether or not the routes specified in the #: routes attribute of the router will be advertised to the BGPVPN #: (default: true). advertise_extra_routes = resource.Body('advertise_extra_routes') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/default_security_group_rule.py0000664000175000017500000000777000000000000026441 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import _base from openstack import resource class DefaultSecurityGroupRule(_base.NetworkResource): resource_key = 'default_security_group_rule' resources_key = 'default_security_group_rules' base_path = '/default-security-group-rules' # capabilities allow_create = True allow_fetch = True allow_commit = False allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'id', 'description', 'remote_group_id', 'remote_address_group_id', 'direction', 'protocol', 'port_range_min', 'port_range_max', 'remote_ip_prefix', 'used_in_default_sg', 'used_in_non_default_sg', 'sort_dir', 'sort_key', ether_type='ethertype', ) # Properties #: The default security group rule description. description = resource.Body('description') #: The remote security group ID to be associated with this security #: group rule created from this template. 
#: You can specify either ``remote_group_id`` or #: #: ``remote_address_group_id`` or ``remote_ip_prefix``. remote_group_id = resource.Body('remote_group_id') #: The remote address group ID to be associated with this security #: group rule created from that template. #: You can specify either ``remote_group_id`` or #: ``remote_address_group_id`` or ``remote_ip_prefix``. remote_address_group_id = resource.Body('remote_address_group_id') #: ``ingress`` or ``egress``: The direction in which the security group #: #: rule will be applied. See 'direction' field in the security group rule #: API. direction = resource.Body('direction') #: The protocol that is matched by the security group rule. #: Valid values are ``null``, ``tcp``, ``udp``, and ``icmp``. protocol = resource.Body('protocol') #: The minimum port number in the range that is matched by the #: security group rule. If the protocol is TCP or UDP, this value #: must be less than or equal to the value of the port_range_max #: attribute. If the protocol is ICMP, this value must be an ICMP type. port_range_min = resource.Body('port_range_min', type=int) #: The maximum port number in the range that is matched by the #: security group rule. The port_range_min attribute constrains #: the port_range_max attribute. If the protocol is ICMP, this #: value must be an ICMP type. port_range_max = resource.Body('port_range_max', type=int) #: The remote IP prefix to be associated with this security group rule. #: You can specify either ``remote_group_id`` or #: ``remote_address_group_id`` or ``remote_ip_prefix``. #: This attribute matches the specified IP prefix as the source or #: destination IP address of the IP packet depending on direction. remote_ip_prefix = resource.Body('remote_ip_prefix') #: Must be IPv4 or IPv6, and addresses represented in CIDR must match #: the ingress or egress rules. 
ether_type = resource.Body('ethertype') #: Indicate if this template be used to create security group rules in the #: default security group created automatically for each project. used_in_default_sg = resource.Body('used_in_default_sg', type=bool) #: Indicate if this template be used to create security group rules in the #: custom security groups created in the project by users. used_in_non_default_sg = resource.Body('used_in_non_default_sg', type=bool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/extension.py0000664000175000017500000000247400000000000022633 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Extension(resource.Resource): resource_key = 'extension' resources_key = 'extensions' base_path = '/extensions' _allow_unknown_attrs_in_body = True # capabilities allow_fetch = True allow_list = True # NOTE: No query parameters supported # Properties #: An alias the extension is known under. alias = resource.Body('alias', alternate_id=True) #: Text describing what the extension does. description = resource.Body('description') #: Links pertaining to this extension. links = resource.Body('links', type=list, list_type=dict) #: The name of this extension. name = resource.Body('name') #: Timestamp when the extension was last updated. 
updated_at = resource.Body('updated') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/firewall_group.py0000664000175000017500000000455200000000000023637 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class FirewallGroup(resource.Resource): resource_key = 'firewall_group' resources_key = 'firewall_groups' base_path = '/fwaas/firewall_groups' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'egress_firewall_policy_id', 'ingress_firewall_policy_id', 'name', 'shared', 'status', 'ports', 'project_id', ) # Properties #: The administrative state of the firewall group, which is up (true) or #: down (false). Default is true. admin_state_up = resource.Body('admin_state_up') #: The firewall group rule description. description = resource.Body('description') #: The ID of the egress firewall policy for the firewall group. egress_firewall_policy_id = resource.Body('egress_firewall_policy_id') #: The ID of the ingress firewall policy for the firewall group. ingress_firewall_policy_id = resource.Body('ingress_firewall_policy_id') #: The ID of the firewall group. 
id = resource.Body('id') #: The name of a firewall group name = resource.Body('name') #: A list of the IDs of the ports associated with the firewall group. ports = resource.Body('ports') #: The ID of the project that owns the resource. project_id = resource.Body('project_id') #: Indicates whether this firewall group is shared across all projects. shared = resource.Body('shared') #: The status of the firewall group. Valid values are ACTIVE, INACTIVE, #: ERROR, PENDING_UPDATE, or PENDING_DELETE. status = resource.Body('status') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/firewall_policy.py0000664000175000017500000000725200000000000024002 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.exceptions import HttpException from openstack import resource from openstack import utils class FirewallPolicy(resource.Resource): resource_key = 'firewall_policy' resources_key = 'firewall_policies' base_path = '/fwaas/firewall_policies' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'firewall_rules', 'name', 'project_id', 'shared', ) # Properties #: Each time that the firewall policy or its associated rules are changed, #: the API sets this attribute to false. To audit the policy, #: explicitly set this attribute to true. audited = resource.Body('audited') #: The firewall group rule description. description = resource.Body('description') #: The ID of the firewall policy. id = resource.Body('id') #: A list of the IDs of the firewall rules associated with the #: firewall policy. firewall_rules = resource.Body('firewall_rules') #: The name of a firewall policy name = resource.Body('name') #: The ID of the project that owns the resource. project_id = resource.Body('project_id') #: Set to true to make this firewall policy visible to other projects. shared = resource.Body('shared') def insert_rule(self, session, **body): """Insert a firewall_rule into a firewall_policy in order. :param session: The session to communicate through. :type session: :class:`~openstack.session.Session` :param dict body: The body requested to be updated on the router :returns: The updated firewall policy :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` :raises: :class:`~openstack.exceptions.HttpException` on error. """ url = utils.urljoin(self.base_path, self.id, 'insert_rule') return self._put_request(session, url, body) def remove_rule(self, session, **body): """Remove a firewall_rule from a firewall_policy. :param session: The session to communicate through. 
:type session: :class:`~openstack.session.Session` :param dict body: The body requested to be updated on the router :returns: The updated firewall policy :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` :raises: :class:`~openstack.exceptions.HttpException` on error. """ url = utils.urljoin(self.base_path, self.id, 'remove_rule') return self._put_request(session, url, body) def _put_request(self, session, url, json_data): resp = session.put(url, json=json_data) data = resp.json() if not resp.ok: message = None if 'NeutronError' in data: message = data['NeutronError']['message'] raise HttpException(message=message, response=resp) self._body.attributes.update(data) self._update_location() return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/firewall_rule.py0000664000175000017500000000612500000000000023450 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class FirewallRule(resource.Resource): resource_key = 'firewall_rule' resources_key = 'firewall_rules' base_path = '/fwaas/firewall_rules' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'action', 'description', 'destination_ip_address', 'name', 'destination_port', 'enabled', 'ip_version', 'project_id', 'protocol', 'shared', 'source_ip_address', 'source_port', 'firewall_policy_id', ) # Properties #: The action that the API performs on traffic that matches the firewall #: rule. Valid values are allow or deny. Default is deny. action = resource.Body('action') #: The description of the firewall rule description = resource.Body('description') #: The destination IPv4 or IPv6 address or CIDR for the firewall rule. destination_ip_address = resource.Body('destination_ip_address') #: The destination port or port range for the firewall rule. destination_port = resource.Body('destination_port') #: Facilitates selectively turning off rules without having to disassociate #: the rule from the firewall policy enabled = resource.Body('enabled') #: The IP protocol version for the firewall rule. Valid values are 4 or 6. ip_version = resource.Body('ip_version') #: The name of the firewall rule. name = resource.Body('name') #: The ID of the project that owns the resource. project_id = resource.Body('project_id') #: The IP protocol for the firewall rule. protocol = resource.Body('protocol') #: Indicates whether this firewall rule is shared across all projects. shared = resource.Body('shared') #: The source IPv4 or IPv6 address or CIDR for the firewall rule. source_ip_address = resource.Body('source_ip_address') #: The source port or port range for the firewall rule. 
source_port = resource.Body('source_port') #: Summary field of a FirewallRule, composed of the protocol, #: source_ip_address:source_port, #: destination_ip_address:destination_port and action. summary = resource.Computed('summary', default='') #: The ID of the firewall policy. firewall_policy_id = resource.Body('firewall_policy_id') #: The ID of the firewall rule. id = resource.Body('id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/flavor.py0000664000175000017500000000425100000000000022103 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource from openstack import utils class Flavor(resource.Resource): resource_key = 'flavor' resources_key = 'flavors' base_path = '/flavors' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'name', 'service_type', 'sort_key', 'sort_dir', is_enabled='enabled', ) # properties #: description for the flavor description = resource.Body('description') #: Sets enabled flag is_enabled = resource.Body('enabled', type=bool) #: The name of the flavor name = resource.Body('name') #: Service type to which the flavor applies service_type = resource.Body('service_type') #: IDs of service profiles associated with this flavor service_profile_ids = resource.Body('service_profiles', type=list) def associate_flavor_with_service_profile( self, session, service_profile_id=None ): flavor_id = self.id url = utils.urljoin(self.base_path, flavor_id, 'service_profiles') body = {"service_profile": {"id": service_profile_id}} resp = session.post(url, json=body) return resp.json() def disassociate_flavor_from_service_profile( self, session, service_profile_id=None ): flavor_id = self.id url = utils.urljoin( self.base_path, flavor_id, 'service_profiles', service_profile_id ) session.delete( url, ) return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/floating_ip.py0000664000175000017500000001003100000000000023076 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack.network.v2 import _base from openstack import resource class FloatingIP(_base.NetworkResource, tag.TagMixin): name_attribute = "floating_ip_address" resource_name = "floating ip" resource_key = 'floatingip' resources_key = 'floatingips' base_path = '/floatingips' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # For backward compatibility include tenant_id as query param _query_mapping = resource.QueryParameters( 'description', 'fixed_ip_address', 'floating_ip_address', 'floating_network_id', 'port_id', 'router_id', 'status', 'subnet_id', 'project_id', 'tenant_id', 'sort_key', 'sort_dir', tenant_id='project_id', **tag.TagMixin._tag_query_parameters ) # Properties #: Timestamp at which the floating IP was created. created_at = resource.Body('created_at') #: The floating IP description. description = resource.Body('description') #: The DNS domain. dns_domain = resource.Body('dns_domain') #: The DNS name. dns_name = resource.Body('dns_name') #: The fixed IP address associated with the floating IP. If you #: intend to associate the floating IP with a fixed IP at creation #: time, then you must indicate the identifier of the internal port. #: If an internal port has multiple associated IP addresses, the #: service chooses the first IP unless you explicitly specify the #: parameter fixed_ip_address to select a specific IP. fixed_ip_address = resource.Body('fixed_ip_address') #: The floating IP address. 
floating_ip_address = resource.Body('floating_ip_address') #: Floating IP object doesn't have name attribute, set ip address to name #: so that user could find floating IP by UUID or IP address using find_ip name = floating_ip_address #: The ID of the network associated with the floating IP. floating_network_id = resource.Body('floating_network_id') #: Read-only. The details of the port that this floating IP associates #: with. Present if ``fip-port-details`` extension is loaded. #: *Type: dict with keys: name, network_id, mac_address, admin_state_up, #: status, device_id, device_owner* port_details = resource.Body('port_details', type=dict) #: The port ID. port_id = resource.Body('port_id') #: The ID of the QoS policy attached to the floating IP. qos_policy_id = resource.Body('qos_policy_id') #: The ID of the project this floating IP is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The ID of an associated router. router_id = resource.Body('router_id') #: The floating IP status. Value is ``ACTIVE`` or ``DOWN``. status = resource.Body('status') #: Timestamp at which the floating IP was last updated. updated_at = resource.Body('updated_at') #: The Subnet ID associated with the floating IP. subnet_id = resource.Body('subnet_id') @classmethod def find_available(cls, session): # server-side filtering on empty values is not always supported. 
# TODO(mordred) Make this check for support for the server-side filter for ip in cls.list(session): if not ip.port_id: return ip return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/health_monitor.py0000664000175000017500000000541000000000000023624 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class HealthMonitor(resource.Resource): resource_key = 'healthmonitor' resources_key = 'healthmonitors' base_path = '/lbaas/healthmonitors' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'delay', 'expected_codes', 'http_method', 'max_retries', 'timeout', 'type', 'url_path', 'project_id', is_admin_state_up='adminstate_up', ) # Properties #: The time, in seconds, between sending probes to members. delay = resource.Body('delay') #: Expected HTTP codes for a passing HTTP(S) monitor. expected_codes = resource.Body('expected_codes') #: The HTTP method that the monitor uses for requests. http_method = resource.Body('http_method') #: The administrative state of the health monitor, which is up #: ``True`` or down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: Maximum consecutive health probe tries. 
max_retries = resource.Body('max_retries') #: Name of the health monitor. name = resource.Body('name') #: List of pools associated with this health monitor #: *Type: list of dicts which contain the pool IDs* pool_ids = resource.Body('pools', type=list) #: The ID of the pool associated with this health monitor pool_id = resource.Body('pool_id') #: The ID of the project this health monitor is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The maximum number of seconds for a monitor to wait for a #: connection to be established before it times out. This value must #: be less than the delay value. timeout = resource.Body('timeout') #: The type of probe sent by the load balancer to verify the member #: state, which is PING, TCP, HTTP, or HTTPS. type = resource.Body('type') #: Path portion of URI that will be probed if type is HTTP(S). url_path = resource.Body('url_path') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/l3_conntrack_helper.py0000664000175000017500000000241300000000000024527 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class ConntrackHelper(resource.Resource): resource_key = 'conntrack_helper' resources_key = 'conntrack_helpers' base_path = '/routers/%(router_id)s/conntrack_helpers' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The ID of the Router who owns helper. router_id = resource.URI('router_id') #: The netfilter conntrack helper module. helper = resource.Body('helper') #: The network protocol for the netfilter conntrack target rule. protocol = resource.Body('protocol') #: The network port for the netfilter conntrack target rule. port = resource.Body('port') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/listener.py0000664000175000017500000000522100000000000022435 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Listener(resource.Resource): resource_key = 'listener' resources_key = 'listeners' base_path = '/lbaas/listeners' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'connection_limit', 'default_pool_id', 'default_tls_container_ref', 'description', 'name', 'project_id', 'protocol', 'protocol_port', is_admin_state_up='admin_state_up', ) # Properties #: The maximum number of connections permitted for this load balancer. #: Default is infinite. connection_limit = resource.Body('connection_limit') #: ID of default pool. Must have compatible protocol with listener. default_pool_id = resource.Body('default_pool_id') #: A reference to a container of TLS secrets. default_tls_container_ref = resource.Body('default_tls_container_ref') #: Description for the listener. description = resource.Body('description') #: The administrative state of the listener, which is up #: ``True`` or down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: List of load balancers associated with this listener. #: *Type: list of dicts which contain the load balancer IDs* load_balancer_ids = resource.Body('loadbalancers') #: The ID of the load balancer associated with this listener. load_balancer_id = resource.Body('loadbalancer_id') #: Name of the listener name = resource.Body('name') #: The ID of the project this listener is associated with. project_id = resource.Body('project_id') #: The protocol of the listener, which is TCP, HTTP, HTTPS #: or TERMINATED_HTTPS. protocol = resource.Body('protocol') #: Port the listener will listen to, e.g. 80. protocol_port = resource.Body('protocol_port') #: A list of references to TLS secrets. 
#: *Type: list* sni_container_refs = resource.Body('sni_container_refs') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/load_balancer.py0000664000175000017500000000524100000000000023360 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class LoadBalancer(resource.Resource): resource_key = 'loadbalancer' resources_key = 'loadbalancers' base_path = '/lbaas/loadbalancers' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'name', 'project_id', 'provider', 'provisioning_status', 'tenant_id', 'vip_address', 'vip_subnet_id', is_admin_state_up='admin_state_up', ) # Properties #: Description for the load balancer. description = resource.Body('description') #: The administrative state of the load balancer, which is up #: ``True`` or down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: List of listeners associated with this load balancer. #: *Type: list of dicts which contain the listener IDs* listener_ids = resource.Body('listeners', type=list) #: Name of the load balancer name = resource.Body('name') #: Status of load_balancer operating, e.g. ONLINE, OFFLINE. 
operating_status = resource.Body('operating_status') #: List of pools associated with this load balancer. #: *Type: list of dicts which contain the pool IDs* pool_ids = resource.Body('pools', type=list) #: The ID of the project this load balancer is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The name of the provider. provider = resource.Body('provider') #: Status of load balancer provisioning, e.g. ACTIVE, INACTIVE. provisioning_status = resource.Body('provisioning_status') #: The IP address of the VIP. vip_address = resource.Body('vip_address') #: The ID of the port for the VIP. vip_port_id = resource.Body('vip_port_id') #: The ID of the subnet on which to allocate the VIP address. vip_subnet_id = resource.Body('vip_subnet_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/local_ip.py0000664000175000017500000000427100000000000022376 0ustar00zuulzuul00000000000000# Copyright 2021 Huawei, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from openstack import resource class LocalIP(resource.Resource): """Local IP extension.""" resource_name = "local ip" resource_key = "local_ip" resources_key = "local_ips" base_path = "/local_ips" # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _allow_unknown_attrs_in_body = True _query_mapping = resource.QueryParameters( 'sort_key', 'sort_dir', 'name', 'description', 'project_id', 'network_id', 'local_port_id', 'local_ip_address', 'ip_mode', ) # Properties #: Timestamp at which the floating IP was created. created_at = resource.Body('created_at') #: The local ip description. description = resource.Body('description') #: The ID of the local ip. id = resource.Body('id') #: The local ip ip-mode. ip_mode = resource.Body('ip_mode') #: The Local IP address. local_ip_address = resource.Body('local_ip_address') #: The ID of the port that owns the local ip. local_port_id = resource.Body('local_port_id') #: The local ip name. name = resource.Body('name') #: The ID of the network that owns the local ip. network_id = resource.Body('network_id') #: The ID of the project that owns the local ip. project_id = resource.Body('project_id') #: The local ip revision number. revision_number = resource.Body('revision_number') #: Timestamp at which the floating IP was last updated. updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/local_ip_association.py0000664000175000017500000000304300000000000024766 0ustar00zuulzuul00000000000000# Copyright 2021 Huawei, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from openstack import resource class LocalIPAssociation(resource.Resource): """Local IP extension.""" resource_key = "port_association" resources_key = "port_associations" base_path = "/local_ips/%(local_ip_id)s/port_associations" # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _allow_unknown_attrs_in_body = True _query_mapping = resource.QueryParameters( 'fixed_port_id', 'fixed_ip', 'host', 'sort_key', 'sort_dir', ) # Properties #: The fixed port ID. fixed_port_id = resource.Body('fixed_port_id') #: The fixed IP. fixed_ip = resource.Body('fixed_ip') #: Host host = resource.Body('host') #: The local ip address local_ip_address = resource.Body('local_ip_address') #: The ID of Local IP address local_ip_id = resource.URI('local_ip_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/metering_label.py0000664000175000017500000000315400000000000023564 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class MeteringLabel(resource.Resource): resource_key = 'metering_label' resources_key = 'metering_labels' base_path = '/metering/metering-labels' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'name', 'project_id', 'sort_key', 'sort_dir', is_shared='shared', ) # Properties #: Description of the metering label. description = resource.Body('description') #: Name of the metering label. name = resource.Body('name') #: The ID of the project this metering label is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: Indicates whether this label is shared across all tenants. #: *Type: bool* is_shared = resource.Body('shared', type=bool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/metering_label_rule.py0000664000175000017500000000543300000000000024615 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class MeteringLabelRule(resource.Resource): resource_key = 'metering_label_rule' resources_key = 'metering_label_rules' base_path = '/metering/metering-label-rules' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'direction', 'metering_label_id', 'remote_ip_prefix', 'source_ip_prefix', 'destination_ip_prefix', 'project_id', 'sort_key', 'sort_dir', ) # Properties #: ingress or egress: The direction in which metering label rule is #: applied. Default: ``"ingress"`` direction = resource.Body('direction') #: Specify whether the ``remote_ip_prefix`` will be excluded or not #: from traffic counters of the metering label, ie: to not count the #: traffic of a specific IP address of a range. Default: ``False``, #: *Type: bool* is_excluded = resource.Body('excluded', type=bool) #: The metering label ID to associate with this metering label rule. metering_label_id = resource.Body('metering_label_id') #: The ID of the project this metering label rule is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The remote IP prefix to be associated with this metering label rule. remote_ip_prefix = resource.Body( 'remote_ip_prefix', deprecated=True, deprecation_reason="The use of 'remote_ip_prefix' in metering label " "rules is deprecated and will be removed in future " "releases. One should use instead, the " "'source_ip_prefix' and/or 'destination_ip_prefix' " "parameters. For more details, you can check the " "spec: https://review.opendev.org/#/c/744702/.", ) #: The source IP prefix to be associated with this metering label rule. 
source_ip_prefix = resource.Body('source_ip_prefix') #: The destination IP prefix to be associated with this metering label rule destination_ip_prefix = resource.Body('destination_ip_prefix') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/ndp_proxy.py0000664000175000017500000000364100000000000022636 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class NDPProxy(resource.Resource): resource_name = "ndp proxy" resource_key = 'ndp_proxy' resources_key = 'ndp_proxies' base_path = '/ndp_proxies' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _allow_unknown_attrs_in_body = True _query_mapping = resource.QueryParameters( "sort_key", "sort_dir", 'name', 'description', 'project_id', 'router_id', 'port_id', 'ip_address', ) # Properties #: Timestamp at which the NDP proxy was created. created_at = resource.Body('created_at') #: The description description = resource.Body('description') #: The ID of the NDP proxy. id = resource.Body('id') #: The internal IP address ip_address = resource.Body('ip_address') # The name of ndp proxy name = resource.Body('name') #: The ID of internal port port_id = resource.Body('port_id') #: The ID of the project that owns the NDP proxy. project_id = resource.Body('project_id') #: The NDP proxy revision number. 
revision_number = resource.Body('revision_number') #: The ID of Router router_id = resource.Body('router_id') #: Timestamp at which the NDP proxy was last updated. updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/network.py0000664000175000017500000001261000000000000022301 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack.network.v2 import _base from openstack import resource class Network(_base.NetworkResource, tag.TagMixin): resource_key = 'network' resources_key = 'networks' base_path = '/networks' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # NOTE: We don't support query on list or datetime fields yet _query_mapping = resource.QueryParameters( 'description', 'name', 'status', 'project_id', 'sort_key', 'sort_dir', ipv4_address_scope_id='ipv4_address_scope', ipv6_address_scope_id='ipv6_address_scope', is_admin_state_up='admin_state_up', is_port_security_enabled='port_security_enabled', is_router_external='router:external', is_shared='shared', provider_network_type='provider:network_type', provider_physical_network='provider:physical_network', provider_segmentation_id='provider:segmentation_id', **tag.TagMixin._tag_query_parameters ) # Properties #: Availability zone hints to use when scheduling the 
network. #: *Type: list of availability zone names* availability_zone_hints = resource.Body('availability_zone_hints') #: Availability zones for the network. #: *Type: list of availability zone names* availability_zones = resource.Body('availability_zones') #: Timestamp when the network was created. created_at = resource.Body('created_at') #: The network description. description = resource.Body('description') #: The DNS domain associated. dns_domain = resource.Body('dns_domain') #: The ID of the IPv4 address scope for the network. ipv4_address_scope_id = resource.Body('ipv4_address_scope') #: The ID of the IPv6 address scope for the network. ipv6_address_scope_id = resource.Body('ipv6_address_scope') #: The administrative state of the network, which is up ``True`` or #: down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: Whether or not this is the default external network. #: *Type: bool* is_default = resource.Body('is_default', type=bool) #: The port security status, which is enabled ``True`` or disabled #: ``False``. *Type: bool* *Default: False* #: Available for multiple provider extensions. is_port_security_enabled = resource.Body( 'port_security_enabled', type=bool, default=False ) #: Whether or not the router is external. #: *Type: bool* *Default: False* is_router_external = resource.Body( 'router:external', type=bool, default=False ) #: Indicates whether this network is shared across all tenants. #: By default, only administrative users can change this value. #: *Type: bool* is_shared = resource.Body('shared', type=bool) #: Read-only. The maximum transmission unit (MTU) of the network resource. mtu = resource.Body('mtu', type=int) #: The network name. name = resource.Body('name') #: The ID of the project this network is associated with. project_id = resource.Body('project_id') #: The type of physical network that maps to this network resource. #: For example, ``flat``, ``vlan``, ``vxlan``, or ``gre``. 
#: Available for multiple provider extensions. provider_network_type = resource.Body('provider:network_type') #: The physical network where this network object is implemented. #: Available for multiple provider extensions. provider_physical_network = resource.Body('provider:physical_network') #: An isolated segment ID on the physical network. The provider #: network type defines the segmentation model. #: Available for multiple provider extensions. provider_segmentation_id = resource.Body('provider:segmentation_id') #: The ID of the QoS policy attached to the port. qos_policy_id = resource.Body('qos_policy_id') #: A list of provider segment objects. #: Available for multiple provider extensions. segments = resource.Body('segments') #: The network status. status = resource.Body('status') #: The associated subnet IDs. #: *Type: list of strs of the subnet IDs* subnet_ids = resource.Body('subnets', type=list) #: Timestamp when the network was last updated. updated_at = resource.Body('updated_at') #: Indicates the VLAN transparency mode of the network is_vlan_transparent = resource.Body('vlan_transparent', type=bool) class DHCPAgentHostingNetwork(Network): resource_key = 'network' resources_key = 'networks' base_path = '/agents/%(agent_id)s/dhcp-networks' resource_name = 'dhcp-network' # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True # NOTE: No query parameter is supported ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/network_ip_availability.py0000664000175000017500000000376000000000000025531 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class NetworkIPAvailability(resource.Resource): resource_key = 'network_ip_availability' resources_key = 'network_ip_availabilities' base_path = '/network-ip-availabilities' name_attribute = 'network_name' _allow_unknown_attrs_in_body = True # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters( 'ip_version', 'network_id', 'network_name', 'project_id', 'sort_key', 'sort_dir', ) # Properties #: Network ID to use when listing network IP availability. network_id = resource.Body('network_id') #: Network Name for the particular network IP availability. network_name = resource.Body('network_name') #: The Subnet IP Availability of all subnets of a network. #: *Type: list* subnet_ip_availability = resource.Body('subnet_ip_availability', type=list) #: The ID of the project this network IP availability is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The total ips of a network. 
#: *Type: int* total_ips = resource.Body('total_ips', type=int) #: The used or consumed ip of a network #: *Type: int* used_ips = resource.Body('used_ips', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/network_segment_range.py0000664000175000017500000000570300000000000025204 0ustar00zuulzuul00000000000000# Copyright (c) 2018, Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class NetworkSegmentRange(resource.Resource): resource_key = 'network_segment_range' resources_key = 'network_segment_ranges' base_path = '/network_segment_ranges' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'name', 'default', 'shared', 'project_id', 'network_type', 'physical_network', 'minimum', 'maximum', 'used', 'available', 'sort_key', 'sort_dir', ) # Properties #: The network segment range name. name = resource.Body('name') #: The network segment range is loaded from the host configuration file. #: *Type: bool* default = resource.Body('default', type=bool) #: The network segment range is shared with other projects. #: *Type: bool* shared = resource.Body('shared', type=bool) #: The ID of the project associated with this network segment range. 
project_id = resource.Body('project_id') #: The type of network associated with this network segment range, such as #: ``geneve``, ``gre``, ``vlan`` or ``vxlan``. network_type = resource.Body('network_type') #: The name of the physical network associated with this network segment #: range. physical_network = resource.Body('physical_network') #: The minimum segmentation ID for this network segment range. The #: network type defines the segmentation model, VLAN ID for ``vlan`` #: network type and tunnel ID for ``geneve``, ``gre`` and ``vxlan`` #: network types. #: *Type: int* minimum = resource.Body('minimum', type=int) #: The maximum segmentation ID for this network segment range. The #: network type defines the segmentation model, VLAN ID for ``vlan`` #: network type and tunnel ID for ``geneve``, ``gre`` and ``vxlan`` #: network types. #: *Type: int* maximum = resource.Body('maximum', type=int) #: Mapping of which segmentation ID in the range is used by which tenant. #: *Type: dict* used = resource.Body('used', type=dict) #: List of available segmentation IDs in this network segment range. #: *Type: list* available = resource.Body('available', type=list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/pool.py0000664000175000017500000000744400000000000021572 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Pool(resource.Resource): resource_key = 'pool' resources_key = 'pools' base_path = '/lbaas/pools' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'lb_algorithm', 'name', 'protocol', 'provider', 'subnet_id', 'virtual_ip_id', 'listener_id', 'project_id', is_admin_state_up='admin_state_up', load_balancer_id='loadbalancer_id', ) # Properties #: Description for the pool. description = resource.Body('description') #: The ID of the associated health monitors. health_monitor_id = resource.Body('healthmonitor_id') #: The ID of the associated health monitors (LBaaS v1). health_monitor_ids = resource.Body('health_monitors', type=list) #: The statuses of the associated health monitors. health_monitor_status = resource.Body('health_monitor_status', type=list) #: The administrative state of the pool, which is up ``True`` or down #: ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The load-balancer algorithm, which is round-robin, least-connections, #: and so on. This value, which must be supported, is dependent on the #: load-balancer provider. Round-robin must be supported. lb_algorithm = resource.Body('lb_algorithm') #: List of associated listeners. #: *Type: list of dicts which contain the listener IDs* listener_ids = resource.Body('listeners', type=list) #: ID of listener associated with this pool listener_id = resource.Body('listener_id') #: List of associated load balancers. #: *Type: list of dicts which contain the load balancer IDs* load_balancer_ids = resource.Body('loadbalancers', type=list) #: ID of load balancer associated with this pool load_balancer_id = resource.Body('loadbalancer_id') #: List of members that belong to the pool. 
#: *Type: list of dicts which contain the member IDs* member_ids = resource.Body('members', type=list) #: Pool name. Does not have to be unique. name = resource.Body('name') #: The ID of the project this pool is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The protocol of the pool, which is TCP, HTTP, or HTTPS. protocol = resource.Body('protocol') #: The provider name of the load balancer service. provider = resource.Body('provider') #: Human readable description of the status. status = resource.Body('status') #: The status of the network. status_description = resource.Body('status_description') #: The subnet on which the members of the pool will be located. subnet_id = resource.Body('subnet_id') #: Session persistence algorithm that should be used (if any). #: *Type: dict with keys ``type`` and ``cookie_name``* session_persistence = resource.Body('session_persistence') #: The ID of the virtual IP (VIP) address. virtual_ip_id = resource.Body('vip_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/pool_member.py0000664000175000017500000000434700000000000023120 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class PoolMember(resource.Resource): resource_key = 'member' resources_key = 'members' base_path = '/lbaas/pools/%(pool_id)s/members' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'address', 'name', 'protocol_port', 'subnet_id', 'weight', 'project_id', is_admin_state_up='admin_state_up', ) # Properties #: The ID of the owning pool pool_id = resource.URI('pool_id') #: The IP address of the pool member. address = resource.Body('address') #: The administrative state of the pool member, which is up ``True`` or #: down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: Name of the pool member. name = resource.Body('name') #: The ID of the project this pool member is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The port on which the application is hosted. protocol_port = resource.Body('protocol_port', type=int) #: Subnet ID in which to access this pool member. subnet_id = resource.Body('subnet_id') #: A positive integer value that indicates the relative portion of traffic #: that this member should receive from the pool. For example, a member #: with a weight of 10 receives five times as much traffic as a member #: with weight of 2. weight = resource.Body('weight', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/port.py0000664000175000017500000001630100000000000021575 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack.network.v2 import _base from openstack import resource class Port(_base.NetworkResource, tag.TagMixin): resource_key = 'port' resources_key = 'ports' base_path = '/ports' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # NOTE: we skip query on list or datetime fields for now _query_mapping = resource.QueryParameters( 'binding:host_id', 'binding:profile', 'binding:vif_details', 'binding:vif_type', 'binding:vnic_type', 'description', 'device_id', 'device_owner', 'fields', 'fixed_ips', 'id', 'ip_address', 'mac_address', 'name', 'network_id', 'status', 'subnet_id', 'project_id', 'security_groups', 'sort_key', 'sort_dir', is_admin_state_up='admin_state_up', is_port_security_enabled='port_security_enabled', security_group_ids='security_groups', **tag.TagMixin._tag_query_parameters ) # Properties #: Allowed address pairs list. Dictionary key ``ip_address`` is required #: and key ``mac_address`` is optional. allowed_address_pairs = resource.Body('allowed_address_pairs', type=list) #: The ID of the host where the port is allocated. In some cases, #: different implementations can run on different hosts. binding_host_id = resource.Body('binding:host_id') #: A dictionary the enables the application running on the specified #: host to pass and receive vif port-specific information to the plug-in. #: *Type: dict* binding_profile = resource.Body('binding:profile', type=dict) #: Read-only. 
A dictionary that enables the application to pass #: information about functions that the Networking API provides. #: To enable or disable port filtering features such as security group #: and anti-MAC/IP spoofing, specify ``port_filter: True`` or #: ``port_filter: False``. *Type: dict* binding_vif_details = resource.Body('binding:vif_details', type=dict) #: Read-only. The vif type for the specified port. binding_vif_type = resource.Body('binding:vif_type') #: The vnic type that is bound to the neutron port. #: #: In POST and PUT operations, specify a value of ``normal`` (virtual nic), #: ``direct`` (pci passthrough), or ``macvtap`` #: (virtual interface with a tap-like software interface). #: These values support SR-IOV PCI passthrough networking. #: The ML2 plug-in supports the vnic_type. #: #: In GET operations, the binding:vnic_type extended attribute is #: visible to only port owners and administrative users. binding_vnic_type = resource.Body('binding:vnic_type') #: Timestamp when the port was created. created_at = resource.Body('created_at') #: Underlying data plane status of this port. data_plane_status = resource.Body('data_plane_status') #: The port description. description = resource.Body('description') #: Device ID of this port. device_id = resource.Body('device_id') #: Device owner of this port (e.g. ``network:dhcp``). device_owner = resource.Body('device_owner') #: Device profile of this port, refers to Cyborg device-profiles: # https://docs.openstack.org/api-ref/accelerator/v2/index.html# # device-profiles. device_profile = resource.Body('device_profile') #: DNS assignment for the port. dns_assignment = resource.Body('dns_assignment') #: DNS domain assigned to the port. dns_domain = resource.Body('dns_domain') #: DNS name for the port. dns_name = resource.Body('dns_name') #: Extra DHCP options. extra_dhcp_opts = resource.Body('extra_dhcp_opts', type=list) #: IP addresses for the port. Includes the IP address and subnet ID. 
fixed_ips = resource.Body('fixed_ips', type=list) #: The type of hardware offload this port will request when attached to the # network backend. hardware_offload_type = resource.Body('hardware_offload_type') #: Read-only. The ip_allocation indicates when ports use deferred, # immediate or no IP allocation. ip_allocation = resource.Body('ip_allocation') #: The administrative state of the port, which is up ``True`` or #: down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The port security status, which is enabled ``True`` or disabled #: ``False``. *Type: bool* *Default: False* is_port_security_enabled = resource.Body( 'port_security_enabled', type=bool, default=False ) #: The MAC address of an allowed address pair. mac_address = resource.Body('mac_address') #: The port name. name = resource.Body('name') #: The ID of the attached network. network_id = resource.Body('network_id') #: The NUMA affinity policy defined for this port. numa_affinity_policy = resource.Body('numa_affinity_policy') #: The ID of the project who owns the network. Only administrative #: users can specify a project ID other than their own. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: Whether to propagate uplink status of the port. *Type: bool* propagate_uplink_status = resource.Body( 'propagate_uplink_status', type=bool ) #: Read-only. The ID of the QoS policy attached to the network where the # port is bound. qos_network_policy_id = resource.Body('qos_network_policy_id') #: The ID of the QoS policy attached to the port. qos_policy_id = resource.Body('qos_policy_id') #: Read-only. The port-resource-request exposes Placement resources # (i.e.: minimum-bandwidth) and traits (i.e.: vnic-type, physnet) # requested by a port to Nova and Placement. 
resource_request = resource.Body('resource_request', type=dict) #: The IDs of any attached security groups. #: *Type: list of strs of the security group IDs* security_group_ids = resource.Body('security_groups', type=list) #: The port status. Value is ``ACTIVE`` or ``DOWN``. status = resource.Body('status') #: Read-only. The trunk referring to this parent port and its subports. #: Present for trunk parent ports if ``trunk-details`` extension is loaded. #: *Type: dict with keys: trunk_id, sub_ports. #: sub_ports is a list of dicts with keys: #: port_id, segmentation_type, segmentation_id, mac_address* trunk_details = resource.Body('trunk_details', type=dict) #: Timestamp when the port was last updated. updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/port_forwarding.py0000664000175000017500000000344400000000000024023 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class PortForwarding(resource.Resource): name_attribute = "floating_ip_port_forwarding" resource_name = "port forwarding" resource_key = 'port_forwarding' resources_key = 'port_forwardings' base_path = '/floatingips/%(floatingip_id)s/port_forwardings' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'internal_port_id', 'external_port', 'protocol', 'sort_key', 'sort_dir', ) # Properties #: The ID of Floating IP address floatingip_id = resource.URI('floatingip_id') #: The ID of internal port internal_port_id = resource.Body('internal_port_id') #: The internal IP address internal_ip_address = resource.Body('internal_ip_address') #: The internal TCP/UDP/other port number internal_port = resource.Body('internal_port', type=int) #: The external TCP/UDP/other port number external_port = resource.Body('external_port', type=int) #: The protocol protocol = resource.Body('protocol') #: The description description = resource.Body('description') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/qos_bandwidth_limit_rule.py0000664000175000017500000000252700000000000025671 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class QoSBandwidthLimitRule(resource.Resource): resource_key = 'bandwidth_limit_rule' resources_key = 'bandwidth_limit_rules' base_path = '/qos/policies/%(qos_policy_id)s/bandwidth_limit_rules' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The ID of the QoS policy who owns rule. qos_policy_id = resource.URI('qos_policy_id') #: Maximum bandwidth in kbps. max_kbps = resource.Body('max_kbps') #: Maximum burst bandwidth in kbps. max_burst_kbps = resource.Body('max_burst_kbps') #: Traffic direction from the tenant point of view ('egress', 'ingress'). direction = resource.Body('direction') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/qos_dscp_marking_rule.py0000664000175000017500000000215500000000000025165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class QoSDSCPMarkingRule(resource.Resource): resource_key = 'dscp_marking_rule' resources_key = 'dscp_marking_rules' base_path = '/qos/policies/%(qos_policy_id)s/dscp_marking_rules' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The ID of the QoS policy who owns rule. 
qos_policy_id = resource.URI('qos_policy_id') #: DSCP mark field. dscp_mark = resource.Body('dscp_mark') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/qos_minimum_bandwidth_rule.py0000664000175000017500000000240300000000000026217 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class QoSMinimumBandwidthRule(resource.Resource): resource_key = 'minimum_bandwidth_rule' resources_key = 'minimum_bandwidth_rules' base_path = '/qos/policies/%(qos_policy_id)s/minimum_bandwidth_rules' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The ID of the QoS policy who owns rule. qos_policy_id = resource.URI('qos_policy_id') #: Minimum bandwidth in kbps. min_kbps = resource.Body('min_kbps') #: Traffic direction from the tenant point of view. Valid values: 'egress' direction = resource.Body('direction') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/qos_minimum_packet_rate_rule.py0000664000175000017500000000244200000000000026540 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack import resource class QoSMinimumPacketRateRule(resource.Resource): resource_key = 'minimum_packet_rate_rule' resources_key = 'minimum_packet_rate_rules' base_path = '/qos/policies/%(qos_policy_id)s/minimum_packet_rate_rules' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: Traffic direction from the tenant point of view. Valid values: ('any', #: 'egress', 'ingress') direction = resource.Body('direction') #: Minimum packet rate in kpps. min_kpps = resource.Body('min_kpps') #: The ID of the QoS policy who owns rule. qos_policy_id = resource.URI('qos_policy_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/qos_policy.py0000664000175000017500000000434100000000000022773 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.common import tag from openstack import resource from openstack import utils class QoSPolicy(resource.Resource, tag.TagMixin): resource_key = 'policy' resources_key = 'policies' base_path = '/qos/policies' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'name', 'description', 'is_default', 'project_id', 'sort_key', 'sort_dir', is_shared='shared', **tag.TagMixin._tag_query_parameters ) # Properties #: QoS policy name. name = resource.Body('name') #: The ID of the project who owns the network. Only administrative #: users can specify a project ID other than their own. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The QoS policy description. description = resource.Body('description') #: Indicates whether this QoS policy is the default policy for this #: project. #: *Type: bool* is_default = resource.Body('is_default', type=bool) #: Indicates whether this QoS policy is shared across all projects. #: *Type: bool* is_shared = resource.Body('shared', type=bool) #: List of QoS rules applied to this QoS policy. rules = resource.Body('rules') def set_tags(self, session, tags): url = utils.urljoin('/policies', self.id, 'tags') session.put(url, json={'tags': tags}) self._body.attributes.update({'tags': tags}) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/qos_rule_type.py0000664000175000017500000000232000000000000023477 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class QoSRuleType(resource.Resource): resource_key = 'rule_type' resources_key = 'rule_types' base_path = '/qos/rule-types' _allow_unknown_attrs_in_body = True # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters( 'type', 'drivers', 'all_rules', 'all_supported', ) # Properties #: QoS rule type name. type = resource.Body('type', alternate_id=True) #: List of QoS backend drivers supporting this QoS rule type drivers = resource.Body('drivers') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/quota.py0000664000175000017500000001312100000000000021737 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Quota(resource.Resource): resource_key = 'quota' resources_key = 'quotas' base_path = '/quotas' _allow_unknown_attrs_in_body = True # capabilities allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: Flag to check the quota usage before setting the new limit. *Type: bool* check_limit = resource.Body('check_limit', type=bool) #: The maximum amount of floating IPs you can have. *Type: int* floating_ips = resource.Body('floatingip', type=int) #: The maximum amount of health monitors you can create. *Type: int* health_monitors = resource.Body('healthmonitor', type=int) #: The maximum amount of listeners you can create. *Type: int* listeners = resource.Body('listener', type=int) #: The maximum amount of load balancers you can create. *Type: int* load_balancers = resource.Body('loadbalancer', type=int) #: The maximum amount of L7 policies you can create. *Type: int* l7_policies = resource.Body('l7policy', type=int) #: The maximum amount of networks you can create. *Type: int* networks = resource.Body('network', type=int) #: The maximum amount of pools you can create. *Type: int* pools = resource.Body('pool', type=int) #: The maximum amount of ports you can create. *Type: int* ports = resource.Body('port', type=int) #: The ID of the project these quota values are for. project_id = resource.Body('tenant_id', alternate_id=True) #: The maximum amount of RBAC policies you can create. *Type: int* rbac_policies = resource.Body('rbac_policy', type=int) #: The maximum amount of routers you can create. *Type: int* routers = resource.Body('router', type=int) #: The maximum amount of subnets you can create. *Type: int* subnets = resource.Body('subnet', type=int) #: The maximum amount of subnet pools you can create. *Type: int* subnet_pools = resource.Body('subnetpool', type=int) #: The maximum amount of security group rules you can create. 
*Type: int* security_group_rules = resource.Body('security_group_rule', type=int) #: The maximum amount of security groups you can create. *Type: int* security_groups = resource.Body('security_group', type=int) def _prepare_request( self, requires_id=True, prepend_key=False, base_path=None, **kwargs ): _request = super()._prepare_request(requires_id, prepend_key) if self.resource_key in _request.body: _body = _request.body[self.resource_key] else: _body = _request.body if 'id' in _body: del _body['id'] return _request class QuotaDefault(Quota): base_path = '/quotas/%(project)s/default' # capabilities allow_retrieve = True allow_commit = False allow_delete = False allow_list = False # Properties #: The ID of the project. project = resource.URI('project') class QuotaDetails(Quota): base_path = '/quotas/%(project)s/details' # capabilities allow_retrieve = True allow_commit = False allow_delete = False allow_list = False # Properties #: The ID of the project. project = resource.URI('project') #: The maximum amount of floating IPs you can have. *Type: dict* floating_ips = resource.Body('floatingip', type=dict) #: The maximum amount of health monitors you can create. *Type: dict* health_monitors = resource.Body('healthmonitor', type=dict) #: The maximum amount of listeners you can create. *Type: dict* listeners = resource.Body('listener', type=dict) #: The maximum amount of load balancers you can create. *Type: dict* load_balancers = resource.Body('loadbalancer', type=dict) #: The maximum amount of L7 policies you can create. *Type: dict* l7_policies = resource.Body('l7policy', type=dict) #: The maximum amount of networks you can create. *Type: dict* networks = resource.Body('network', type=dict) #: The maximum amount of pools you can create. *Type: dict* pools = resource.Body('pool', type=dict) #: The maximum amount of ports you can create. *Type: dict* ports = resource.Body('port', type=dict) #: The ID of the project these quota values are for. 
project_id = resource.Body('project_id', alternate_id=True) #: The maximum amount of RBAC policies you can create. *Type: dict* rbac_policies = resource.Body('rbac_policy', type=dict) #: The maximum amount of routers you can create. *Type: int* routers = resource.Body('router', type=dict) #: The maximum amount of subnets you can create. *Type: dict* subnets = resource.Body('subnet', type=dict) #: The maximum amount of subnet pools you can create. *Type: dict* subnet_pools = resource.Body('subnetpool', type=dict) #: The maximum amount of security group rules you can create. *Type: dict* security_group_rules = resource.Body('security_group_rule', type=dict) #: The maximum amount of security groups you can create. *Type: dict* security_groups = resource.Body('security_group', type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/rbac_policy.py0000664000175000017500000000325000000000000023076 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class RBACPolicy(resource.Resource): resource_key = 'rbac_policy' resources_key = 'rbac_policies' base_path = '/rbac-policies' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'action', 'object_id', 'object_type', 'project_id', 'target_project_id', target_project_id='target_tenant', ) # Properties #: ID of the object that this RBAC policy affects. object_id = resource.Body('object_id') #: The ID of the project this RBAC will be enforced. target_project_id = resource.Body('target_tenant') #: The owner project ID. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: Type of the object that this RBAC policy affects. object_type = resource.Body('object_type') #: Action for the RBAC policy. action = resource.Body('action') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/router.py0000664000175000017500000002175600000000000022143 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.common import tag from openstack import exceptions from openstack.network.v2 import _base from openstack import resource from openstack import utils class Router(_base.NetworkResource, tag.TagMixin): resource_key = 'router' resources_key = 'routers' base_path = '/routers' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # NOTE: We don't support query on datetime, list or dict fields _query_mapping = resource.QueryParameters( 'description', 'flavor_id', 'name', 'status', 'project_id', 'sort_key', 'sort_dir', is_admin_state_up='admin_state_up', is_distributed='distributed', is_ha='ha', **tag.TagMixin._tag_query_parameters ) # Properties #: Availability zone hints to use when scheduling the router. #: *Type: list of availability zone names* availability_zone_hints = resource.Body( 'availability_zone_hints', type=list ) #: Availability zones for the router. #: *Type: list of availability zone names* availability_zones = resource.Body('availability_zones', type=list) #: Timestamp when the router was created. created_at = resource.Body('created_at') #: The router description. description = resource.Body('description') #: The ndp proxy state of the router enable_ndp_proxy = resource.Body('enable_ndp_proxy', type=bool) #: The ``network_id``, for the external gateway. *Type: dict* external_gateway_info = resource.Body('external_gateway_info', type=dict) #: The ID of the flavor. flavor_id = resource.Body('flavor_id') #: The administrative state of the router, which is up ``True`` #: or down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The distributed state of the router, which is distributed ``True`` #: or not ``False``. *Type: bool* is_distributed = resource.Body('distributed', type=bool) #: The highly-available state of the router, which is highly available #: ``True`` or not ``False``. 
*Type: bool* is_ha = resource.Body('ha', type=bool) #: The router name. name = resource.Body('name') #: The ID of the project this router is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: Revision number of the router. *Type: int* revision_number = resource.Body('revision', type=int) #: The extra routes configuration for the router. routes = resource.Body('routes', type=list) #: The router status. status = resource.Body('status') #: Timestamp when the router was created. updated_at = resource.Body('updated_at') def _put(self, session, url, body): resp = session.put(url, json=body) exceptions.raise_from_response(resp) return resp def add_interface(self, session, **body): """Add an internal interface to a logical router. :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. :raises: :class:`~openstack.exceptions.SDKException` on error. """ url = utils.urljoin(self.base_path, self.id, 'add_router_interface') resp = self._put(session, url, body) return resp.json() def remove_interface(self, session, **body): """Remove an internal interface from a logical router. :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. :raises: :class:`~openstack.exceptions.SDKException` on error. """ url = utils.urljoin(self.base_path, self.id, 'remove_router_interface') resp = self._put(session, url, body) return resp.json() def add_extra_routes(self, session, body) -> 'Router': """Add extra routes to a logical router. :param session: The session to communicate through. 
:type session: :class:`~keystoneauth1.adapter.Adapter` :param dict body: The request body as documented in the api-ref. :returns: The response as a Router object with the added extra routes. :raises: :class:`~openstack.exceptions.SDKException` on error. """ url = utils.urljoin(self.base_path, self.id, 'add_extraroutes') resp = self._put(session, url, body) self._translate_response(resp) return self def remove_extra_routes(self, session, body) -> 'Router': """Remove extra routes from a logical router. :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param dict body: The request body as documented in the api-ref. :returns: The response as a Router object with the extra routes left. :raises: :class:`~openstack.exceptions.SDKException` on error. """ url = utils.urljoin(self.base_path, self.id, 'remove_extraroutes') resp = self._put(session, url, body) self._translate_response(resp) return self def add_gateway(self, session, **body): """Add an external gateway to a logical router. :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. """ url = utils.urljoin(self.base_path, self.id, 'add_gateway_router') resp = session.put(url, json=body) return resp.json() def remove_gateway(self, session, **body): """Remove an external gateway from a logical router. :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. """ url = utils.urljoin(self.base_path, self.id, 'remove_gateway_router') resp = session.put(url, json=body) return resp.json() def add_external_gateways(self, session, body): """Add external gateways to a router. :param session: The session to communicate through. 
:type session: :class:`~keystoneauth1.adapter.Adapter` :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. """ url = utils.urljoin(self.base_path, self.id, 'add_external_gateways') resp = session.put(url, json=body) self._translate_response(resp) return self def update_external_gateways(self, session, body): """Update external gateways of a router. :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. """ url = utils.urljoin( self.base_path, self.id, 'update_external_gateways' ) resp = session.put(url, json=body) self._translate_response(resp) return self def remove_external_gateways(self, session, body): """Remove external gateways from a router. :param session: The session to communicate through. :type session: :class:`~keystoneauth1.adapter.Adapter` :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. """ url = utils.urljoin( self.base_path, self.id, 'remove_external_gateways' ) resp = session.put(url, json=body) self._translate_response(resp) return self class L3AgentRouter(Router): resource_key = 'router' resources_key = 'routers' base_path = '/agents/%(agent_id)s/l3-routers' resource_name = 'l3-router' # capabilities allow_create = False allow_retrieve = True allow_commit = False allow_delete = False allow_list = True # NOTE: No query parameter is supported ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/security_group.py0000664000175000017500000000416200000000000023676 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack.network.v2 import _base from openstack import resource class SecurityGroup(_base.NetworkResource, tag.TagMixin): resource_key = 'security_group' resources_key = 'security_groups' base_path = '/security-groups' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'fields', 'id', 'name', 'stateful', 'project_id', 'tenant_id', 'revision_number', 'sort_dir', 'sort_key', **tag.TagMixin._tag_query_parameters ) # Properties #: Timestamp when the security group was created. created_at = resource.Body('created_at') #: The security group description. description = resource.Body('description') #: The security group name. name = resource.Body('name') #: Whether the security group is stateful or not. stateful = resource.Body('stateful') #: The ID of the project this security group is associated with. project_id = resource.Body('project_id') #: A list of #: :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` #: objects. *Type: list* security_group_rules = resource.Body('security_group_rules', type=list) #: The ID of the project this security group is associated with. tenant_id = resource.Body('tenant_id', deprecated=True) #: Timestamp when the security group was last updated. 
updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/security_group_rule.py0000664000175000017500000001144500000000000024727 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack.network.v2 import _base from openstack import resource class SecurityGroupRule(_base.NetworkResource, tag.TagMixin): resource_key = 'security_group_rule' resources_key = 'security_group_rules' base_path = '/security-group-rules' # capabilities allow_create = True allow_fetch = True allow_commit = False allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'direction', 'id', 'protocol', 'remote_group_id', 'security_group_id', 'remote_address_group_id', 'port_range_max', 'port_range_min', 'remote_ip_prefix', 'revision_number', 'project_id', 'tenant_id', 'sort_dir', 'sort_key', ether_type='ethertype', **tag.TagMixin._tag_query_parameters ) # Properties #: Timestamp when the security group rule was created. created_at = resource.Body('created_at') #: The security group rule description. description = resource.Body('description') #: ``ingress`` or ``egress``: The direction in which the security group #: rule is applied. For a compute instance, an ingress security group #: rule is applied to incoming ingress traffic for that instance. 
#: An egress rule is applied to traffic leaving the instance. direction = resource.Body('direction') #: Must be IPv4 or IPv6, and addresses represented in CIDR must match #: the ingress or egress rules. ether_type = resource.Body('ethertype') #: The maximum port number in the range that is matched by the #: security group rule. The port_range_min attribute constrains #: the port_range_max attribute. If the protocol is ICMP, this #: value must be an ICMP type. port_range_max = resource.Body('port_range_max', type=int) #: The minimum port number in the range that is matched by the #: security group rule. If the protocol is TCP or UDP, this value #: must be less than or equal to the value of the port_range_max #: attribute. If the protocol is ICMP, this value must be an ICMP type. port_range_min = resource.Body('port_range_min', type=int) #: The ID of the project this security group rule is associated with. project_id = resource.Body('project_id') #: The protocol that is matched by the security group rule. #: Valid values are ``null``, ``tcp``, ``udp``, and ``icmp``. protocol = resource.Body('protocol') #: The remote security group ID to be associated with this security #: group rule. You can specify either ``remote_group_id`` or #: ``remote_address_group_id`` or ``remote_ip_prefix``. remote_group_id = resource.Body('remote_group_id') #: The remote address group ID to be associated with this security #: group rule. You can specify either ``remote_group_id`` or #: ``remote_address_group_id`` or ``remote_ip_prefix``. remote_address_group_id = resource.Body('remote_address_group_id') #: The remote IP prefix to be associated with this security group rule. #: You can specify either ``remote_group_id`` or #: ``remote_address_group_id`` or ``remote_ip_prefix``. #: This attribute matches the specified IP prefix as the source or #: destination IP address of the IP packet depending on direction. 
remote_ip_prefix = resource.Body('remote_ip_prefix') #: The security group ID to associate with this security group rule. security_group_id = resource.Body('security_group_id') #: The ID of the project this security group rule is associated with. tenant_id = resource.Body('tenant_id', deprecated=True) #: Timestamp when the security group rule was last updated. updated_at = resource.Body('updated_at') def _prepare_request(self, *args, **kwargs): _request = super()._prepare_request(*args, **kwargs) # Old versions of Neutron do not handle being passed a # remote_address_group_id and raise and error. Remove it from # the body if it is blank. if not self.remote_address_group_id: if 'security_group_rule' in _request.body: _rule = _request.body['security_group_rule'] _rule.pop('remote_address_group_id', None) return _request ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/segment.py0000664000175000017500000000367200000000000022262 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Segment(resource.Resource): resource_key = 'segment' resources_key = 'segments' base_path = '/segments' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'name', 'network_id', 'network_type', 'physical_network', 'segmentation_id', 'sort_key', 'sort_dir', ) # Properties #: The segment description. description = resource.Body('description') #: The segment name. name = resource.Body('name') #: The ID of the network associated with this segment. network_id = resource.Body('network_id') #: The type of network associated with this segment, such as #: ``flat``, ``geneve``, ``gre``, ``local``, ``vlan`` or ``vxlan``. network_type = resource.Body('network_type') #: The name of the physical network associated with this segment. physical_network = resource.Body('physical_network') #: The segmentation ID for this segment. The network type #: defines the segmentation model, VLAN ID for ``vlan`` network type #: and tunnel ID for ``geneve``, ``gre`` and ``vxlan`` network types. #: *Type: int* segmentation_id = resource.Body('segmentation_id', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/service_profile.py0000664000175000017500000000313400000000000023771 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ServiceProfile(resource.Resource): resource_key = 'service_profile' resources_key = 'service_profiles' base_path = '/service_profiles' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'driver', 'project_id', is_enabled='enabled', ) # Properties #: Description of the service flavor profile. description = resource.Body('description') #: Provider driver for the service flavor profile driver = resource.Body('driver') #: Sets enabled flag is_enabled = resource.Body('enabled', type=bool) #: Metainformation of the service flavor profile meta_info = resource.Body('metainfo') #: The owner project ID project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/service_provider.py0000664000175000017500000000241400000000000024163 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class ServiceProvider(resource.Resource): resources_key = 'service_providers' base_path = '/service-providers' _allow_unknown_attrs_in_body = True # Capabilities allow_create = False allow_fetch = False allow_commit = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters( 'service_type', 'name', is_default='default', ) # Properties #: Service type (FIREWALL, FLAVORS, METERING, QOS, etc..) service_type = resource.Body('service_type') #: Name of the service type name = resource.Body('name') #: The default value of service type is_default = resource.Body('default', type=bool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/sfc_flow_classifier.py0000664000175000017500000000705700000000000024627 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class SfcFlowClassifier(resource.Resource): resource_key = 'flow_classifier' resources_key = 'flow_classifiers' base_path = '/sfc/flow_classifiers' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'name', 'project_id', 'tenant_id', 'ethertype', 'protocol', 'source_port_range_min', 'source_port_range_max', 'destination_port_range_min', 'destination_port_range_max', 'logical_source_port', 'logical_destination_port', ) # Properties #: Human-readable description for the resource. description = resource.Body('description') #: Human-readable name of the resource. Default is an empty string. name = resource.Body('name') #: Must be IPv4 or IPv6, and addresses represented in CIDR must match # the ingress or egress rules. ethertype = resource.Body('ethertype') #: The IP protocol can be represented by a string, an integer, or null. #: Valid values: any (0), ah (51), dccp (33), egp (8), esp (50), gre (47), #: icmp (1), icmpv6 (58), igmp (2), ipip (4), ipv6-encap (41), #: ipv6-frag (44), ipv6-icmp (58), ipv6-nonxt (59), ipv6-opts (60), #: ipv6-route (43), ospf (89), pgm (113), rsvp (46), sctp (132), tcp (6), #: udp (17), udplite (136), vrrp (112). protocol = resource.Body('protocol') #: Minimum source protocol port. source_port_range_min = resource.Body('source_port_range_min', type=int) #: Maximum source protocol port. source_port_range_max = resource.Body('source_port_range_max', type=int) #: Minimum destination protocol port. destination_port_range_min = resource.Body( 'destination_port_range_min', type=int ) #: Maximum destination protocol port. destination_port_range_max = resource.Body( 'destination_port_range_max', type=int ) #: The source IP prefix. source_ip_prefix = resource.Body('source_ip_prefix') #: The destination IP prefix. 
destination_ip_prefix = resource.Body('destination_ip_prefix') #: The UUID of the source logical port. logical_source_port = resource.Body('logical_source_port') #: The UUID of the destination logical port. logical_destination_port = resource.Body('logical_destination_port') #: A dictionary of L7 parameters, in the form of #: logical_source_network: uuid, logical_destination_network: uuid. l7_parameters = resource.Body('l7_parameters', type=dict) #: Summary field of a Flow Classifier, composed of the #: protocol, source protcol port, destination ptocolo port, #: logical_source_port, logical_destination_port and #: l7_parameters summary = resource.Computed('summary', default='') project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/sfc_port_chain.py0000664000175000017500000000336300000000000023576 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class SfcPortChain(resource.Resource): resource_key = 'port_chain' resources_key = 'port_chains' base_path = '/sfc/port_chains' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'name', 'project_id', 'tenant_id', ) # Properties #: Human-readable description for the resource. description = resource.Body('description') #: Human-readable name of the resource. Default is an empty string. name = resource.Body('name') #: List of port-pair-group UUIDs. port_pair_groups = resource.Body('port_pair_groups', type=list) #: List of flow-classifier UUIDs. flow_classifiers = resource.Body('flow_classifiers', type=list) #: A dictionary of chain parameters, correlation values can be #: mpls and nsh, symmetric can be True or False. chain_parameters = resource.Body('chain_parameters', type=dict) project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/sfc_port_pair.py0000664000175000017500000000342000000000000023441 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class SfcPortPair(resource.Resource): resource_key = 'port_pair' resources_key = 'port_pairs' base_path = '/sfc/port_pairs' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'name', 'egress', 'ingress', 'project_id', 'tenant_id', ) # Properties #: Human-readable description for the resource. description = resource.Body('description') #: Human-readable name of the resource. Default is an empty string. name = resource.Body('name') #: The UUID of the ingress Neutron port. ingress = resource.Body('ingress') #: The UUID of the egress Neutron port. egress = resource.Body('egress') #: A dictionary of service function parameters, correlation values can be #: mpls and nsh, weight which can be an int. service_function_parameters = resource.Body( 'service_function_parameters', type=dict ) project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/sfc_port_pair_group.py0000664000175000017500000000416100000000000024660 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class SfcPortPairGroup(resource.Resource): resource_key = 'port_pair_group' resources_key = 'port_pair_groups' base_path = '/sfc/port_pair_groups' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'name', 'project_id', 'tenant_id', ) # Properties #: Human-readable description for the resource. description = resource.Body('description') #: Human-readable name of the resource. Default is an empty string. name = resource.Body('name') #: List of port-pair UUIDs. port_pairs = resource.Body('port_pairs', type=list) #: Dictionary of port pair group parameters, in the form of #: lb_fields: list of regex (eth|ip|tcp|udp)_(src|dst)), #: ppg_n_tuple_mapping: ingress_n_tuple or egress_n_tuple. #: The ingress or egress tuple is a dict with the following keys: #: source_ip_prefix, destination_ip_prefix, source_port_range_min, #: source_port_range_max, destination_port_range_min, #: destination_port_range_max. port_pair_group_parameters = resource.Body( 'port_pair_group_parameters', type=dict ) #: True if passive Tap service functions support is enabled, #: default is False. is_tap_enabled = resource.Body('tap_enabled', type=bool) project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/sfc_service_graph.py0000664000175000017500000000302400000000000024263 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class SfcServiceGraph(resource.Resource): resource_key = 'service_graph' resources_key = 'service_graphs' base_path = '/sfc/service_graphs' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'name', 'project_id', 'tenant_id', ) # Properties #: Human-readable description for the resource. description = resource.Body('description') #: Human-readable name of the resource. Default is an empty string. name = resource.Body('name') #: A dictionary where the key is the source port chain and the #: value is a list of destination port chains. port_chains = resource.Body('port_chains') project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/subnet.py0000664000175000017500000000757400000000000022125 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack.network.v2 import _base from openstack import resource class Subnet(_base.NetworkResource, tag.TagMixin): resource_key = 'subnet' resources_key = 'subnets' base_path = '/subnets' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # NOTE: Query on list or datetime fields are currently not supported. _query_mapping = resource.QueryParameters( 'cidr', 'description', 'gateway_ip', 'ip_version', 'ipv6_address_mode', 'ipv6_ra_mode', 'name', 'network_id', 'segment_id', 'dns_publish_fixed_ip', 'project_id', 'sort_key', 'sort_dir', is_dhcp_enabled='enable_dhcp', subnet_pool_id='subnetpool_id', use_default_subnet_pool='use_default_subnetpool', **tag.TagMixin._tag_query_parameters ) # Properties #: List of allocation pools each of which has a start and an end address #: for this subnet allocation_pools = resource.Body('allocation_pools', type=list) #: The CIDR. cidr = resource.Body('cidr') #: Timestamp when the subnet was created. created_at = resource.Body('created_at') #: The subnet description. description = resource.Body('description') #: A list of DNS nameservers. dns_nameservers = resource.Body('dns_nameservers', type=list) #: Whether to publish DNS records for fixed IPs dns_publish_fixed_ip = resource.Body('dns_publish_fixed_ip', type=bool) #: The gateway IP address. gateway_ip = resource.Body('gateway_ip') #: A list of host routes. host_routes = resource.Body('host_routes', type=list) #: The IP version, which is 4 or 6. #: *Type: int* ip_version = resource.Body('ip_version', type=int) #: The IPv6 address modes which are 'dhcpv6-stateful', 'dhcpv6-stateless' #: or 'slaac'. ipv6_address_mode = resource.Body('ipv6_address_mode') #: The IPv6 router advertisements modes which can be 'slaac', #: 'dhcpv6-stateful', 'dhcpv6-stateless'. 
ipv6_ra_mode = resource.Body('ipv6_ra_mode') #: Set to ``True`` if DHCP is enabled and ``False`` if DHCP is disabled. #: *Type: bool* is_dhcp_enabled = resource.Body('enable_dhcp', type=bool) #: The subnet name. name = resource.Body('name') #: The ID of the attached network. network_id = resource.Body('network_id') #: The prefix length to use for subnet allocation from a subnet pool prefix_length = resource.Body('prefixlen') #: The ID of the project this subnet is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The ID of the segment this subnet is associated with. segment_id = resource.Body('segment_id') #: Service types for this subnet service_types = resource.Body('service_types', type=list) #: The subnet pool ID from which to obtain a CIDR. subnet_pool_id = resource.Body('subnetpool_id') #: Timestamp when the subnet was last updated. updated_at = resource.Body('updated_at') #: Whether to use the default subnet pool to obtain a CIDR. use_default_subnet_pool = resource.Body( 'use_default_subnetpool', type=bool ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/subnet_pool.py0000664000175000017500000000707700000000000023154 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.common import tag from openstack import resource class SubnetPool(resource.Resource, tag.TagMixin): resource_key = 'subnetpool' resources_key = 'subnetpools' base_path = '/subnetpools' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'address_scope_id', 'description', 'ip_version', 'is_default', 'name', 'project_id', 'sort_key', 'sort_dir', is_shared='shared', **tag.TagMixin._tag_query_parameters ) # Properties #: The ID of the address scope associated with the subnet pool. address_scope_id = resource.Body('address_scope_id') #: Timestamp when the subnet pool was created. created_at = resource.Body('created_at') #: The length of the prefix to allocate when the cidr or prefixlen #: attributes are omitted when creating a subnet. *Type: int* default_prefix_length = resource.Body('default_prefixlen', type=int) #: A per-project quota on the prefix space that can be allocated #: from the subnet pool for project subnets. For IPv4 subnet pools, #: default_quota is measured in units of /32. For IPv6 subnet pools, #: default_quota is measured units of /64. All projects that use the #: subnet pool have the same prefix quota applied. *Type: int* default_quota = resource.Body('default_quota', type=int) #: The subnet pool description. description = resource.Body('description') #: Read-only. The IP address family of the list of prefixes. #: *Type: int* ip_version = resource.Body('ip_version', type=int) #: Whether or not this is the default subnet pool. #: *Type: bool* is_default = resource.Body('is_default', type=bool) #: Indicates whether this subnet pool is shared across all projects. #: *Type: bool* is_shared = resource.Body('shared', type=bool) #: The maximum prefix length that can be allocated from the #: subnet pool. 
*Type: int* maximum_prefix_length = resource.Body('max_prefixlen', type=int) #: The minimum prefix length that can be allocated from the #: subnet pool. *Type: int* minimum_prefix_length = resource.Body('min_prefixlen', type=int) #: The subnet pool name. name = resource.Body('name') #: The ID of the project that owns the subnet pool. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: A list of subnet prefixes that are assigned to the subnet pool. #: The adjacent prefixes are merged and treated as a single prefix. #: *Type: list* prefixes = resource.Body('prefixes', type=list) #: Revision number of the subnet pool. *Type: int* revision_number = resource.Body('revision_number', type=int) #: Timestamp when the subnet pool was last updated. updated_at = resource.Body('updated_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/tap_flow.py0000664000175000017500000000350500000000000022426 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class TapFlow(resource.Resource): """Tap Flow""" resource_key = 'tap_flow' resources_key = 'tap_flows' base_path = '/taas/tap_flows' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _allow_unknown_attrs_in_body = True _query_mapping = resource.QueryParameters( "sort_key", "sort_dir", 'name', 'project_id', ) # Properties #: The ID of the tap flow. id = resource.Body('id') #: The tap flow's name. name = resource.Body('name') #: The tap flow's description. description = resource.Body('description') #: The ID of the project that owns the tap flow. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The id of the tap_service with which the tap flow is associated tap_service_id = resource.Body('tap_service_id') #: The direction of the tap flow. direction = resource.Body('direction') #: The status for the tap flow. status = resource.Body('status') #: The id of the port the tap flow is associated with source_port = resource.Body('source_port') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/tap_mirror.py0000664000175000017500000000347500000000000022777 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class TapMirror(resource.Resource): """Tap Mirror""" resource_key = 'tap_mirror' resources_key = 'tap_mirrors' base_path = '/taas/tap_mirrors' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _allow_unknown_attrs_in_body = True _query_mapping = resource.QueryParameters( "sort_key", "sort_dir", 'name', 'project_id' ) # Properties #: The ID of the Tap Mirror. id = resource.Body('id') #: The Tap Mirror name. name = resource.Body('name') #: The Tap Mirror description. description = resource.Body('description') #: The ID of the project that owns the Tap Mirror. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The id of the port the Tap Mirror is associated with port_id = resource.Body('port_id') #: The status for the tap service. directions = resource.Body('directions') #: The destination IP address of the Tap Mirror remote_ip = resource.Body('remote_ip') #: The type of the Tap Mirror, it can be gre or erspanv1 mirror_type = resource.Body('mirror_type') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/tap_service.py0000664000175000017500000000316400000000000023120 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class TapService(resource.Resource): """Tap Service""" resource_key = 'tap_service' resources_key = 'tap_services' base_path = '/taas/tap_services' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _allow_unknown_attrs_in_body = True _query_mapping = resource.QueryParameters( "sort_key", "sort_dir", 'name', 'project_id' ) # Properties #: The ID of the tap service. id = resource.Body('id') #: The tap service name. name = resource.Body('name') #: The tap service description. description = resource.Body('description') #: The ID of the project that owns the tap service. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The id of the port the tap service is associated with port_id = resource.Body('port_id') #: The status for the tap service. status = resource.Body('status') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/trunk.py0000664000175000017500000000571000000000000021756 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.common import tag from openstack import exceptions from openstack import resource from openstack import utils class Trunk(resource.Resource, tag.TagMixin): resource_key = 'trunk' resources_key = 'trunks' base_path = '/trunks' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'name', 'description', 'port_id', 'status', 'sub_ports', 'project_id', is_admin_state_up='admin_state_up', **tag.TagMixin._tag_query_parameters ) # Properties #: Trunk name. name = resource.Body('name') #: The ID of the project who owns the trunk. Only administrative #: users can specify a project ID other than their own. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The trunk description. description = resource.Body('description') #: The administrative state of the port, which is up ``True`` or #: down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The ID of the trunk's parent port port_id = resource.Body('port_id') #: The status for the trunk. Possible values are ACTIVE, DOWN, BUILD, #: DEGRADED, and ERROR. status = resource.Body('status') #: A list of ports associated with the trunk. 
sub_ports = resource.Body('sub_ports', type=list) def add_subports(self, session, subports): url = utils.urljoin('/trunks', self.id, 'add_subports') resp = session.put(url, json={'sub_ports': subports}) exceptions.raise_from_response(resp) self._body.attributes.update(resp.json()) return self def delete_subports(self, session, subports): url = utils.urljoin('/trunks', self.id, 'remove_subports') resp = session.put(url, json={'sub_ports': subports}) exceptions.raise_from_response(resp) self._body.attributes.update(resp.json()) return self def get_subports(self, session): url = utils.urljoin('/trunks', self.id, 'get_subports') resp = session.get(url) exceptions.raise_from_response(resp) self._body.attributes.update(resp.json()) return resp.json() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/vpn_endpoint_group.py0000664000175000017500000000344400000000000024534 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class VpnEndpointGroup(resource.Resource): resource_key = 'endpoint_group' resources_key = 'endpoint_groups' base_path = '/vpn/endpoint-groups' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'name', 'project_id', 'tenant_id', type='endpoint_type', ) # Properties #: Human-readable description for the resource. description = resource.Body('description') #: List of endpoints of the same type, for the endpoint group. #: The values will depend on type. endpoints = resource.Body('endpoints', type=list) #: Human-readable name of the resource. Default is an empty string. name = resource.Body('name') project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The type of the endpoints in the group. A valid value is subnet, cidr, #: network, router, or vlan. Only subnet and cidr are supported at this #: moment. type = resource.Body('type') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/vpn_ike_policy.py0000664000175000017500000000577100000000000023634 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class VpnIkePolicy(resource.Resource): """VPN IKE policy extension.""" resource_key = 'ikepolicy' resources_key = 'ikepolicies' base_path = '/vpn/ikepolicies' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'auth_algorithm', 'description', 'encryption_algorithm', 'ike_version', 'name', 'pfs', 'project_id', 'phase1_negotiation_mode', ) # Properties #: The authentication hash algorithm. Valid values are sha1, # sha256, sha384, sha512. The default is sha1. auth_algorithm = resource.Body('auth_algorithm') #: A human-readable description for the resource. # Default is an empty string. description = resource.Body('description') #: The encryption algorithm. A valid value is 3des, aes-128, # aes-192, aes-256, and so on. Default is aes-128. encryption_algorithm = resource.Body('encryption_algorithm') #: The IKE version. A valid value is v1 or v2. Default is v1. ike_version = resource.Body('ike_version') #: The lifetime of the security association. The lifetime consists # of a unit and integer value. You can omit either the unit or value # portion of the lifetime. Default unit is seconds and # default value is 3600. lifetime = resource.Body('lifetime', type=dict) #: Human-readable name of the resource. Default is an empty string. name = resource.Body('name') #: Perfect forward secrecy (PFS). A valid value is Group2, # Group5, Group14, and so on. Default is Group5. pfs = resource.Body('pfs') #: The ID of the project. project_id = resource.Body('project_id') #: The IKE mode. A valid value is main, which is the default. phase1_negotiation_mode = resource.Body('phase1_negotiation_mode') #: The units for the lifetime of the security association. # The lifetime consists of a unit and integer value. # You can omit either the unit or value portion of the lifetime. # Default unit is seconds and default value is 3600. 
units = resource.Body('units') #: The lifetime value, as a positive integer. The lifetime # consists of a unit and integer value. # You can omit either the unit or value portion of the lifetime. # Default unit is seconds and default value is 3600. value = resource.Body('value', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/vpn_ipsec_policy.py0000664000175000017500000000622300000000000024160 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class VpnIpsecPolicy(resource.Resource): resource_key = 'ipsecpolicy' resources_key = 'ipsecpolicies' base_path = '/vpn/ipsecpolicies' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'auth_algorithm', 'description', 'encapsulation_mode', 'encryption_algorithm', 'name', 'pfs', 'project_id', 'phase1_negotiation_mode', 'transform_protocol', ) # Properties #: The authentication hash algorithm. Valid values are sha1, # sha256, sha384, sha512. The default is sha1. auth_algorithm = resource.Body('auth_algorithm') #: A human-readable description for the resource. # Default is an empty string. description = resource.Body('description') #: The encapsulation mode. A valid value is tunnel or transport encapsulation_mode = resource.Body('encapsulation_mode') #: The encryption algorithm. 
A valid value is 3des, aes-128, # aes-192, aes-256, and so on. Default is aes-128. encryption_algorithm = resource.Body('encryption_algorithm') #: The lifetime of the security association. The lifetime consists # of a unit and integer value. You can omit either the unit or value # portion of the lifetime. Default unit is seconds and # default value is 3600. lifetime = resource.Body('lifetime', type=dict) #: Human-readable name of the resource. Default is an empty string. name = resource.Body('name') #: Perfect forward secrecy (PFS). A valid value is Group2, # Group5, Group14, and so on. Default is Group5. pfs = resource.Body('pfs') #: The ID of the project. project_id = resource.Body('project_id') #: The IKE mode. A valid value is main, which is the default. phase1_negotiation_mode = resource.Body('phase1_negotiation_mode') #: The transform protocol. A valid value is ESP, AH, or AH- ESP. transform_protocol = resource.Body('transform_protocol') #: The units for the lifetime of the security association. # The lifetime consists of a unit and integer value. # You can omit either the unit or value portion of the lifetime. # Default unit is seconds and default value is 3600. units = resource.Body('units') #: The lifetime value, as a positive integer. The lifetime # consists of a unit and integer value. # You can omit either the unit or value portion of the lifetime. # Default unit is seconds and default value is 3600. value = resource.Body('value', type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/vpn_ipsec_site_connection.py0000664000175000017500000001223500000000000026044 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class VpnIPSecSiteConnection(resource.Resource): resource_key = 'ipsec_site_connection' resources_key = 'ipsec_site_connections' base_path = '/vpn/ipsec-site-connections' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'auth_mode', 'description', 'ikepolicy_id', 'ipsecpolicy_id', 'initiator', 'local_ep_group_id', 'peer_address', 'local_id', 'mtu', 'name', 'peer_id', 'project_id', 'psk', 'peer_ep_group_id', 'route_mode', 'vpnservice_id', 'status', is_admin_state_up='admin_state_up', ) # Properties #: The dead peer detection (DPD) action. # A valid value is clear, hold, restart, # disabled, or restart-by-peer. Default value is hold. action = resource.Body('action') #: The authentication mode. A valid value # is psk, which is the default. auth_mode = resource.Body('auth_mode') #: A human-readable description for the resource. # Default is an empty string. description = resource.Body('description') #: A dictionary with dead peer detection (DPD) protocol controls. dpd = resource.Body('dpd', type=dict) #: The administrative state of the resource, # which is up (true) or down (false). is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The ID of the IKE policy. ikepolicy_id = resource.Body('ikepolicy_id') #: Indicates whether this VPN can only respond # to connections or both respond # to and initiate connections. A valid value is # response- only or bi-directional. Default is bi-directional. 
initiator = resource.Body('initiator') #: The ID of the IPsec policy. ipsecpolicy_id = resource.Body('ipsecpolicy_id') #: The dead peer detection (DPD) interval, in seconds. # A valid value is a positive integer. Default is 30. interval = resource.Body('interval', type=int) #: The ID for the endpoint group that contains # private subnets for the local side of the connection. # Yo must specify this parameter with the # peer_ep_group_id parameter unless in backward- compatible # mode where peer_cidrs is provided with # a subnet_id for the VPN service. local_ep_group_id = resource.Body('local_ep_group_id') #: The peer gateway public IPv4 or IPv6 address or FQDN. peer_address = resource.Body('peer_address') #: An ID to be used instead of the external IP address for # a virtual router used in traffic between # instances on different networks in east-west traffic. # Most often, local ID would be domain # name, email address, etc. If this is not configured # then the external IP address will be used as the ID. local_id = resource.Body('local_id') #: The maximum transmission unit (MTU) # value to address fragmentation. Minimum value # is 68 for IPv4, and 1280 for IPv6. mtu = resource.Body('mtu', type=int) #: Human-readable name of the resource. Default is an empty string. name = resource.Body('name') #: The peer router identity for authentication. # A valid value is an IPv4 address, IPv6 address, e-mail address, # key ID, or FQDN. Typically, this value matches # the peer_address value. peer_id = resource.Body('peer_id') #: (Deprecated) Unique list of valid peer private # CIDRs in the form < net_address > / < prefix > . peer_cidrs = resource.Body('peer_cidrs', type=list) #: The ID of the project. project_id = resource.Body('tenant_id') #: The pre-shared key. A valid value is any string. psk = resource.Body('psk') #: The ID for the endpoint group that contains # private CIDRs in the form < net_address > / < prefix > # for the peer side of the connection. 
You must # specify this parameter with the local_ep_group_id # parameter unless in backward-compatible mode # where peer_cidrs is provided with a subnet_id for the VPN service. peer_ep_group_id = resource.Body('peer_ep_group_id') #: The route mode. A valid value is static, which is the default. route_mode = resource.Body('route_mode') #: The site connection status status = resource.Body('status') #: The dead peer detection (DPD) timeout # in seconds. A valid value is a # positive integer that is greater # than the DPD interval value. Default is 120. timeout = resource.Body('timeout', type=int) #: The ID of the VPN service. vpnservice_id = resource.Body('vpnservice_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/v2/vpn_service.py0000664000175000017500000000435400000000000023141 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class VpnService(resource.Resource): resource_key = 'vpnservice' resources_key = 'vpnservices' base_path = '/vpn/vpnservices' _allow_unknown_attrs_in_body = True # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'description', 'external_v4_ip', 'external_v6_ip', 'name', 'router_id', 'project_id', 'tenant_id', 'subnet_id', is_admin_state_up='admin_state_up', ) # Properties #: Human-readable description for the vpnservice. description = resource.Body('description') #: The external IPv4 address that is used for the VPN service. external_v4_ip = resource.Body('external_v4_ip') #: The external IPv6 address that is used for the VPN service. external_v6_ip = resource.Body('external_v6_ip') #: The administrative state of the vpnservice, which is up ``True`` or #: down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The vpnservice name. name = resource.Body('name') #: ID of the router into which the VPN service is inserted. router_id = resource.Body('router_id') #: The ID of the project this vpnservice is associated with. project_id = resource.Body('project_id', alias='tenant_id') #: Tenant_id (deprecated attribute). tenant_id = resource.Body('tenant_id', deprecated=True) #: The vpnservice status. status = resource.Body('status') #: The ID of the subnet on which the tenant wants the vpnservice. subnet_id = resource.Body('subnet_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/network/version.py0000664000175000017500000000147100000000000021751 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # capabilities allow_list = True # Properties links = resource.Body('links') status = resource.Body('status') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3133461 openstacksdk-4.0.0/openstack/object_store/0000775000175000017500000000000000000000000020700 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/object_store/__init__.py0000664000175000017500000000000000000000000022777 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/object_store/object_store_service.py0000664000175000017500000000143600000000000025460 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.object_store.v1 import _proxy from openstack import service_description class ObjectStoreService(service_description.ServiceDescription): """The object store service.""" supported_versions = { '1': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3133461 openstacksdk-4.0.0/openstack/object_store/v1/0000775000175000017500000000000000000000000021226 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/object_store/v1/__init__.py0000664000175000017500000000000000000000000023325 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/object_store/v1/_base.py0000664000175000017500000000743000000000000022655 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import typing as ty from openstack import exceptions from openstack import resource class BaseResource(resource.Resource): commit_method = 'POST' create_method = 'PUT' #: Metadata stored for this resource. 
*Type: dict* metadata: ty.Dict[str, ty.Any] = {} _custom_metadata_prefix: str _system_metadata: ty.Dict[str, ty.Any] = {} _last_headers: ty.Dict[str, ty.Any] = {} def __init__(self, metadata=None, **attrs): """Process and save metadata known at creation stage""" super().__init__(**attrs) if metadata is not None: for k, v in metadata.items(): if not k.lower().startswith( self._custom_metadata_prefix.lower() ): self.metadata[self._custom_metadata_prefix + k] = v else: self.metadata[k] = v def _prepare_request(self, **kwargs): request = super()._prepare_request(**kwargs) request.headers.update(self._calculate_headers(self.metadata)) return request def _calculate_headers(self, metadata): headers = {} for key in metadata: if key in self._system_metadata.keys(): header = self._system_metadata[key] elif key in self._system_metadata.values(): header = key else: if key.startswith(self._custom_metadata_prefix): header = key else: header = self._custom_metadata_prefix + key headers[header] = metadata[key] return headers def set_metadata(self, session, metadata, refresh=True): request = self._prepare_request() response = session.post( request.url, headers=self._calculate_headers(metadata) ) self._translate_response(response, has_body=False) if refresh: response = session.head(request.url) self._translate_response(response, has_body=False) return self def delete_metadata(self, session, keys): request = self._prepare_request() headers = {key: '' for key in keys} response = session.post( request.url, headers=self._calculate_headers(headers) ) exceptions.raise_from_response( response, error_message="Error deleting metadata keys" ) return self def _set_metadata(self, headers): self.metadata = dict() for header in headers: # RADOS and other stuff in front may actually lowcase headers if header.lower().startswith(self._custom_metadata_prefix.lower()): key = header[len(self._custom_metadata_prefix) :].lower() self.metadata[key] = headers[header] def _translate_response(self, response, 
has_body=None, error_message=None): # Save headers of the last operation for potential use (get_object of # cloud layer). # This must happen before invoking parent _translate_response, cause it # pops known headers. self._last_headers = response.headers.copy() super()._translate_response( response, has_body=has_body, error_message=error_message ) self._set_metadata(response.headers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/object_store/v1/_proxy.py0000664000175000017500000013272400000000000023131 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from calendar import timegm import collections from hashlib import sha1 import hmac import json import os import time import typing as ty from urllib import parse from openstack import _log from openstack.cloud import _utils from openstack import exceptions from openstack.object_store.v1 import account as _account from openstack.object_store.v1 import container as _container from openstack.object_store.v1 import info as _info from openstack.object_store.v1 import obj as _obj from openstack import proxy from openstack import utils DEFAULT_OBJECT_SEGMENT_SIZE = 1073741824 # 1GB DEFAULT_MAX_FILE_SIZE = (5 * 1024 * 1024 * 1024 + 2) / 2 EXPIRES_ISO8601_FORMAT = '%Y-%m-%dT%H:%M:%SZ' SHORT_EXPIRES_ISO8601_FORMAT = '%Y-%m-%d' def _get_expiration(expiration): return int(time.time() + expiration) class Proxy(proxy.Proxy): _resource_registry = { "account": _account.Account, "container": _container.Container, "info": _info.Info, "object": _obj.Object, } skip_discovery = True Account = _account.Account Container = _container.Container Object = _obj.Object log = _log.setup_logging('openstack') def _extract_name(self, url, service_type=None, project_id=None): url_path = parse.urlparse(url).path.strip() # Remove / from the beginning to keep the list indexes of interesting # things consistent if url_path.startswith('/'): url_path = url_path[1:] # Split url into parts and exclude potential project_id in some urls url_parts = [ x for x in url_path.split('/') if ( x != project_id and ( not project_id or (project_id and x != 'AUTH_' + project_id) ) ) ] # Strip leading version piece so that # GET /v1/AUTH_xxx # returns ['AUTH_xxx'] if ( url_parts[0] and url_parts[0][0] == 'v' and url_parts[0][1] and url_parts[0][1].isdigit() ): url_parts = url_parts[1:] # Strip out anything that's empty or None parts = [part for part in url_parts if part] # Getting the root of an endpoint is doing version discovery if not parts: return ['account'] if len(parts) == 1: if 'endpoints' in parts: return 
['endpoints'] else: return ['container'] else: return ['object'] def get_account_metadata(self): """Get metadata for this account. :rtype: :class:`~openstack.object_store.v1.account.Account` """ return self._head(_account.Account) def set_account_metadata(self, **metadata): """Set metadata for this account. :param kwargs metadata: Key/value pairs to be set as metadata on the container. Custom metadata can be set. Custom metadata are keys and values defined by the user. """ account = self._get_resource(_account.Account, None) account.set_metadata(self, metadata) def delete_account_metadata(self, keys): """Delete metadata for this account. :param keys: The keys of metadata to be deleted. """ account = self._get_resource(_account.Account, None) account.delete_metadata(self, keys) def containers(self, **query): """Obtain Container objects for this account. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :rtype: A generator of :class:`~openstack.object_store.v1.container.Container` objects. """ return self._list(_container.Container, paginated=True, **query) def create_container(self, name, **attrs): """Create a new container from attributes :param container: Name of the container to create. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.object_store.v1.container.Container`, comprised of the properties on the Container class. :returns: The results of container creation :rtype: :class:`~openstack.object_store.v1.container.Container` """ return self._create(_container.Container, name=name, **attrs) def delete_container(self, container, ignore_missing=True): """Delete a container :param container: The value can be either the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the container does not exist. 
When set to ``True``, no exception will be set when attempting to delete a nonexistent server. :returns: ``None`` """ self._delete( _container.Container, container, ignore_missing=ignore_missing ) def get_container_metadata(self, container): """Get metadata for a container :param container: The value can be the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :returns: One :class:`~openstack.object_store.v1.container.Container` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._head(_container.Container, container) def set_container_metadata(self, container, refresh=True, **metadata): """Set metadata for a container. :param container: The value can be the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :param refresh: Flag to trigger refresh of container object re-fetch. :param kwargs metadata: Key/value pairs to be set as metadata on the container. Both custom and system metadata can be set. Custom metadata are keys and values defined by the user. System metadata are keys defined by the Object Store and values defined by the user. The system metadata keys are: - `content_type` - `is_content_type_detected` - `versions_location` - `read_ACL` - `write_ACL` - `sync_to` - `sync_key` """ res = self._get_resource(_container.Container, container) res.set_metadata(self, metadata, refresh=refresh) return res def delete_container_metadata(self, container, keys): """Delete metadata for a container. :param container: The value can be the ID of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :param keys: The keys of metadata to be deleted. """ res = self._get_resource(_container.Container, container) res.delete_metadata(self, keys) return res def objects(self, container, **query): """Return a generator that yields the Container's objects. 
:param container: A container object or the name of a container that you want to retrieve objects from. :type container: :class:`~openstack.object_store.v1.container.Container` :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :rtype: A generator of :class:`~openstack.object_store.v1.obj.Object` objects. """ container = self._get_container_name(container=container) for obj in self._list( _obj.Object, container=container, paginated=True, format='json', **query, ): obj.container = container yield obj def _get_container_name(self, obj=None, container=None): if obj is not None: obj = self._get_resource(_obj.Object, obj) if obj.container is not None: return obj.container if container is not None: container = self._get_resource(_container.Container, container) return container.name raise ValueError("container must be specified") def get_object( self, obj, container=None, resp_chunk_size=1024, outfile=None, remember_content=False, ): """Get the data associated with an object :param obj: The value can be the name of an object or a :class:`~openstack.object_store.v1.obj.Object` instance. :param container: The value can be the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :param int resp_chunk_size: chunk size of data to read. Only used if the results are being written to a file or stream is True. (optional, defaults to 1k) :param outfile: Write the object to a file instead of returning the contents. If this option is given, body in the return tuple will be None. outfile can either be a file path given as a string, or a File like object. :param bool remember_content: Flag whether object data should be saved as `data` property of the Object. When left as `false` and `outfile` is not defined data will not be saved and need to be fetched separately. :returns: Instance of the :class:`~openstack.object_store.v1.obj.Object` objects. 
:raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ container_name = self._get_container_name(obj=obj, container=container) _object = self._get_resource( _obj.Object, obj, container=container_name ) request = _object._prepare_request() get_stream = outfile is not None response = self.get( request.url, headers=request.headers, stream=get_stream ) exceptions.raise_from_response(response) _object._translate_response(response, has_body=False) if outfile: if isinstance(outfile, str): outfile_handle = open(outfile, 'wb') else: outfile_handle = outfile for chunk in response.iter_content( resp_chunk_size, decode_unicode=False ): outfile_handle.write(chunk) if isinstance(outfile, str): outfile_handle.close() else: outfile_handle.flush() elif remember_content: _object.data = response.text return _object def download_object(self, obj, container=None, **attrs): """Download the data contained inside an object. :param obj: The value can be the name of an object or a :class:`~openstack.object_store.v1.obj.Object` instance. :param container: The value can be the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ container_name = self._get_container_name(obj=obj, container=container) obj = self._get_resource( _obj.Object, obj, container=container_name, **attrs ) return obj.download(self) def stream_object(self, obj, container=None, chunk_size=1024, **attrs): """Stream the data contained inside an object. :param obj: The value can be the name of an object or a :class:`~openstack.object_store.v1.obj.Object` instance. :param container: The value can be the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. 
:returns: An iterator that iterates over chunk_size bytes """ container_name = self._get_container_name(obj=obj, container=container) obj = self._get_resource( _obj.Object, obj, container=container_name, **attrs ) return obj.stream(self, chunk_size=chunk_size) def create_object( self, container, name, filename=None, md5=None, sha256=None, segment_size=None, use_slo=True, metadata=None, generate_checksums=None, data=None, **headers, ): """Create a file object. Automatically uses large-object segments if needed. :param container: The name of the container to store the file in. This container will be created if it does not exist already. :param name: Name for the object within the container. :param filename: The path to the local file whose contents will be uploaded. Mutually exclusive with data. :param data: The content to upload to the object. Mutually exclusive with filename. :param md5: A hexadecimal md5 of the file. (Optional), if it is known and can be passed here, it will save repeating the expensive md5 process. It is assumed to be accurate. :param sha256: A hexadecimal sha256 of the file. (Optional) See md5. :param segment_size: Break the uploaded object into segments of this many bytes. (Optional) SDK will attempt to discover the maximum value for this from the server if it is not specified, or will use a reasonable default. :param headers: These will be passed through to the object creation API as HTTP Headers. :param use_slo: If the object is large enough to need to be a Large Object, use a static rather than dynamic object. Static Objects will delete segment objects when the manifest object is deleted. (optional, defaults to True) :param generate_checksums: Whether to generate checksums on the client side that get added to headers for later prevention of double uploads of identical data. 
(optional, defaults to True) :param metadata: This dict will get changed into headers that set metadata of the object :raises: ``:class:`~openstack.exceptions.SDKException``` on operation error. """ if data is not None and filename: raise ValueError( "Both filename and data given. Please choose one." ) if data is not None and not name: raise ValueError("name is a required parameter when data is given") if data is not None and generate_checksums: raise ValueError( "checksums cannot be generated with data parameter" ) if generate_checksums is None: if data is not None: generate_checksums = False else: generate_checksums = True if not metadata: metadata = {} if not filename and data is None: filename = name if generate_checksums and (md5 is None or sha256 is None): (md5, sha256) = utils._get_file_hashes(filename) if md5: metadata[self._connection._OBJECT_MD5_KEY] = md5 if sha256: metadata[self._connection._OBJECT_SHA256_KEY] = sha256 container_name = self._get_container_name(container=container) endpoint = '{container}/{name}'.format( container=container_name, name=name ) if data is not None: self.log.debug( "swift uploading data to %(endpoint)s", {'endpoint': endpoint} ) return self._create( _obj.Object, container=container_name, name=name, data=data, metadata=metadata, **headers, ) # segment_size gets used as a step value in a range call, so needs # to be an int if segment_size: segment_size = int(segment_size) segment_size = self.get_object_segment_size(segment_size) file_size = os.path.getsize(filename) if self.is_object_stale(container_name, name, filename, md5, sha256): self._connection.log.debug( "swift uploading %(filename)s to %(endpoint)s", {'filename': filename, 'endpoint': endpoint}, ) if metadata is not None: # Rely on the class headers calculation for requested metadata meta_headers = _obj.Object()._calculate_headers(metadata) headers.update(meta_headers) if file_size <= segment_size: self._upload_object(endpoint, filename, headers) else: 
self._upload_large_object( endpoint, filename, headers, file_size, segment_size, use_slo, ) # Backwards compat upload_object = create_object def copy_object(self): """Copy an object.""" raise NotImplementedError def delete_object(self, obj, ignore_missing=True, container=None): """Delete an object :param obj: The value can be either the name of an object or a :class:`~openstack.object_store.v1.container.Container` instance. :param container: The value can be the ID of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the object does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent server. :returns: ``None`` """ container_name = self._get_container_name(obj, container) self._delete( _obj.Object, obj, ignore_missing=ignore_missing, container=container_name, ) def get_object_metadata(self, obj, container=None): """Get metadata for an object. :param obj: The value can be the name of an object or a :class:`~openstack.object_store.v1.obj.Object` instance. :param container: The value can be the ID of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :returns: One :class:`~openstack.object_store.v1.obj.Object` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ container_name = self._get_container_name(obj, container) return self._head(_obj.Object, obj, container=container_name) def set_object_metadata(self, obj, container=None, **metadata): """Set metadata for an object. Note: This method will do an extra HEAD call. :param obj: The value can be the name of an object or a :class:`~openstack.object_store.v1.obj.Object` instance. :param container: The value can be the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. 
:param kwargs metadata: Key/value pairs to be set as metadata on the container. Both custom and system metadata can be set. Custom metadata are keys and values defined by the user. System metadata are keys defined by the Object Store and values defined by the user. The system metadata keys are: - `content_type` - `content_encoding` - `content_disposition` - `delete_after` - `delete_at` - `is_content_type_detected` """ container_name = self._get_container_name(obj, container) res = self._get_resource(_obj.Object, obj, container=container_name) res.set_metadata(self, metadata) return res def delete_object_metadata(self, obj, container=None, keys=None): """Delete metadata for an object. :param obj: The value can be the name of an object or a :class:`~openstack.object_store.v1.obj.Object` instance. :param container: The value can be the ID of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :param keys: The keys of metadata to be deleted. """ container_name = self._get_container_name(obj, container) res = self._get_resource(_obj.Object, obj, container=container_name) res.delete_metadata(self, keys) return res def is_object_stale( self, container, name, filename, file_md5=None, file_sha256=None ): """Check to see if an object matches the hashes of a file. :param container: Name of the container. :param name: Name of the object. :param filename: Path to the file. :param file_md5: Pre-calculated md5 of the file contents. Defaults to None which means calculate locally. :param file_sha256: Pre-calculated sha256 of the file contents. Defaults to None which means calculate locally. 
""" try: metadata = self.get_object_metadata(name, container).metadata except exceptions.NotFoundException: self._connection.log.debug( "swift stale check, no object: {container}/{name}".format( container=container, name=name ) ) return True if not (file_md5 or file_sha256): (file_md5, file_sha256) = utils._get_file_hashes(filename) md5_key = metadata.get( self._connection._OBJECT_MD5_KEY, metadata.get(self._connection._SHADE_OBJECT_MD5_KEY, ''), ) sha256_key = metadata.get( self._connection._OBJECT_SHA256_KEY, metadata.get(self._connection._SHADE_OBJECT_SHA256_KEY, ''), ) up_to_date = utils._hashes_up_to_date( md5=file_md5, sha256=file_sha256, md5_key=md5_key, sha256_key=sha256_key, ) if not up_to_date: self._connection.log.debug( "swift checksum mismatch: " " %(filename)s!=%(container)s/%(name)s", {'filename': filename, 'container': container, 'name': name}, ) return True self._connection.log.debug( "swift object up to date: %(container)s/%(name)s", {'container': container, 'name': name}, ) return False def _upload_large_object( self, endpoint, filename, headers, file_size, segment_size, use_slo ): # If the object is big, we need to break it up into segments that # are no larger than segment_size, upload each of them individually # and then upload a manifest object. The segments can be uploaded in # parallel, so we'll use the async feature of the TaskManager. segment_futures = [] segment_results = [] retry_results = [] retry_futures = [] manifest = [] # Get an OrderedDict with keys being the swift location for the # segment, the value a FileSegment file-like object that is a # slice of the data for the segment. 
segments = self._get_file_segments( endpoint, filename, file_size, segment_size ) # Schedule the segments for upload for name, segment in segments.items(): # Async call to put - schedules execution and returns a future segment_future = self._connection._pool_executor.submit( self.put, name, headers=headers, data=segment, raise_exc=False ) segment_futures.append(segment_future) # TODO(mordred) Collect etags from results to add to this manifest # dict. Then sort the list of dicts by path. manifest.append( dict( # While Object Storage usually expects the name to be # urlencoded in most requests, the SLO manifest requires # plain object names instead. path=f'/{parse.unquote(name)}', size_bytes=segment.length, ) ) # Try once and collect failed results to retry segment_results, retry_results = self._connection._wait_for_futures( segment_futures, raise_on_error=False ) self._add_etag_to_manifest(segment_results, manifest) for result in retry_results: # Grab the FileSegment for the failed upload so we can retry name = self._object_name_from_url(result.url) segment = segments[name] segment.seek(0) # Async call to put - schedules execution and returns a future segment_future = self._connection._pool_executor.submit( self.put, name, headers=headers, data=segment ) # TODO(mordred) Collect etags from results to add to this manifest # dict. Then sort the list of dicts by path. retry_futures.append(segment_future) # If any segments fail the second time, just throw the error segment_results, retry_results = self._connection._wait_for_futures( retry_futures, raise_on_error=True ) self._add_etag_to_manifest(segment_results, manifest) try: if use_slo: return self._finish_large_object_slo( endpoint, headers, manifest ) else: return self._finish_large_object_dlo(endpoint, headers) except Exception: try: segment_prefix = endpoint.split('/')[-1] self.log.debug( "Failed to upload large object manifest for %s. 
" "Removing segment uploads.", segment_prefix, ) self._delete_autocreated_image_objects( segment_prefix=segment_prefix ) except Exception: self.log.exception( "Failed to cleanup image objects for %s:", segment_prefix ) raise def _finish_large_object_slo(self, endpoint, headers, manifest): # TODO(mordred) send an etag of the manifest, which is the md5sum # of the concatenation of the etags of the results headers = headers.copy() retries = 3 while True: try: return exceptions.raise_from_response( self.put( endpoint, params={'multipart-manifest': 'put'}, headers=headers, data=json.dumps(manifest), ) ) except Exception: retries -= 1 if retries == 0: raise def _finish_large_object_dlo(self, endpoint, headers): headers = headers.copy() headers['X-Object-Manifest'] = endpoint retries = 3 while True: try: return exceptions.raise_from_response( self.put(endpoint, headers=headers) ) except Exception: retries -= 1 if retries == 0: raise def _upload_object(self, endpoint, filename, headers): with open(filename, 'rb') as dt: return self.put(endpoint, headers=headers, data=dt) def _get_file_segments(self, endpoint, filename, file_size, segment_size): # Use an ordered dict here so that testing can replicate things segments = collections.OrderedDict() for index, offset in enumerate(range(0, file_size, segment_size)): remaining = file_size - (index * segment_size) segment = _utils.FileSegment( filename, offset, segment_size if segment_size < remaining else remaining, ) name = '{endpoint}/{index:0>6}'.format( endpoint=endpoint, index=index ) segments[name] = segment return segments def get_object_segment_size(self, segment_size): """Get a segment size that will work given capabilities""" if segment_size is None: segment_size = DEFAULT_OBJECT_SEGMENT_SIZE min_segment_size = 0 try: # caps = self.get_object_capabilities() caps = self.get_info() except ( exceptions.NotFoundException, exceptions.PreconditionFailedException, ): server_max_file_size = DEFAULT_MAX_FILE_SIZE 
self._connection.log.info( "Swift capabilities not supported. " "Using default max file size." ) except exceptions.SDKException: raise else: server_max_file_size = caps.swift.get('max_file_size', 0) min_segment_size = caps.slo.get('min_segment_size', 0) if segment_size > server_max_file_size: return server_max_file_size if segment_size < min_segment_size: return min_segment_size return segment_size def _object_name_from_url(self, url): '''Get container_name/object_name from the full URL called. Remove the Swift endpoint from the front of the URL, and remove the leaving / that will leave behind.''' endpoint = self.get_endpoint() object_name = url.replace(endpoint, '') if object_name.startswith('/'): object_name = object_name[1:] return object_name def _add_etag_to_manifest(self, segment_results, manifest): for result in segment_results: if 'Etag' not in result.headers: continue name = self._object_name_from_url(result.url) for entry in manifest: if entry['path'] == f'/{parse.unquote(name)}': entry['etag'] = result.headers['Etag'] def get_info(self): """Get infomation about the object-storage service The object-storage service publishes a set of capabilities that include metadata about maximum values and thresholds. """ return self._get(_info.Info) def set_account_temp_url_key(self, key, secondary=False): """Set the temporary URL key for the account. :param key: Text of the key to use. :param bool secondary: Whether this should set the secondary key. (defaults to False) """ account = self._get_resource(_account.Account, None) account.set_temp_url_key(self, key, secondary) def set_container_temp_url_key(self, container, key, secondary=False): """Set the temporary URL key for a container. :param container: The value can be the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :param key: Text of the key to use. :param bool secondary: Whether this should set the secondary key. 
(defaults to False) """ res = self._get_resource(_container.Container, container) res.set_temp_url_key(self, key, secondary) def get_temp_url_key(self, container=None): """Get the best temporary url key for a given container. Will first try to return Temp-URL-Key-2 then Temp-URL-Key for the container, and if neither exist, will attempt to return Temp-URL-Key-2 then Temp-URL-Key for the account. If neither exist, will return None. :param container: The value can be the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. """ temp_url_key = None if container: container_meta = self.get_container_metadata(container) temp_url_key = ( container_meta.meta_temp_url_key_2 or container_meta.meta_temp_url_key ) if not temp_url_key: account_meta = self.get_account_metadata() temp_url_key = ( account_meta.meta_temp_url_key_2 or account_meta.meta_temp_url_key ) if temp_url_key and not isinstance(temp_url_key, bytes): temp_url_key = temp_url_key.encode('utf8') return temp_url_key def _check_temp_url_key(self, container=None, temp_url_key=None): if temp_url_key: if not isinstance(temp_url_key, bytes): temp_url_key = temp_url_key.encode('utf8') else: temp_url_key = self.get_temp_url_key(container) if not temp_url_key: raise exceptions.SDKException( 'temp_url_key was not given, nor was a temporary url key' ' found for the account or the container.' ) return temp_url_key def generate_form_signature( self, container, object_prefix, redirect_url, max_file_size, max_upload_count, timeout, temp_url_key=None, ): """Generate a signature for a FormPost upload. :param container: The value can be the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :param object_prefix: Prefix to apply to limit all object names created using this signature. :param redirect_url: The URL to redirect the browser to after the uploads have completed. :param max_file_size: The maximum file size per file uploaded. 
:param max_upload_count: The maximum number of uploaded files allowed. :param timeout: The number of seconds from now to allow the form post to begin. :param temp_url_key: The X-Account-Meta-Temp-URL-Key for the account. Optional, if omitted, the key will be fetched from the container or the account. """ max_file_size = int(max_file_size) if max_file_size < 1: raise exceptions.SDKException( 'Please use a positive max_file_size value.' ) max_upload_count = int(max_upload_count) if max_upload_count < 1: raise exceptions.SDKException( 'Please use a positive max_upload_count value.' ) if timeout < 1: raise exceptions.SDKException( 'Please use a positive value.' ) expires = _get_expiration(timeout) temp_url_key = self._check_temp_url_key( container=container, temp_url_key=temp_url_key ) res = self._get_resource(_container.Container, container) endpoint = parse.urlparse(self.get_endpoint()) path = '/'.join([endpoint.path, res.name, object_prefix]) data = '{}\n{}\n{}\n{}\n{}'.format( path, redirect_url, max_file_size, max_upload_count, expires, ) sig = hmac.new(temp_url_key, data.encode(), sha1).hexdigest() return (expires, sig) def generate_temp_url( self, path, seconds, method, absolute=False, prefix=False, iso8601=False, ip_range=None, temp_url_key=None, ): """Generates a temporary URL that gives unauthenticated access to the Swift object. :param path: The full path to the Swift object or prefix if a prefix-based temporary URL should be generated. Example: /v1/AUTH_account/c/o or /v1/AUTH_account/c/prefix. :param seconds: time in seconds or ISO 8601 timestamp. If absolute is False and this is the string representation of an integer, then this specifies the amount of time in seconds for which the temporary URL will be valid. If absolute is True then this specifies an absolute time at which the temporary URL will expire. :param method: A HTTP method, typically either GET or PUT, to allow for this temporary URL. 
:param absolute: if True then the seconds parameter is interpreted as a Unix timestamp, if seconds represents an integer. :param prefix: if True then a prefix-based temporary URL will be generated. :param iso8601: if True, a URL containing an ISO 8601 UTC timestamp instead of a UNIX timestamp will be created. :param ip_range: if a valid ip range, restricts the temporary URL to the range of ips. :param temp_url_key: The X-Account-Meta-Temp-URL-Key for the account. Optional, if omitted, the key will be fetched from the container or the account. :raises ValueError: if timestamp or path is not in valid format. :return: the path portion of a temporary URL """ try: try: timestamp = float(seconds) except ValueError: formats = ( EXPIRES_ISO8601_FORMAT, EXPIRES_ISO8601_FORMAT[:-1], SHORT_EXPIRES_ISO8601_FORMAT, ) for f in formats: try: t = time.strptime(seconds, f) except ValueError: continue if f == EXPIRES_ISO8601_FORMAT: timestamp = timegm(t) else: # Use local time if UTC designator is missing. timestamp = int(time.mktime(t)) absolute = True break else: raise ValueError() else: if not timestamp.is_integer(): raise ValueError() timestamp = int(timestamp) if timestamp < 0: raise ValueError() except ValueError: raise ValueError( 'time must either be a whole number ' 'or in specific ISO 8601 format.' ) if isinstance(path, bytes): try: path_for_body = path.decode('utf-8') except UnicodeDecodeError: raise ValueError('path must be representable as UTF-8') else: path_for_body = path parts = path_for_body.split('/', 4) if ( len(parts) != 5 or parts[0] or not all(parts[1 : (4 if prefix else 5)]) ): if prefix: raise ValueError('path must at least contain /v1/a/c/') else: raise ValueError( 'path must be full path to an object e.g. 
/v1/a/c/o' ) standard_methods = ['GET', 'PUT', 'HEAD', 'POST', 'DELETE'] if method.upper() not in standard_methods: self.log.warning( 'Non default HTTP method %s for tempurl ' 'specified, possibly an error', method.upper(), ) expiration: ty.Union[float, int] if not absolute: expiration = _get_expiration(timestamp) else: expiration = timestamp hmac_parts = [ method.upper(), str(expiration), ('prefix:' if prefix else '') + path_for_body, ] if ip_range: if isinstance(ip_range, bytes): try: ip_range = ip_range.decode('utf-8') except UnicodeDecodeError: raise ValueError('ip_range must be representable as UTF-8') hmac_parts.insert(0, "ip=%s" % ip_range) hmac_body = '\n'.join(hmac_parts) temp_url_key = self._check_temp_url_key(temp_url_key=temp_url_key) sig = hmac.new( temp_url_key, hmac_body.encode('utf-8'), sha1 ).hexdigest() if iso8601: exp = time.strftime( EXPIRES_ISO8601_FORMAT, time.gmtime(expiration) ) else: exp = str(expiration) temp_url = '{path}?temp_url_sig={sig}&temp_url_expires={exp}'.format( path=path_for_body, sig=sig, exp=exp, ) if ip_range: temp_url += f'&temp_url_ip_range={ip_range}' if prefix: temp_url += f'&temp_url_prefix={parts[4]}' # Have return type match path from caller if isinstance(path, bytes): return temp_url.encode('utf-8') else: return temp_url def _delete_autocreated_image_objects( self, container=None, segment_prefix=None ): """Delete all objects autocreated for image uploads. This method should generally not be needed, as shade should clean up the objects it uses for object-based image creation. If something goes wrong and it is found that there are leaked objects, this method can be used to delete any objects that shade has created on the user's behalf in service of image uploads. :param str container: Name of the container. Defaults to 'images'. :param str segment_prefix: Prefix for the image segment names to delete. If not given, all image upload segments present are deleted. :returns: True if deletion was succesful, else False. 
""" if container is None: container = self._connection._OBJECT_AUTOCREATE_CONTAINER # This method only makes sense on clouds that use tasks if not self._connection.image_api_use_tasks: return False deleted = False for obj in self.objects(container, prefix=segment_prefix): meta = self.get_object_metadata(obj).metadata if meta.get(self._connection._OBJECT_AUTOCREATE_KEY) == 'true': self.delete_object(obj, ignore_missing=True) deleted = True return deleted # ========== Project Cleanup ========== def _get_cleanup_dependencies(self): return {'object_store': {'before': []}} def _service_cleanup( self, dry_run=True, client_status_queue=None, identified_resources=None, filters=None, resource_evaluation_fn=None, skip_resources=None, ): if self.should_skip_resource_cleanup( "container", skip_resources ) or self.should_skip_resource_cleanup("object", skip_resources): return is_bulk_delete_supported = False bulk_delete_max_per_request = 1 try: caps = self.get_info() except exceptions.SDKException: pass else: bulk_delete = caps.get("bulk_delete") if bulk_delete is not None: is_bulk_delete_supported = True bulk_delete_max_per_request = bulk_delete.get( "max_deletes_per_request", 10000 ) elements = [] for cont in self.containers(): # Iterate over objects inside container objects_remaining = False for obj in self.objects(cont): need_delete = self._service_cleanup_del_res( self.delete_object, obj, dry_run=True, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if need_delete: if dry_run: continue elif is_bulk_delete_supported: elements.append(f"{cont.name}/{obj.name}") if len(elements) >= bulk_delete_max_per_request: self._bulk_delete(elements) elements.clear() else: self.delete_object(obj, cont) else: objects_remaining = True if len(elements) > 0: self._bulk_delete(elements) elements.clear() # Eventually delete container itself if not objects_remaining: 
self._service_cleanup_del_res( self.delete_container, cont, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) def _bulk_delete(self, elements): data = "\n".join([parse.quote(x) for x in elements]) self.delete( "?bulk-delete", data=data, headers={ 'Content-Type': 'text/plain', 'Accept': 'application/json', }, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/object_store/v1/account.py0000664000175000017500000000434300000000000023240 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.object_store.v1 import _base from openstack import resource class Account(_base.BaseResource): _custom_metadata_prefix = "X-Account-Meta-" base_path = "/" allow_fetch = True allow_commit = True allow_head = True #: The total number of bytes that are stored in Object Storage for #: the account. account_bytes_used = resource.Header("x-account-bytes-used", type=int) #: The number of containers. account_container_count = resource.Header( "x-account-container-count", type=int ) #: The number of objects in the account. account_object_count = resource.Header("x-account-object-count", type=int) #: The secret key value for temporary URLs. If not set, #: this header is not returned by this operation. 
meta_temp_url_key = resource.Header("x-account-meta-temp-url-key") #: A second secret key value for temporary URLs. If not set, #: this header is not returned by this operation. meta_temp_url_key_2 = resource.Header("x-account-meta-temp-url-key-2") #: The timestamp of the transaction. timestamp = resource.Header("x-timestamp") has_body = False requires_id = False def set_temp_url_key(self, proxy, key, secondary=False): """Set the temporary url key for the account. :param proxy: The proxy to use for making this request. :type proxy: :class:`~openstack.proxy.Proxy` :param key: Text of the key to use. :param bool secondary: Whether this should set the secondary key. (defaults to False) """ header = 'Temp-URL-Key' if secondary: header += '-2' return self.set_metadata(proxy, {header: key}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/object_store/v1/container.py0000664000175000017500000001556100000000000023572 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.object_store.v1 import _base from openstack import resource class Container(_base.BaseResource): _custom_metadata_prefix = "X-Container-Meta-" _system_metadata = { "content_type": "content-type", "is_content_type_detected": "x-detect-content-type", "versions_location": "x-versions-location", "history_location": "x-history-location", "read_ACL": "x-container-read", "write_ACL": "x-container-write", "sync_to": "x-container-sync-to", "sync_key": "x-container-sync-key", } base_path = "/" pagination_key = 'X-Account-Container-Count' allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_head = True _query_mapping = resource.QueryParameters('prefix', 'format') # Container body data (when id=None) #: The name of the container. name = resource.Body("name", alternate_id=True, alias='id') #: The number of objects in the container. count = resource.Body("count", type=int, alias='object_count') #: The total number of bytes that are stored in Object Storage #: for the container. bytes = resource.Body("bytes", type=int, alias='bytes_used') # Container metadata (when id=name) #: The number of objects. object_count = resource.Header( "x-container-object-count", type=int, alias='count' ) #: The count of bytes used in total. bytes_used = resource.Header( "x-container-bytes-used", type=int, alias='bytes' ) #: The timestamp of the transaction. timestamp = resource.Header("x-timestamp") # Request headers (when id=None) #: If set to True, Object Storage queries all replicas to return the #: most recent one. If you omit this header, Object Storage responds #: faster after it finds one valid replica. Because setting this #: header to True is more expensive for the back end, use it only #: when it is absolutely needed. *Type: bool* is_newest = resource.Header("x-newest", type=bool) # Request headers (when id=name) #: The ACL that grants read access. If not set, this header is not #: returned by this operation. 
read_ACL = resource.Header("x-container-read") #: The ACL that grants write access. If not set, this header is not #: returned by this operation. write_ACL = resource.Header("x-container-write") #: The destination for container synchronization. If not set, #: this header is not returned by this operation. sync_to = resource.Header("x-container-sync-to") #: The secret key for container synchronization. If not set, #: this header is not returned by this operation. sync_key = resource.Header("x-container-sync-key") #: Enables versioning on this container. The value is the name #: of another container. You must UTF-8-encode and then URL-encode #: the name before you include it in the header. To disable #: versioning, set the header to an empty string. versions_location = resource.Header("x-versions-location") #: Enables versioning on the container. history_location = resource.Header("x-history-location") #: The MIME type of the list of names. content_type = resource.Header("content-type") #: If set to true, Object Storage guesses the content type based #: on the file extension and ignores the value sent in the #: Content-Type header, if present. *Type: bool* is_content_type_detected = resource.Header( "x-detect-content-type", type=bool ) #: Storage policy used by the container. #: It is not possible to change policy of an existing container storage_policy = resource.Header("x-storage-policy") # TODO(mordred) Shouldn't if-none-match be handled more systemically? #: In combination with Expect: 100-Continue, specify an #: "If-None-Match: \*" header to query whether the server already #: has a copy of the object before any data is sent. if_none_match = resource.Header("if-none-match") #: The secret key value for temporary URLs. If not set, #: this header is not returned by this operation. meta_temp_url_key = resource.Header("x-container-meta-temp-url-key") #: A second secret key value for temporary URLs. If not set, #: this header is not returned by this operation. 
meta_temp_url_key_2 = resource.Header("x-container-meta-temp-url-key-2") @classmethod def new(cls, **kwargs): # Container uses name as id. Proxy._get_resource calls # Resource.new(id=name) but then we need to do container.name # It's the same thing for Container - make it be the same. name = kwargs.pop('id', None) if name: kwargs.setdefault('name', name) return cls(_synchronized=False, **kwargs) def create(self, session, prepend_key=True, base_path=None): """Create a remote resource based on this instance. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param prepend_key: A boolean indicating whether the resource_key should be prepended in a resource creation request. Default to True. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_create` is not set to ``True``. """ request = self._prepare_request( requires_id=True, prepend_key=prepend_key, base_path=base_path ) response = session.put(request.url, headers=request.headers) self._translate_response(response, has_body=False) return self def set_temp_url_key(self, proxy, key, secondary=False): """Set the temporary url key for a container. :param proxy: The proxy to use for making this request. :type proxy: :class:`~openstack.proxy.Proxy` :param container: The value can be the name of a container or a :class:`~openstack.object_store.v1.container.Container` instance. :param key: Text of the key to use. :param bool secondary: Whether this should set the second key. 
(defaults to False) """ header = 'Temp-URL-Key' if secondary: header += '-2' return self.set_metadata(proxy, {header: key}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/object_store/v1/info.py0000664000175000017500000000665600000000000022550 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import urllib from openstack import exceptions from openstack import resource from openstack import utils class Info(resource.Resource): base_path = "/info" allow_fetch = True _query_mapping = resource.QueryParameters( 'swiftinfo_sig', 'swiftinfo_expires' ) # Properties bulk_delete = resource.Body("bulk_delete", type=dict) swift = resource.Body("swift", type=dict) slo = resource.Body("slo", type=dict) staticweb = resource.Body("staticweb", type=dict) tempurl = resource.Body("tempurl", type=dict) # The endpoint in the catalog has version and project-id in it # To get capabilities, we have to disassemble and reassemble the URL # to append 'info' # This logic is taken from swiftclient def _get_info_url(self, url): URI_PATTERN_VERSION = re.compile(r'\/v\d+\.?\d*(\/.*)?') scheme, netloc, path, params, query, fragment = urllib.parse.urlparse( url ) if URI_PATTERN_VERSION.search(path): path = URI_PATTERN_VERSION.sub('/info', path) else: path = utils.urljoin(path, 'info') return urllib.parse.urlunparse( (scheme, netloc, path, params, query, fragment) ) def fetch( self, 
session, requires_id=False, base_path=None, skip_cache=False, error_message=None, ): """Get a remote resource based on this instance. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param boolean requires_id: A boolean indicating whether resource ID should be part of the requested URI. :param str base_path: Base part of the URI for fetching resources, if different from :data:`~openstack.resource.Resource.base_path`. :param str error_message: An Error message to be returned if requested object does not exist. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_fetch` is not set to ``True``. :raises: :exc:`~openstack.exceptions.NotFoundException` if the resource was not found. """ if not self.allow_fetch: raise exceptions.MethodNotSupported(self, "fetch") session = self._get_session(session) info_url = self._get_info_url(session.get_endpoint()) microversion = self._get_microversion(session, action='fetch') response = session.get(info_url, microversion=microversion) kwargs = {} if error_message: kwargs['error_message'] = error_message self.microversion = microversion self._translate_response(response, **kwargs) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/object_store/v1/obj.py0000664000175000017500000003505100000000000022356 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy from openstack import exceptions from openstack.object_store.v1 import _base from openstack import resource class Object(_base.BaseResource): _custom_metadata_prefix = "X-Object-Meta-" _system_metadata = { "accept_ranges": "accept-ranges", "content_disposition": "content-disposition", "content_encoding": "content-encoding", "content_type": "content-type", "delete_after": "x-delete-after", "delete_at": "x-delete-at", "is_content_type_detected": "x-detect-content-type", "manifest": "x-object-manifest", # Rax hack - the need CORS as different header "access_control_allow_origin": "access-control-allow-origin", } base_path = "/%(container)s" pagination_key = 'X-Container-Object-Count' allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_head = True _query_mapping = resource.QueryParameters( 'prefix', 'format', 'temp_url_sig', 'temp_url_expires', 'filename', 'multipart_manifest', 'symlink', multipart_manifest='multipart-manifest', ) # Data to be passed during a POST call to create an object on the server. # TODO(mordred) Make a base class BaseDataResource that can be used here # and with glance images that has standard overrides for dealing with # binary data. data = None # URL parameters #: The unique name for the container. container = resource.URI("container") #: The unique name for the object. name = resource.Body("name", alternate_id=True) # Object details # Make these private because they should only matter in the case where # we have a Body with no headers (like if someone programmatically is # creating an Object) _hash = resource.Body("hash") _bytes = resource.Body("bytes", type=int) _last_modified = resource.Body("last_modified") _content_type = resource.Body("content_type") # Headers for HEAD and GET requests #: If set to True, Object Storage queries all replicas to return #: the most recent one. 
If you omit this header, Object Storage #: responds faster after it finds one valid replica. Because #: setting this header to True is more expensive for the back end, #: use it only when it is absolutely needed. *Type: bool* is_newest = resource.Header("x-newest", type=bool) #: TODO(briancurtin) there's a lot of content here... range = resource.Header("range", type=dict) #: See http://www.ietf.org/rfc/rfc2616.txt. if_match = resource.Header("if-match", type=list) #: In combination with Expect: 100-Continue, specify an #: "If-None-Match: \*" header to query whether the server already #: has a copy of the object before any data is sent. if_none_match = resource.Header("if-none-match", type=list) #: See http://www.ietf.org/rfc/rfc2616.txt. if_modified_since = resource.Header("if-modified-since", type=str) #: See http://www.ietf.org/rfc/rfc2616.txt. if_unmodified_since = resource.Header("if-unmodified-since", type=str) # Query parameters #: Used with temporary URLs to sign the request. For more #: information about temporary URLs, see OpenStack Object Storage #: API v1 Reference. signature = resource.Header("signature") #: Used with temporary URLs to specify the expiry time of the #: signature. For more information about temporary URLs, see #: OpenStack Object Storage API v1 Reference. expires_at = resource.Header("expires") #: If present, this is a dynamic large object manifest object. #: The value is the container and object name prefix of the segment #: objects in the form container/prefix. manifest = resource.Header("x-object-manifest") #: If you include the multipart-manifest=get query parameter and #: the object is a large object, the object contents are not #: returned. Instead, the manifest is returned in the #: X-Object-Manifest response header for dynamic large objects #: or in the response body for static large objects. multipart_manifest = resource.Header("multipart-manifest") # Response headers from HEAD and GET #: HEAD operations do not return content. 
However, in this #: operation the value in the Content-Length header is not the #: size of the response body. Instead it contains the size of #: the object, in bytes. content_length = resource.Header( "content-length", type=int, alias='_bytes' ) #: The MIME type of the object. content_type = resource.Header("content-type", alias="_content_type") #: The type of ranges that the object accepts. accept_ranges = resource.Header("accept-ranges") #: For objects smaller than 5 GB, this value is the MD5 checksum #: of the object content. The value is not quoted. #: For manifest objects, this value is the MD5 checksum of the #: concatenated string of MD5 checksums and ETags for each of #: the segments in the manifest, and not the MD5 checksum of #: the content that was downloaded. Also the value is enclosed #: in double-quote characters. #: You are strongly recommended to compute the MD5 checksum of #: the response body as it is received and compare this value #: with the one in the ETag header. If they differ, the content #: was corrupted, so retry the operation. etag = resource.Header("etag", alias='_hash') #: Set to True if this object is a static large object manifest object. #: *Type: bool* is_static_large_object = resource.Header( "x-static-large-object", type=bool ) #: If set, the value of the Content-Encoding metadata. #: If not set, this header is not returned by this operation. content_encoding = resource.Header("content-encoding") #: If set, specifies the override behavior for the browser. #: For example, this header might specify that the browser use #: a download program to save this file rather than show the file, #: which is the default. #: If not set, this header is not returned by this operation. content_disposition = resource.Header("content-disposition") #: Specifies the number of seconds after which the object is #: removed. Internally, the Object Storage system stores this #: value in the X-Delete-At metadata item. 
delete_after = resource.Header("x-delete-after", type=int) #: If set, the time when the object will be deleted by the system #: in the format of a UNIX Epoch timestamp. #: If not set, this header is not returned by this operation. delete_at = resource.Header("x-delete-at") #: If set, to this is a dynamic large object manifest object. #: The value is the container and object name prefix of the #: segment objects in the form container/prefix. object_manifest = resource.Header("x-object-manifest") #: The timestamp of the transaction. timestamp = resource.Header("x-timestamp") #: The date and time that the object was created or the last #: time that the metadata was changed. last_modified_at = resource.Header( "last-modified", alias='_last_modified', aka='updated_at' ) # Headers for PUT and POST requests #: Set to chunked to enable chunked transfer encoding. If used, #: do not set the Content-Length header to a non-zero value. transfer_encoding = resource.Header("transfer-encoding") #: If set to true, Object Storage guesses the content type based #: on the file extension and ignores the value sent in the #: Content-Type header, if present. *Type: bool* is_content_type_detected = resource.Header( "x-detect-content-type", type=bool ) #: If set, this is the name of an object used to create the new #: object by copying the X-Copy-From object. The value is in form #: {container}/{object}. You must UTF-8-encode and then URL-encode #: the names of the container and object before you include them #: in the header. #: Using PUT with X-Copy-From has the same effect as using the #: COPY operation to copy an object. copy_from = resource.Header("x-copy-from") #: If present, this is a symlink object. The value is the relative path #: of the target object in the format /. symlink_target = resource.Header("x-symlink-target") #: If present, and X-Symlink-Target is present, then this is a #: cross-account symlink to an object in the account specified in the #: value. 
symlink_target_account = resource.Header("x-symlink-target-account") #: CORS for RAX (deviating from standard) access_control_allow_origin = resource.Header( "access-control-allow-origin" ) has_body = False def __init__(self, data=None, **attrs): super().__init__(**attrs) self.data = data # The Object Store treats the metadata for its resources inconsistently so # Object.set_metadata must override the BaseResource.set_metadata to # account for it. def set_metadata(self, session, metadata): # Filter out items with empty values so the create metadata behaviour # is the same as account and container filtered_metadata = { key: value for key, value in metadata.items() if value } # Update from remote if we only have locally created information if not self.last_modified_at: self.head(session) # Get a copy of the original metadata so it doesn't get erased on POST # and update it with the new metadata values. metadata = copy.deepcopy(self.metadata) metadata.update(filtered_metadata) # Include any original system metadata so it doesn't get erased on POST for key in self._system_metadata: value = getattr(self, key) if value and key not in metadata: metadata[key] = value request = self._prepare_request() headers = self._calculate_headers(metadata) response = session.post(request.url, headers=headers) self._translate_response(response, has_body=False) self.metadata.update(metadata) return self # The Object Store treats the metadata for its resources inconsistently so # Object.delete_metadata must override the BaseResource.delete_metadata to # account for it. def delete_metadata(self, session, keys): if not keys: return # If we have an empty object, update it from the remote side so that # we have a copy of the original metadata. Deleting metadata requires # POSTing and overwriting all of the metadata. If we already have # metadata locally, assume this is an existing object. 
if not self.metadata: self.head(session) metadata = copy.deepcopy(self.metadata) # Include any original system metadata so it doesn't get erased on POST for key in self._system_metadata: value = getattr(self, key) if value: metadata[key] = value # Remove the requested metadata keys # TODO(mordred) Why don't we just look at self._header_mapping() # instead of having system_metadata? deleted = False attr_keys_to_delete = set() for key in keys: if key == 'delete_after': del metadata['delete_at'] else: if key in metadata: del metadata[key] # Delete the attribute from the local copy of the object. # Metadata that doesn't have Component attributes is # handled by self.metadata being reset when we run # self.head if hasattr(self, key): attr_keys_to_delete.add(key) deleted = True # Nothing to delete, skip the POST if not deleted: return self request = self._prepare_request() response = session.post( request.url, headers=self._calculate_headers(metadata) ) exceptions.raise_from_response( response, error_message="Error deleting metadata keys" ) # Only delete from local object if the remote delete was successful for key in attr_keys_to_delete: delattr(self, key) # Just update ourselves from remote again. 
return self.head(session) def _download(self, session, error_message=None, stream=False): request = self._prepare_request() response = session.get( request.url, headers=request.headers, stream=stream ) exceptions.raise_from_response(response, error_message=error_message) return response def download(self, session, error_message=None): response = self._download(session, error_message=error_message) return response.content def stream(self, session, error_message=None, chunk_size=1024): response = self._download( session, error_message=error_message, stream=True ) return response.iter_content(chunk_size, decode_unicode=False) def create(self, session, base_path=None, **params): request = self._prepare_request(base_path=base_path) response = session.put( request.url, data=self.data, headers=request.headers ) self._translate_response(response, has_body=False) return self def _raw_delete(self, session, microversion=None): if not self.allow_delete: raise exceptions.MethodNotSupported(self, "delete") request = self._prepare_request() session = self._get_session(session) if microversion is None: microversion = self._get_microversion(session, action='delete') if self.is_static_large_object is None: # Fetch metadata to determine SLO flag self.head(session) headers = {} if self.is_static_large_object: headers['multipart-manifest'] = 'delete' return session.delete( request.url, headers=headers, microversion=microversion ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3133461 openstacksdk-4.0.0/openstack/orchestration/0000775000175000017500000000000000000000000021102 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/__init__.py0000664000175000017500000000000000000000000023201 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/orchestration_service.py0000664000175000017500000000144200000000000026061 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.orchestration.v1 import _proxy from openstack import service_description class OrchestrationService(service_description.ServiceDescription): """The orchestration service.""" supported_versions = { '1': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.317348 openstacksdk-4.0.0/openstack/orchestration/util/0000775000175000017500000000000000000000000022057 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/util/__init__.py0000664000175000017500000000000000000000000024156 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/util/environment_format.py0000664000175000017500000000361000000000000026345 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import yaml from openstack.orchestration.util import template_format SECTIONS = ( PARAMETER_DEFAULTS, PARAMETERS, RESOURCE_REGISTRY, ENCRYPTED_PARAM_NAMES, EVENT_SINKS, PARAMETER_MERGE_STRATEGIES, ) = ( 'parameter_defaults', 'parameters', 'resource_registry', 'encrypted_param_names', 'event_sinks', 'parameter_merge_strategies', ) def parse(env_str): """Takes a string and returns a dict containing the parsed structure. This includes determination of whether the string is using the YAML format. """ try: env = yaml.load(env_str, Loader=template_format.yaml_loader) except yaml.YAMLError: # NOTE(prazumovsky): we need to return more informative error for # user, so use SafeLoader, which return error message with template # snippet where error has been occurred. try: env = yaml.load(env_str, Loader=yaml.SafeLoader) except yaml.YAMLError as yea: raise ValueError(yea) else: if env is None: env = {} elif not isinstance(env, dict): raise ValueError( 'The environment is not a valid YAML mapping data type.' ) for param in env: if param not in SECTIONS: raise ValueError('environment has wrong section "%s"' % param) return env ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/util/event_utils.py0000664000175000017500000000736500000000000025005 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import time

from openstack.cloud import meta
from openstack import exceptions


# TODO(stephenfin): Convert to use real resources
def get_events(cloud, stack_id, event_args, marker=None, limit=None):
    """List events for a stack.

    :param cloud: cloud connection exposing the orchestration proxy
    :param stack_id: stack name or ``<name>/<id>`` identifier
    :param event_args: dict of query parameters for the events request
    :param marker: optional pagination marker (id of the last seen event)
    :param limit: optional maximum number of events to return
    :returns: list of event objects, each annotated with 'stack_name'
    """
    # TODO(mordred) FIX THIS ONCE assert_calls CAN HANDLE QUERY STRINGS
    params = collections.OrderedDict()
    for k in sorted(event_args.keys()):
        params[k] = event_args[k]

    # Bug fix: marker/limit used to be written into event_args *after*
    # params had already been built, so they never reached the request.
    # Write them into params directly.
    if marker:
        params['marker'] = marker
    if limit:
        params['limit'] = limit

    response = cloud.orchestration.get(
        f'/stacks/{stack_id}/events',
        params=params,
    )
    exceptions.raise_from_response(response)

    # Show which stack the event comes from (for nested events)
    events = meta.get_and_munchify('events', response.json())
    for e in events:
        e['stack_name'] = stack_id.split("/")[0]
    return events


def poll_for_events(
    cloud, stack_name, action=None, poll_period=5, marker=None
):
    """Continuously poll events and logs for performed action on stack.

    :param cloud: cloud connection exposing the orchestration proxy
    :param stack_name: name of the stack being watched
    :param action: optional action name (e.g. 'CREATE'); when given,
        polling stops on '<action>_COMPLETE' / '<action>_FAILED',
        otherwise on any '*_COMPLETE' / '*_FAILED' status
    :param poll_period: seconds to sleep between polls
    :param marker: optional initial event pagination marker
    :returns: tuple of (stack_status, message) once a terminal status
        for the stack itself is observed
    """

    def stop_check_action(a):
        stop_status = ('%s_FAILED' % action, '%s_COMPLETE' % action)
        return a in stop_status

    def stop_check_no_action(a):
        return a.endswith('_COMPLETE') or a.endswith('_FAILED')

    if action:
        stop_check = stop_check_action
    else:
        stop_check = stop_check_no_action

    no_event_polls = 0
    msg_template = "\n Stack %(name)s %(status)s \n"

    def is_stack_event(event):
        # An event belongs to the stack itself (rather than one of its
        # resources) when the resource name or physical id matches the
        # stack name and the 'stack' link's trailing id matches the
        # event's physical resource id.
        if (
            event.get('resource_name', '') != stack_name
            and event.get('physical_resource_id', '') != stack_name
        ):
            return False

        phys_id = event.get('physical_resource_id', '')
        links = {
            link.get('rel'): link.get('href')
            for link in event.get('links', [])
        }
        stack_id = links.get('stack', phys_id).rsplit('/', 1)[-1]
        return stack_id == phys_id

    while True:
        events = get_events(
            cloud,
            stack_id=stack_name,
            event_args={'sort_dir': 'asc', 'marker': marker},
        )

        if len(events) == 0:
            no_event_polls += 1
        else:
            no_event_polls = 0
            # set marker to last event that was received.
            marker = getattr(events[-1], 'id', None)
            for event in events:
                # check if stack event was also received
                if is_stack_event(event):
                    stack_status = getattr(event, 'resource_status', '')
                    msg = msg_template % dict(
                        name=stack_name, status=stack_status
                    )
                    if stop_check(stack_status):
                        return stack_status, msg

        if no_event_polls >= 2:
            # after 2 polls with no events, fall back to a stack get
            stack = cloud.get_stack(stack_name, resolve_outputs=False)
            stack_status = stack['stack_status']
            msg = msg_template % dict(name=stack_name, status=stack_status)
            if stop_check(stack_status):
                return stack_status, msg
            # go back to event polling again
            no_event_polls = 0

        time.sleep(poll_period)
import json import yaml if hasattr(yaml, 'CSafeLoader'): yaml_loader = yaml.CSafeLoader else: yaml_loader = yaml.SafeLoader # type: ignore class HeatYamlLoader(yaml_loader): pass def _construct_yaml_str(self, node): # Override the default string handling function # to always return unicode objects return self.construct_scalar(node) HeatYamlLoader.add_constructor('tag:yaml.org,2002:str', _construct_yaml_str) # Unquoted dates like 2013-05-23 in yaml files get loaded as objects of type # datetime.data which causes problems in API layer when being processed by # openstack.common.jsonutils. Therefore, make unicode string out of timestamps # until jsonutils can handle dates. HeatYamlLoader.add_constructor( 'tag:yaml.org,2002:timestamp', _construct_yaml_str ) def parse(tmpl_str): """Takes a string and returns a dict containing the parsed structure. This includes determination of whether the string is using the JSON or YAML format. """ # strip any whitespace before the check tmpl_str = tmpl_str.strip() if tmpl_str.startswith('{'): tpl = json.loads(tmpl_str) else: try: tpl = yaml.load(tmpl_str, Loader=HeatYamlLoader) except yaml.YAMLError: # NOTE(prazumovsky): we need to return more informative error for # user, so use SafeLoader, which return error message with template # snippet where error has been occurred. 
try: tpl = yaml.load(tmpl_str, Loader=yaml.SafeLoader) except yaml.YAMLError as yea: raise ValueError(yea) else: if tpl is None: tpl = {} # Looking for supported version keys in the loaded template if not ( 'HeatTemplateFormatVersion' in tpl or 'heat_template_version' in tpl or 'AWSTemplateFormatVersion' in tpl ): raise ValueError("Template format version not found.") return tpl ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/util/template_utils.py0000664000175000017500000002601700000000000025472 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections.abc import json import typing as ty from urllib import parse from urllib import request from openstack import exceptions from openstack.orchestration.util import environment_format from openstack.orchestration.util import template_format from openstack.orchestration.util import utils def get_template_contents( template_file=None, template_url=None, template_object=None, object_request=None, files=None, existing=False, ): is_object = False tpl = None # Transform a bare file path to a file:// URL. 
if template_file: template_url = utils.normalise_file_path_to_url(template_file) if template_url: tpl = request.urlopen(template_url).read() elif template_object: is_object = True template_url = template_object tpl = object_request and object_request('GET', template_object) elif existing: return {}, None else: raise exceptions.SDKException( 'Must provide one of template_file,' ' template_url or template_object' ) if not tpl: raise exceptions.SDKException( 'Could not fetch template from %s' % template_url ) try: if isinstance(tpl, bytes): tpl = tpl.decode('utf-8') template = template_format.parse(tpl) except ValueError as e: raise exceptions.SDKException( 'Error parsing template %(url)s %(error)s' % {'url': template_url, 'error': e} ) tmpl_base_url = utils.base_url_for_url(template_url) if files is None: files = {} resolve_template_get_files( template, files, tmpl_base_url, is_object, object_request ) return files, template def resolve_template_get_files( template, files, template_base_url, is_object=False, object_request=None ): def ignore_if(key, value): if key != 'get_file' and key != 'type': return True if not isinstance(value, str): return True if key == 'type' and not value.endswith(('.yaml', '.template')): return True return False def recurse_if(value): return isinstance(value, (dict, list)) get_file_contents( template, files, template_base_url, ignore_if, recurse_if, is_object, object_request, ) def is_template(file_content): try: if isinstance(file_content, bytes): file_content = file_content.decode('utf-8') template_format.parse(file_content) except (ValueError, TypeError): return False return True def get_file_contents( from_data, files, base_url=None, ignore_if=None, recurse_if=None, is_object=False, object_request=None, ): if recurse_if and recurse_if(from_data): if isinstance(from_data, dict): recurse_data = from_data.values() else: recurse_data = from_data for value in recurse_data: get_file_contents( value, files, base_url, ignore_if, recurse_if, 
is_object, object_request, ) if isinstance(from_data, dict): for key, value in from_data.items(): if ignore_if and ignore_if(key, value): continue if base_url and not base_url.endswith('/'): base_url = base_url + '/' str_url = parse.urljoin(base_url, value) if str_url not in files: if is_object and object_request: file_content = object_request('GET', str_url) else: file_content = utils.read_url_content(str_url) if is_template(file_content): if is_object: template = get_template_contents( template_object=str_url, files=files, object_request=object_request, )[1] else: template = get_template_contents( template_url=str_url, files=files )[1] file_content = json.dumps(template) files[str_url] = file_content # replace the data value with the normalised absolute URL from_data[key] = str_url def deep_update(old, new): '''Merge nested dictionaries.''' # Prevents an error if in a previous iteration # old[k] = None but v[k] = {...}, if old is None: old = {} for k, v in new.items(): if isinstance(v, collections.abc.Mapping): r = deep_update(old.get(k, {}), v) old[k] = r else: old[k] = new[k] return old def process_multiple_environments_and_files( env_paths=None, template=None, template_url=None, env_path_is_object=None, object_request=None, env_list_tracker=None, ): """Reads one or more environment files. Reads in each specified environment file and returns a dictionary of the filenames->contents (suitable for the files dict) and the consolidated environment (after having applied the correct overrides based on order). If a list is provided in the env_list_tracker parameter, the behavior is altered to take advantage of server-side environment resolution. 
Specifically, this means: * Populating env_list_tracker with an ordered list of environment file URLs to be passed to the server * Including the contents of each environment file in the returned files dict, keyed by one of the URLs in env_list_tracker :param env_paths: list of paths to the environment files to load; if None, empty results will be returned :type env_paths: list or None :param template: unused; only included for API compatibility :param template_url: unused; only included for API compatibility :param env_list_tracker: if specified, environment filenames will be stored within :type env_list_tracker: list or None :return: tuple of files dict and a dict of the consolidated environment :rtype: tuple """ merged_files: ty.Dict[str, str] = {} merged_env: ty.Dict[str, ty.Dict] = {} # If we're keeping a list of environment files separately, include the # contents of the files in the files dict include_env_in_files = env_list_tracker is not None if env_paths: for env_path in env_paths: files, env = process_environment_and_files( env_path=env_path, template=template, template_url=template_url, env_path_is_object=env_path_is_object, object_request=object_request, include_env_in_files=include_env_in_files, ) # 'files' looks like {"filename1": contents, "filename2": contents} # so a simple update is enough for merging merged_files.update(files) # 'env' can be a deeply nested dictionary, so a simple update is # not enough merged_env = deep_update(merged_env, env) if env_list_tracker is not None: env_url = utils.normalise_file_path_to_url(env_path) env_list_tracker.append(env_url) return merged_files, merged_env def process_environment_and_files( env_path=None, template=None, template_url=None, env_path_is_object=None, object_request=None, include_env_in_files=False, ): """Loads a single environment file. Returns an entry suitable for the files dict which maps the environment filename to its contents. 
:param env_path: full path to the file to load :type env_path: str or None :param include_env_in_files: if specified, the raw environment file itself will be included in the returned files dict :type include_env_in_files: bool :return: tuple of files dict and the loaded environment as a dict :rtype: (dict, dict) """ files: ty.Dict[str, str] = {} env: ty.Dict[str, ty.Dict] = {} is_object = env_path_is_object and env_path_is_object(env_path) if is_object: raw_env = object_request and object_request('GET', env_path) env = environment_format.parse(raw_env) env_base_url = utils.base_url_for_url(env_path) resolve_environment_urls( env.get('resource_registry'), files, env_base_url, is_object=True, object_request=object_request, ) elif env_path: env_url = utils.normalise_file_path_to_url(env_path) env_base_url = utils.base_url_for_url(env_url) raw_env = request.urlopen(env_url).read() env = environment_format.parse(raw_env) resolve_environment_urls( env.get('resource_registry'), files, env_base_url ) if include_env_in_files: files[env_url] = json.dumps(env) return files, env def resolve_environment_urls( resource_registry, files, env_base_url, is_object=False, object_request=None, ): """Handles any resource URLs specified in an environment. :param resource_registry: mapping of type name to template filename :type resource_registry: dict :param files: dict to store loaded file contents into :type files: dict :param env_base_url: base URL to look in when loading files :type env_base_url: str or None """ if resource_registry is None: return rr = resource_registry base_url = rr.get('base_url', env_base_url) def ignore_if(key, value): if key == 'base_url': return True if isinstance(value, dict): return True if '::' in value: # Built in providers like: "X::Compute::Server" # don't need downloading. 
return True if key in ['hooks', 'restricted_actions']: return True get_file_contents( rr, files, base_url, ignore_if, is_object=is_object, object_request=object_request, ) for res_name, res_dict in rr.get('resources', {}).items(): res_base_url = res_dict.get('base_url', base_url) get_file_contents( res_dict, files, res_base_url, ignore_if, is_object=is_object, object_request=object_request, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/util/utils.py0000664000175000017500000000343200000000000023573 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import os from urllib import error from urllib import parse from urllib import request from openstack import exceptions def base_url_for_url(url): parsed = parse.urlparse(url) parsed_dir = os.path.dirname(parsed.path) return parse.urljoin(url, parsed_dir) def normalise_file_path_to_url(path): if parse.urlparse(path).scheme: return path path = os.path.abspath(path) return parse.urljoin('file:', request.pathname2url(path)) def read_url_content(url): try: # TODO(mordred) Use requests content = request.urlopen(url).read() except error.URLError: raise exceptions.SDKException('Could not fetch contents for %s' % url) if content: try: content = content.decode('utf-8') except ValueError: content = base64.encodebytes(content) return content def resource_nested_identifier(rsrc): nested_link = [ link for link in rsrc.links or [] if link.get('rel') == 'nested' ] if nested_link: nested_href = nested_link[0].get('href') nested_identifier = nested_href.split("/")[-2:] return "/".join(nested_identifier) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.317348 openstacksdk-4.0.0/openstack/orchestration/v1/0000775000175000017500000000000000000000000021430 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/__init__.py0000664000175000017500000000000000000000000023527 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/_proxy.py0000664000175000017500000006151300000000000023330 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack.orchestration.util import template_utils from openstack.orchestration.v1 import resource as _resource from openstack.orchestration.v1 import software_config as _sc from openstack.orchestration.v1 import software_deployment as _sd from openstack.orchestration.v1 import stack as _stack from openstack.orchestration.v1 import stack_environment as _stack_environment from openstack.orchestration.v1 import stack_event as _stack_event from openstack.orchestration.v1 import stack_files as _stack_files from openstack.orchestration.v1 import stack_template as _stack_template from openstack.orchestration.v1 import template as _template from openstack import proxy from openstack import resource # TODO(rladntjr4): Some of these methods support lookup by ID, while # others support lookup by ID or name. We should choose one and use # it consistently. 
class Proxy(proxy.Proxy): _resource_registry = { "resource": _resource.Resource, "software_config": _sc.SoftwareConfig, "software_deployment": _sd.SoftwareDeployment, "stack": _stack.Stack, "stack_environment": _stack_environment.StackEnvironment, "stack_files": _stack_files.StackFiles, "stack_template": _stack_template.StackTemplate, } def _extract_name_consume_url_parts(self, url_parts): if ( len(url_parts) == 3 and url_parts[0] == 'software_deployments' and url_parts[1] == 'metadata' ): # Another nice example of totally different URL naming scheme, # which we need to repair /software_deployment/metadata/server_id - # just replace server_id with metadata to keep further logic return ['software_deployment', 'metadata'] if ( url_parts[0] == 'stacks' and len(url_parts) > 2 and not url_parts[2] in ['preview', 'resources'] ): # orchestrate introduce having stack name and id part of the URL # (/stacks/name/id/everything_else), so if on third position we # have not a known part - discard it, not to brake further logic del url_parts[2] return super()._extract_name_consume_url_parts(url_parts) def read_env_and_templates( self, template_file=None, template_url=None, template_object=None, files=None, environment_files=None, ): """Read templates and environment content and prepares corresponding stack attributes :param string template_file: Path to the template. :param string template_url: URL of template. :param string template_object: URL to retrieve template object. :param dict files: dict of additional file content to include. :param environment_files: Paths to environment files to apply. 
:returns: Attributes dict to be set on the :class:`~openstack.orchestration.v1.stack.Stack` :rtype: dict """ stack_attrs = dict() envfiles = dict() tpl_files = None if environment_files: ( envfiles, env, ) = template_utils.process_multiple_environments_and_files( env_paths=environment_files ) stack_attrs['environment'] = env if template_file or template_url or template_object: tpl_files, template = template_utils.get_template_contents( template_file=template_file, template_url=template_url, template_object=template_object, files=files, ) stack_attrs['template'] = template if tpl_files or envfiles: stack_attrs['files'] = dict( list(tpl_files.items()) + list(envfiles.items()) ) return stack_attrs def create_stack(self, preview=False, **attrs): """Create a new stack from attributes :param bool preview: When ``True``, a preview endpoint will be used to verify the template *Default: ``False``* :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.orchestration.v1.stack.Stack`, comprised of the properties on the Stack class. :returns: The results of stack creation :rtype: :class:`~openstack.orchestration.v1.stack.Stack` """ base_path = None if not preview else '/stacks/preview' return self._create(_stack.Stack, base_path=base_path, **attrs) def find_stack( self, name_or_id, ignore_missing=True, resolve_outputs=True ): """Find a single stack :param name_or_id: The name or ID of a stack. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.orchestration.v1.stack.Stack` or None """ return self._find( _stack.Stack, name_or_id, ignore_missing=ignore_missing, resolve_outputs=resolve_outputs, ) def stacks(self, **query): """Return a generator of stacks :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of stack objects :rtype: :class:`~openstack.orchestration.v1.stack.Stack` """ return self._list(_stack.Stack, **query) def get_stack(self, stack, resolve_outputs=True): """Get a single stack :param stack: The value can be the ID of a stack or a :class:`~openstack.orchestration.v1.stack.Stack` instance. :param resolve_outputs: Whether stack should contain outputs resolved. :returns: One :class:`~openstack.orchestration.v1.stack.Stack` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_stack.Stack, stack, resolve_outputs=resolve_outputs) def update_stack(self, stack, preview=False, **attrs): """Update a stack :param stack: The value can be the ID of a stack or a :class:`~openstack.orchestration.v1.stack.Stack` instance. :param kwargs attrs: The attributes to update on the stack represented by ``value``. :returns: The updated stack :rtype: :class:`~openstack.orchestration.v1.stack.Stack` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ res = self._get_resource(_stack.Stack, stack, **attrs) return res.update(self, preview) def delete_stack(self, stack, ignore_missing=True): """Delete a stack :param stack: The value can be either the ID of a stack or a :class:`~openstack.orchestration.v1.stack.Stack` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the stack does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent stack. 
:returns: ``None`` """ self._delete(_stack.Stack, stack, ignore_missing=ignore_missing) def check_stack(self, stack): """Check a stack's status Since this is an asynchronous action, the only way to check the result is to track the stack's status. :param stack: The value can be either the ID of a stack or an instance of :class:`~openstack.orchestration.v1.stack.Stack`. :returns: ``None`` """ if isinstance(stack, _stack.Stack): stk_obj = stack else: stk_obj = _stack.Stack.existing(id=stack) stk_obj.check(self) def abandon_stack(self, stack): """Abandon a stack's without deleting it's resources :param stack: The value can be either the ID of a stack or an instance of :class:`~openstack.orchestration.v1.stack.Stack`. :returns: ``None`` """ res = self._get_resource(_stack.Stack, stack) return res.abandon(self) def export_stack(self, stack): """Get the stack data in JSON format :param stack: The value can be the ID or a name or an instance of :class:`~openstack.orchestration.v1.stack.Stack` :returns: A dictionary containing the stack data. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ if isinstance(stack, _stack.Stack): obj = stack else: obj = self._find(_stack.Stack, stack, ignore_missing=False) return obj.export(self) def suspend_stack(self, stack): """Suspend a stack status :param stack: The value can be either the ID of a stack or an instance of :class:`~openstack.orchestration.v1.stack.Stack`. :returns: ``None`` """ res = self._get_resource(_stack.Stack, stack) res.suspend(self) def resume_stack(self, stack): """Resume a stack status :param stack: The value can be either the ID of a stack or an instance of :class:`~openstack.orchestration.v1.stack.Stack`. 
:returns: ``None`` """ res = self._get_resource(_stack.Stack, stack) res.resume(self) def get_stack_template(self, stack): """Get template used by a stack :param stack: The value can be the ID of a stack or an instance of :class:`~openstack.orchestration.v1.stack.Stack` :returns: One object of :class:`~openstack.orchestration.v1.stack_template.StackTemplate` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ if isinstance(stack, _stack.Stack): obj = stack else: obj = self._find(_stack.Stack, stack, ignore_missing=False) return self._get( _stack_template.StackTemplate, requires_id=False, stack_name=obj.name, stack_id=obj.id, ) def get_stack_environment(self, stack): """Get environment used by a stack :param stack: The value can be the ID of a stack or an instance of :class:`~openstack.orchestration.v1.stack.Stack` :returns: One object of :class:`~openstack.orchestration.v1.stack_environment.StackEnvironment` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ if isinstance(stack, _stack.Stack): obj = stack else: obj = self._find(_stack.Stack, stack, ignore_missing=False) return self._get( _stack_environment.StackEnvironment, requires_id=False, stack_name=obj.name, stack_id=obj.id, ) def get_stack_files(self, stack): """Get files used by a stack :param stack: The value can be the ID of a stack or an instance of :class:`~openstack.orchestration.v1.stack.Stack` :returns: A dictionary containing the names and contents of all files used by the stack. :raises: :class:`~openstack.exceptions.NotFoundException` when the stack cannot be found. 
""" if isinstance(stack, _stack.Stack): stk = stack else: stk = self._find(_stack.Stack, stack, ignore_missing=False) obj = _stack_files.StackFiles(stack_name=stk.name, stack_id=stk.id) return obj.fetch(self) def resources(self, stack, **query): """Return a generator of resources :param stack: This can be a stack object, or the name of a stack for which the resources are to be listed. :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of resource objects if the stack exists and there are resources in it. If the stack cannot be found, an exception is thrown. :rtype: A generator of :class:`~openstack.orchestration.v1.resource.Resource` :raises: :class:`~openstack.exceptions.NotFoundException` when the stack cannot be found. """ # first try treat the value as a stack object or an ID if isinstance(stack, _stack.Stack): obj = stack else: obj = self._find(_stack.Stack, stack, ignore_missing=False) return self._list( _resource.Resource, stack_name=obj.name, stack_id=obj.id, **query ) def create_software_config(self, **attrs): """Create a new software config from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.orchestration.v1.software_config.SoftwareConfig`, comprised of the properties on the SoftwareConfig class. :returns: The results of software config creation :rtype: :class:`~openstack.orchestration.v1.software_config.SoftwareConfig` """ return self._create(_sc.SoftwareConfig, **attrs) def software_configs(self, **query): """Returns a generator of software configs :param dict query: Optional query parameters to be sent to limit the software configs returned. :returns: A generator of software config objects. :rtype: :class:`~openstack.orchestration.v1.software_config.SoftwareConfig` """ return self._list(_sc.SoftwareConfig, **query) def get_software_config(self, software_config): """Get details about a specific software config. 
:param software_config: The value can be the ID of a software config or a instace of :class:`~openstack.orchestration.v1.software_config.SoftwareConfig`, :returns: An object of type :class:`~openstack.orchestration.v1.software_config.SoftwareConfig` """ return self._get(_sc.SoftwareConfig, software_config) def delete_software_config(self, software_config, ignore_missing=True): """Delete a software config :param software_config: The value can be either the ID of a software config or an instance of :class:`~openstack.orchestration.v1.software_config.SoftwareConfig` :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the software config does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent software config. :returns: ``None`` """ self._delete( _sc.SoftwareConfig, software_config, ignore_missing=ignore_missing ) def create_software_deployment(self, **attrs): """Create a new software deployment from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment`, comprised of the properties on the SoftwareDeployment class. :returns: The results of software deployment creation :rtype: :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` """ return self._create(_sd.SoftwareDeployment, **attrs) def software_deployments(self, **query): """Returns a generator of software deployments :param dict query: Optional query parameters to be sent to limit the software deployments returned. :returns: A generator of software deployment objects. 
:rtype: :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` """ return self._list(_sd.SoftwareDeployment, **query) def get_software_deployment(self, software_deployment): """Get details about a specific software deployment resource :param software_deployment: The value can be the ID of a software deployment or an instace of :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment`, :returns: An object of type :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` """ return self._get(_sd.SoftwareDeployment, software_deployment) def delete_software_deployment( self, software_deployment, ignore_missing=True ): """Delete a software deployment :param software_deployment: The value can be either the ID of a software deployment or an instance of :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the software deployment does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent software deployment. :returns: ``None`` """ self._delete( _sd.SoftwareDeployment, software_deployment, ignore_missing=ignore_missing, ) def update_software_deployment(self, software_deployment, **attrs): """Update a software deployment :param server: Either the ID of a software deployment or an instance of :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` :param dict attrs: The attributes to update on the software deployment represented by ``software_deployment``. :returns: The updated software deployment :rtype: :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` """ return self._update( _sd.SoftwareDeployment, software_deployment, **attrs ) def validate_template( self, template, environment=None, template_url=None, ignore_errors=None ): """Validates a template. 
:param template: The stack template on which the validation is performed. :param environment: A JSON environment for the stack, if provided. :param template_url: A URI to the location containing the stack template for validation. This parameter is only required if the ``template`` parameter is None. This parameter is ignored if ``template`` is specified. :param ignore_errors: A string containing comma separated error codes to ignore. Currently the only valid error code is '99001'. :returns: The result of template validation. :raises: :class:`~openstack.exceptions.InvalidRequest` if neither `template` not `template_url` is provided. :raises: :class:`~openstack.exceptions.HttpException` if the template fails the validation. """ if template is None and template_url is None: raise exceptions.InvalidRequest( "'template_url' must be specified when template is None" ) tmpl = _template.Template.new() return tmpl.validate( self, template, environment=environment, template_url=template_url, ignore_errors=ignore_errors, ) def wait_for_status( self, res, status='ACTIVE', failures=None, interval=2, wait=120 ): """Wait for a resource to be in a particular status. :param res: The resource to wait on to reach the specified status. The resource must have a ``status`` attribute. :type resource: A :class:`~openstack.resource.Resource` object. :param status: Desired status. :param failures: Statuses that would be interpreted as failures. :type failures: :py:class:`list` :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to the desired status failed to occur in specified seconds. :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource has transited to one of the failure statuses. 
:raises: :class:`~AttributeError` if the resource does not have a ``status`` attribute. """ failures = [] if failures is None else failures return resource.wait_for_status( self, res, status, failures, interval, wait ) def wait_for_delete(self, res, interval=2, wait=120): """Wait for a resource to be deleted. :param res: The resource to wait on to be deleted. :type resource: A :class:`~openstack.resource.Resource` object. :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to delete failed to occur in the specified seconds. """ return resource.wait_for_delete(self, res, interval, wait) def get_template_contents( self, template_file=None, template_url=None, template_object=None, files=None, ): try: return template_utils.get_template_contents( template_file=template_file, template_url=template_url, template_object=template_object, files=files, ) except Exception as e: raise exceptions.SDKException( "Error in processing template files: %s" % str(e) ) def _get_cleanup_dependencies(self): return { 'orchestration': {'before': ['compute', 'network', 'identity']} } def _service_cleanup( self, dry_run=True, client_status_queue=None, identified_resources=None, filters=None, resource_evaluation_fn=None, skip_resources=None, ): if self.should_skip_resource_cleanup("stack", skip_resources): return stacks = [] for obj in self.stacks(): need_delete = self._service_cleanup_del_res( self.delete_stack, obj, dry_run=dry_run, client_status_queue=client_status_queue, identified_resources=identified_resources, filters=filters, resource_evaluation_fn=resource_evaluation_fn, ) if not dry_run and need_delete: stacks.append(obj) for stack in stacks: self.wait_for_delete(stack) def stack_events(self, stack, resource_name=None, **attr): """Get a stack events 
:param stack: The value can be the ID of a stack or an instance of :class:`~openstack.orchestration.v1.stack.Stack` :param resource_name: The name of resource. If the resource_name is not None, the base_path changes. :returns: A generator of stack_events objects :rtype: :class:`~openstack.orchestration.v1.stack_event.StackEvent` """ if isinstance(stack, _stack.Stack): obj = stack else: obj = self._get(_stack.Stack, stack) if resource_name: base_path = '/stacks/%(stack_name)s/%(stack_id)s/resources/%(resource_name)s/events' return self._list( _stack_event.StackEvent, stack_name=obj.name, stack_id=obj.id, resource_name=resource_name, base_path=base_path, **attr ) return self._list( _stack_event.StackEvent, stack_name=obj.name, stack_id=obj.id, **attr ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/resource.py0000664000175000017500000000427000000000000023634 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Resource(resource.Resource): name_attribute = 'resource_name' resource_key = 'resource' resources_key = 'resources' base_path = '/stacks/%(stack_name)s/%(stack_id)s/resources' # capabilities allow_create = False allow_list = True allow_retrieve = False allow_delete = False allow_commit = False # Properties #: A list of dictionaries containing links relevant to the resource. 
links = resource.Body('links') #: ID of the logical resource, usually the literal name of the resource #: as it appears in the stack template. logical_resource_id = resource.Body( 'logical_resource_id', alternate_id=True ) #: Name of the resource. name = resource.Body('resource_name') #: ID of the physical resource (if any) that backs up the resource. For #: example, it contains a nova server ID if the resource is a nova #: server. physical_resource_id = resource.Body('physical_resource_id') #: A list of resource names that depend on this resource. This #: property facilitates the deduction of resource dependencies. #: *Type: list* required_by = resource.Body('required_by', type=list) #: A string representation of the resource type. resource_type = resource.Body('resource_type') #: A string representing the status the resource is currently in. status = resource.Body('resource_status') #: A string that explains why the resource is in its current status. status_reason = resource.Body('resource_status_reason') #: Timestamp of the last update made to the resource. updated_at = resource.Body('updated_time') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/software_config.py0000664000175000017500000000372000000000000025163 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class SoftwareConfig(resource.Resource): resource_key = 'software_config' resources_key = 'software_configs' base_path = '/software_configs' # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True allow_commit = False # Properties #: Configuration script or manifest that defines which configuration is #: performed config = resource.Body('config') #: The date and time when the software config resource was created. created_at = resource.Body('creation_time') #: A string indicating the namespace used for grouping software configs. group = resource.Body('group') #: A list of schemas each representing an input this software config #: expects. inputs = resource.Body('inputs') #: Name of the software config. name = resource.Body('name') #: A string that contains options that are specific to the configuration #: management tool that this resource uses. options = resource.Body('options') #: A list of schemas each representing an output this software config #: produces. outputs = resource.Body('outputs') def create(self, session, base_path=None): # This overrides the default behavior of resource creation because # heat doesn't accept resource_key in its request. return super().create(session, prepend_key=False, base_path=base_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/software_deployment.py0000664000175000017500000000513400000000000026077 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class SoftwareDeployment(resource.Resource): resource_key = 'software_deployment' resources_key = 'software_deployments' base_path = '/software_deployments' # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True allow_commit = True # Properties #: The stack action that triggers this deployment resource. action = resource.Body('action') #: The UUID of the software config resource that runs when applying to the #: server. config_id = resource.Body('config_id') #: A map containing the names and values of all inputs to the config. input_values = resource.Body('input_values', type=dict) #: A map containing the names and values from the deployment. output_values = resource.Body('output_values', type=dict) #: The UUID of the compute server to which the configuration applies. server_id = resource.Body('server_id') #: The ID of the authentication project which can also perform operations #: on this deployment. stack_user_project_id = resource.Body('stack_user_project_id') #: Current status of the software deployment. status = resource.Body('status') #: Error description for the last status change. status_reason = resource.Body('status_reason') #: The date and time when the software deployment resource was created. created_at = resource.Body('creation_time') #: The date and time when the software deployment resource was created. 
updated_at = resource.Body('updated_time') def create(self, session, base_path=None): # This overrides the default behavior of resource creation because # heat doesn't accept resource_key in its request. return super().create(session, prepend_key=False, base_path=base_path) def commit(self, session, base_path=None): # This overrides the default behavior of resource creation because # heat doesn't accept resource_key in its request. return super().commit(session, prepend_key=False, base_path=base_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/stack.py0000664000175000017500000002643700000000000023123 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import tag from openstack import exceptions from openstack import resource from openstack import utils class Stack(resource.Resource): name_attribute = 'stack_name' resource_key = 'stack' resources_key = 'stacks' base_path = '/stacks' # capabilities allow_create = True allow_list = True allow_fetch = True allow_commit = True allow_delete = True _query_mapping = resource.QueryParameters( 'action', 'name', 'status', 'project_id', 'owner_id', 'username', project_id='tenant_id', **tag.TagMixin._tag_query_parameters, ) # Properties #: A list of resource objects that will be added if a stack update # is performed. 
added = resource.Body('added') #: Placeholder for AWS compatible template listing capabilities #: required by the stack. capabilities = resource.Body('capabilities') #: Timestamp of the stack creation. created_at = resource.Body('creation_time') #: A text description of the stack. description = resource.Body('description') #: A list of resource objects that will be deleted if a stack #: update is performed. deleted = resource.Body('deleted', type=list) #: Timestamp of the stack deletion. deleted_at = resource.Body('deletion_time') #: A JSON environment for the stack. environment = resource.Body('environment') #: An ordered list of names for environment files found in the files dict. environment_files = resource.Body('environment_files', type=list) #: Additional files referenced in the template or the environment files = resource.Body('files', type=dict) #: Name of the container in swift that has child #: templates and environment files. files_container = resource.Body('files_container') #: Whether the stack will support a rollback operation on stack #: create/update failures. *Type: bool* is_rollback_disabled = resource.Body('disable_rollback', type=bool) #: A list of dictionaries containing links relevant to the stack. links = resource.Body('links') #: Name of the stack. name = resource.Body('stack_name') stack_name = resource.URI('stack_name') #: Placeholder for future extensions where stack related events #: can be published. notification_topics = resource.Body('notification_topics') #: A list containing output keys and values from the stack, if any. outputs = resource.Body('outputs') #: The ID of the owner stack if any. owner_id = resource.Body('stack_owner') #: A dictionary containing the parameter names and values for the stack. parameters = resource.Body('parameters', type=dict) #: The ID of the parent stack if any parent_id = resource.Body('parent') #: A list of resource objects that will be replaced if a stack update #: is performed. 
replaced = resource.Body('replaced') #: A string representation of the stack status, e.g. ``CREATE_COMPLETE``. status = resource.Body('stack_status') #: A text explaining how the stack transits to its current status. status_reason = resource.Body('stack_status_reason') #: A list of strings used as tags on the stack tags = resource.Body('tags', type=list, default=[]) #: A dict containing the template use for stack creation. template = resource.Body('template', type=dict) #: Stack template description text. Currently contains the same text #: as that of the ``description`` property. template_description = resource.Body('template_description') #: A string containing the URL where a stack template can be found. template_url = resource.Body('template_url') #: Stack operation timeout in minutes. timeout_mins = resource.Body('timeout_mins') #: A list of resource objects that will remain unchanged if a stack #: update is performed. unchanged = resource.Body('unchanged') #: A list of resource objects that will have their properties updated #: in place if a stack update is performed. updated = resource.Body('updated') #: Timestamp of last update on the stack. updated_at = resource.Body('updated_time') #: The ID of the user project created for this stack. user_project_id = resource.Body('stack_user_project_id') def create(self, session, base_path=None): # This overrides the default behavior of resource creation because # heat doesn't accept resource_key in its request. return super().create(session, prepend_key=False, base_path=base_path) def commit(self, session, base_path=None): # This overrides the default behavior of resource creation because # heat doesn't accept resource_key in its request. return super().commit( session, prepend_key=False, has_body=False, base_path=None ) def update(self, session, preview=False): # This overrides the default behavior of resource update because # we need to use other endpoint for update preview. 
base_path = None if self.name and self.id: base_path = '/stacks/{stack_name}/{stack_id}'.format( stack_name=self.name, stack_id=self.id, ) elif self.name or self.id: # We have only one of name/id. Do not try to build a stacks/NAME/ID # path base_path = '/stacks/{stack_identity}'.format( stack_identity=self.name or self.id ) request = self._prepare_request( prepend_key=False, requires_id=False, base_path=base_path ) microversion = self._get_microversion(session, action='commit') request_url = request.url if preview: request_url = utils.urljoin(request_url, 'preview') response = session.put( request_url, json=request.body, headers=request.headers, microversion=microversion, ) self.microversion = microversion self._translate_response(response, has_body=True) return self def _action(self, session, body): """Perform stack actions""" url = utils.urljoin(self.base_path, self._get_id(self), 'actions') resp = session.post(url, json=body, microversion=self.microversion) exceptions.raise_from_response(resp) return resp def check(self, session): return self._action(session, {'check': ''}) def abandon(self, session): url = utils.urljoin( self.base_path, self.name, self._get_id(self), 'abandon' ) resp = session.delete(url) return resp.json() def export(self, session): """Export a stack data :param session: The session to use for making this request. :return: A dictionary containing the stack data. 
""" url = utils.urljoin( self.base_path, self.name, self._get_id(self), 'export' ) resp = session.get(url) exceptions.raise_from_response(resp) return resp.json() def suspend(self, session): """Suspend a stack :param session: The session to use for making this request :returns: None """ body = {"suspend": None} self._action(session, body) def resume(self, session): """Resume a stack :param session: The session to use for making this request :returns: None """ body = {"resume": None} self._action(session, body) def fetch( self, session, requires_id=True, base_path=None, error_message=None, skip_cache=False, resolve_outputs=True, ): if not self.allow_fetch: raise exceptions.MethodNotSupported(self, "fetch") request = self._prepare_request( requires_id=requires_id, base_path=base_path ) # session = self._get_session(session) microversion = self._get_microversion(session, action='fetch') # NOTE(gtema): would be nice to simply use QueryParameters, however # Heat return 302 with parameters being set into URL and requests # apply parameters again, what results in them being set doubled if not resolve_outputs: request.url = request.url + '?resolve_outputs=False' response = session.get( request.url, microversion=microversion, skip_cache=skip_cache ) kwargs = {} if error_message: kwargs['error_message'] = error_message self.microversion = microversion self._translate_response(response, **kwargs) if self and self.status in ['DELETE_COMPLETE', 'ADOPT_COMPLETE']: raise exceptions.NotFoundException( "No stack found for %s" % self.id ) return self @classmethod def find(cls, session, name_or_id, ignore_missing=True, **params): """Find a resource by its name or id. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param name_or_id: This resource's identifier, if needed by the request. The default is ``None``. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict params: Any additional parameters to be passed into underlying methods, such as to :meth:`~openstack.resource.Resource.existing` in order to pass on URI parameters. :return: The :class:`Resource` object matching the given name or id or None if nothing matches. :raises: :class:`openstack.exceptions.DuplicateResource` if more than one resource is found for this request. :raises: :class:`openstack.exceptions.NotFoundException` if nothing is found and ignore_missing is ``False``. """ session = cls._get_session(session) # Try to short-circuit by looking directly for a matching ID. try: match = cls.existing( id=name_or_id, connection=session._get_connection(), **params ) return match.fetch(session, **params) except exceptions.NotFoundException: pass # NOTE(gtema) we do not do list, since previous call has done this # for us already if ignore_missing: return None raise exceptions.NotFoundException( f"No {cls.__name__} found for {name_or_id}" ) StackPreview = Stack ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/stack_environment.py0000664000175000017500000000330400000000000025533 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class StackEnvironment(resource.Resource): base_path = "/stacks/%(stack_name)s/%(stack_id)s/environment" # capabilities allow_create = False allow_list = False allow_fetch = True allow_delete = False allow_commit = False # Properties #: Name of the stack where the template is referenced. name = resource.URI('stack_name') # Backwards compat stack_name = name #: ID of the stack where the template is referenced. id = resource.URI('stack_id') # type: ignore # Backwards compat stack_id = id # type: ignore #: A list of parameter names whose values are encrypted encrypted_param_names = resource.Body('encrypted_param_names') #: A list of event sinks event_sinks = resource.Body('event_sinks') #: A map of parameters and their default values defined for the stack. parameter_defaults = resource.Body('parameter_defaults') #: A map of parametes defined in the stack template. parameters = resource.Body('parameters', type=dict) #: A map containing customized resource definitions. resource_registry = resource.Body('resource_registry', type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/stack_event.py0000664000175000017500000000351500000000000024314 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class StackEvent(resource.Resource): base_path = '/stacks/%(stack_name)s/%(stack_id)s/events' resources_key = 'events' # capabilities allow_create = False allow_list = True allow_fetch = True allow_delete = False allow_commit = False _query_mapping = resource.QueryParameters( "resource_action", "resource_status", "resource_name", "resource_type", "nested_depth", "sort_key", "sort_dir", ) # Properties #: The date and time when the event was created event_time = resource.Body('event_time') #: The ID of the event object id = resource.Body('id') #: A list of dictionaries containing links relevant to the stack. links = resource.Body('links') #: The ID of the logical stack resource. logical_resource_id = resource.Body('logical_resource_id') #: The ID of the stack physical resource. physical_resource_id = resource.Body('physical_resource_id') #: The name of the resource. resource_name = resource.Body('resource_name') #: The status of the resource. resource_status = resource.Body('resource_status') #: The reason for the current stack resource state. resource_status_reason = resource.Body('resource_status_reason') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/stack_files.py0000664000175000017500000000262300000000000024274 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class StackFiles(resource.Resource): base_path = "/stacks/%(stack_name)s/%(stack_id)s/files" # capabilities allow_create = False allow_list = False allow_fetch = True allow_delete = False allow_commit = False # Properties #: Name of the stack where the template is referenced. name = resource.URI('stack_name') # Backwards compat stack_name = name #: ID of the stack where the template is referenced. id = resource.URI('stack_id') # type: ignore # Backwards compat stack_id = id # type: ignore def fetch(self, session, base_path=None): # The stack files response contains a map of filenames and file # contents. request = self._prepare_request(requires_id=False, base_path=base_path) resp = session.get(request.url) return resp.json() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/stack_template.py0000664000175000017500000000374300000000000025011 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class StackTemplate(resource.Resource): base_path = "/stacks/%(stack_name)s/%(stack_id)s/template" # capabilities allow_create = False allow_list = False allow_fetch = True allow_delete = False allow_commit = False # Properties #: Name of the stack where the template is referenced. name = resource.URI('stack_name') # Backwards compat. 
_stack_name will never match, but the alias will # point it to the value pulled for name. stack_name = resource.URI('_stack_name', alias='name') #: ID of the stack where the template is referenced. stack_id = resource.URI('stack_id', alternate_id=True) #: The description specified in the template description = resource.Body('Description') #: The version of the orchestration HOT template. heat_template_version = resource.Body('heat_template_version') #: Key and value that contain output data. outputs = resource.Body('outputs', type=dict) #: Key and value pairs that contain template parameters parameters = resource.Body('parameters', type=dict) #: Key and value pairs that contain definition of resources in the #: template resources = resource.Body('resources', type=dict) # List parameters grouped. parameter_groups = resource.Body('parameter_groups', type=list) # Restrict conditions which supported since '2016-10-14'. conditions = resource.Body('conditions', type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/v1/template.py0000664000175000017500000000336200000000000023621 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from urllib import parse from openstack import resource class Template(resource.Resource): # capabilities allow_create = False allow_list = False allow_fetch = False allow_delete = False allow_commit = False # Properties #: The description specified in the template description = resource.Body('Description') #: Key and value pairs that contain template parameters parameters = resource.Body('Parameters', type=dict) #: A list of parameter groups each contains a lsit of parameter names. parameter_groups = resource.Body('ParameterGroups', type=list) def validate( self, session, template, environment=None, template_url=None, ignore_errors=None, ): url = '/validate' body = {'template': template} if environment is not None: body['environment'] = environment if template_url is not None: body['template_url'] = template_url if ignore_errors: qry = parse.urlencode({'ignore_errors': ignore_errors}) url = '?'.join([url, qry]) resp = session.post(url, json=body) self._translate_response(resp) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/orchestration/version.py0000664000175000017500000000147100000000000023144 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # capabilities allow_list = True # Properties links = resource.Body('links') status = resource.Body('status') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.317348 openstacksdk-4.0.0/openstack/placement/0000775000175000017500000000000000000000000020166 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/placement/__init__.py0000664000175000017500000000000000000000000022265 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/placement/placement_service.py0000664000175000017500000000142600000000000024233 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.placement.v1 import _proxy from openstack import service_description class PlacementService(service_description.ServiceDescription): """The placement service.""" supported_versions = { '1': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3213499 openstacksdk-4.0.0/openstack/placement/v1/0000775000175000017500000000000000000000000020514 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/placement/v1/__init__.py0000664000175000017500000000000000000000000022613 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/placement/v1/_proxy.py0000664000175000017500000004425000000000000022413 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.placement.v1 import resource_class as _resource_class from openstack.placement.v1 import resource_provider as _resource_provider from openstack.placement.v1 import ( resource_provider_inventory as _resource_provider_inventory, ) from openstack.placement.v1 import trait as _trait from openstack import proxy from openstack import resource class Proxy(proxy.Proxy): _resource_registry = { "resource_class": _resource_class.ResourceClass, "resource_provider": _resource_provider.ResourceProvider, } # resource classes def create_resource_class(self, **attrs): """Create a new resource class from attributes. :param attrs: Keyword arguments which will be used to create a :class:`~openstack.placement.v1.resource_provider.ResourceClass`, comprised of the properties on the ResourceClass class. :returns: The results of resource class creation :rtype: :class:`~openstack.placement.v1.resource_class.ResourceClass` """ return self._create(_resource_class.ResourceClass, **attrs) def delete_resource_class(self, resource_class, ignore_missing=True): """Delete a resource class :param resource_class: The value can be either the ID of a resource class or an :class:`~openstack.placement.v1.resource_class.ResourceClass`, instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource class does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent resource class. :returns: ``None`` """ self._delete( _resource_class.ResourceClass, resource_class, ignore_missing=ignore_missing, ) def update_resource_class(self, resource_class, **attrs): """Update a resource class :param resource_class: The value can be either the ID of a resource class or an :class:`~openstack.placement.v1.resource_class.ResourceClass`, instance. :param attrs: The attributes to update on the resource class represented by ``resource_class``. 
:returns: The updated resource class :rtype: :class:`~openstack.placement.v1.resource_class.ResourceClass` """ return self._update( _resource_class.ResourceClass, resource_class, **attrs, ) def get_resource_class(self, resource_class): """Get a single resource_class. :param resource_class: The value can be either the ID of a resource class or an :class:`~openstack.placement.v1.resource_class.ResourceClass`, instance. :returns: An instance of :class:`~openstack.placement.v1.resource_class.ResourceClass` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource class matching the criteria could be found. """ return self._get( _resource_class.ResourceClass, resource_class, ) def resource_classes(self, **query): """Retrieve a generator of resource classs. :param kwargs query: Optional query parameters to be sent to restrict the resource classs to be returned. :returns: A generator of resource class instances. """ return self._list(_resource_class.ResourceClass, **query) # resource providers def create_resource_provider(self, **attrs): """Create a new resource provider from attributes. :param attrs: Keyword arguments which will be used to create a :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, comprised of the properties on the ResourceProvider class. :returns: The results of resource provider creation :rtype: :class:`~openstack.placement.v1.resource_provider.ResourceProvider` """ # noqa: E501 return self._create(_resource_provider.ResourceProvider, **attrs) def delete_resource_provider(self, resource_provider, ignore_missing=True): """Delete a resource provider :param resource_provider: The value can be either the ID of a resource provider or an :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource provider does not exist. 
When set to ``True``, no exception will be set when attempting to delete a nonexistent resource provider. :returns: ``None`` """ self._delete( _resource_provider.ResourceProvider, resource_provider, ignore_missing=ignore_missing, ) def update_resource_provider(self, resource_provider, **attrs): """Update a resource provider :param resource_provider: The value can be either the ID of a resource provider or an :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, instance. :param attrs: The attributes to update on the resource provider represented by ``resource_provider``. :returns: The updated resource provider :rtype: :class:`~openstack.placement.v1.resource_provider.ResourceProvider` """ # noqa: E501 return self._update( _resource_provider.ResourceProvider, resource_provider, **attrs, ) def get_resource_provider(self, resource_provider): """Get a single resource_provider. :param resource_provider: The value can be either the ID of a resource provider or an :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, instance. :returns: An instance of :class:`~openstack.placement.v1.resource_provider.ResourceProvider` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource provider matching the criteria could be found. """ return self._get( _resource_provider.ResourceProvider, resource_provider, ) def find_resource_provider(self, name_or_id, ignore_missing=True): """Find a single resource_provider. :param name_or_id: The name or ID of a resource provider. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: An instance of :class:`~openstack.placement.v1.resource_provider.ResourceProvider` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource provider matching the criteria could be found. 
""" return self._find( _resource_provider.ResourceProvider, name_or_id, ignore_missing=ignore_missing, ) def resource_providers(self, **query): """Retrieve a generator of resource providers. :param kwargs query: Optional query parameters to be sent to restrict the resource providers to be returned. :returns: A generator of resource provider instances. """ return self._list(_resource_provider.ResourceProvider, **query) # resource provider aggregates def get_resource_provider_aggregates(self, resource_provider): """Get a list of aggregates for a resource provider. :param resource_provider: The value can be either the ID of a resource provider or an :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, instance. :returns: An instance of :class:`~openstack.placement.v1.resource_provider.ResourceProvider` with the ``aggregates`` attribute populated. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource provider matching the criteria could be found. """ res = self._get_resource( _resource_provider.ResourceProvider, resource_provider, ) return res.fetch_aggregates(self) def set_resource_provider_aggregates(self, resource_provider, *aggregates): """Update aggregates for a resource provider. :param resource_provider: The value can be either the ID of a resource provider or an :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, instance. :param aggregates: A list of aggregates. These aggregates will replace all aggregates currently present. :returns: An instance of :class:`~openstack.placement.v1.resource_provider.ResourceProvider` with the ``aggregates`` attribute populated with the updated value. :raises: :class:`~openstack.exceptions.NotFoundException` when no resource provider matching the criteria could be found. 
""" res = self._get_resource( _resource_provider.ResourceProvider, resource_provider, ) return res.set_aggregates(self, aggregates=aggregates) # resource provider inventories def create_resource_provider_inventory( self, resource_provider, resource_class, *, total, **attrs, ): """Create a new resource provider inventory from attributes :param resource_provider: Either the ID of a resource provider or a :class:`~openstack.placement.v1.resource_provider.ResourceProvider` instance. :param total: The actual amount of the resource that the provider can accommodate. :param attrs: Keyword arguments which will be used to create a :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory`, comprised of the properties on the ResourceProviderInventory class. :returns: The results of resource provider inventory creation :rtype: :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory` """ # noqa: E501 resource_provider_id = resource.Resource._get_id(resource_provider) resource_class_name = resource.Resource._get_id(resource_class) return self._create( _resource_provider_inventory.ResourceProviderInventory, resource_provider_id=resource_provider_id, resource_class=resource_class_name, total=total, **attrs, ) def delete_resource_provider_inventory( self, resource_provider_inventory, resource_provider=None, ignore_missing=True, ): """Delete a resource provider inventory :param resource_provider_inventory: The value can be either the ID of a resource provider or an :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory`, instance. :param resource_provider: Either the ID of a resource provider or a :class:`~openstack.placement.v1.resource_provider.ResourceProvider` instance. This value must be specified when ``resource_provider_inventory`` is an ID. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource provider inventory does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent resource provider inventory. :returns: ``None`` """ resource_provider_id = self._get_uri_attribute( resource_provider_inventory, resource_provider, 'resource_provider_id', ) self._delete( _resource_provider_inventory.ResourceProviderInventory, resource_provider_inventory, resource_provider_id=resource_provider_id, ignore_missing=ignore_missing, ) def update_resource_provider_inventory( self, resource_provider_inventory, resource_provider=None, *, resource_provider_generation=None, **attrs, ): """Update a resource provider's inventory :param resource_provider_inventory: The value can be either the ID of a resource provider inventory or an :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory`, instance. :param resource_provider: Either the ID of a resource provider or a :class:`~openstack.placement.v1.resource_provider.ResourceProvider` instance. This value must be specified when ``resource_provider_inventory`` is an ID. :attrs kwargs: The attributes to update on the resource provider inventory represented by ``resource_provider_inventory``. 
:returns: The updated resource provider inventory :rtype: :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory` """ # noqa: E501 resource_provider_id = self._get_uri_attribute( resource_provider_inventory, resource_provider, 'resource_provider_id', ) return self._update( _resource_provider_inventory.ResourceProviderInventory, resource_provider_inventory, resource_provider_id=resource_provider_id, resource_provider_generation=resource_provider_generation, **attrs, ) def get_resource_provider_inventory( self, resource_provider_inventory, resource_provider=None, ): """Get a single resource_provider_inventory :param resource_provider_inventory: The value can be either the ID of a resource provider inventory or an :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory`, instance. :param resource_provider: Either the ID of a resource provider or a :class:`~openstack.placement.v1.resource_provider.ResourceProvider` instance. This value must be specified when ``resource_provider_inventory`` is an ID. :returns: An instance of :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory` :raises: :class:`~openstack.exceptions.NotFoundException` when no resource provider inventory matching the criteria could be found. """ resource_provider_id = self._get_uri_attribute( resource_provider_inventory, resource_provider, 'resource_provider_id', ) return self._get( _resource_provider_inventory.ResourceProviderInventory, resource_provider_inventory, resource_provider_id=resource_provider_id, ) def resource_provider_inventories(self, resource_provider, **query): """Retrieve a generator of resource provider inventories :param resource_provider: Either the ID of a resource provider or a :class:`~openstack.placement.v1.resource_provider.ResourceProvider` instance. :param query: Optional query parameters to be sent to limit the resources being returned. 
:returns: A generator of resource provider inventory instances. """ resource_provider_id = resource.Resource._get_id(resource_provider) return self._list( _resource_provider_inventory.ResourceProviderInventory, resource_provider_id=resource_provider_id, **query, ) # ========== Traits ========== def create_trait(self, name): """Create a new trait :param name: The name of the new trait :returns: The results of trait creation :rtype: :class:`~openstack.placement.v1.trait.Trait` """ return self._create(_trait.Trait, name=name) def delete_trait(self, trait, ignore_missing=True): """Delete a trait :param trait: The value can be either the ID of a trait or an :class:`~openstack.placement.v1.trait.Trait`, instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource provider inventory does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent resource provider inventory. :returns: ``None`` """ self._delete(_trait.Trait, trait, ignore_missing=ignore_missing) def get_trait(self, trait): """Get a single trait :param trait: The value can be either the ID of a trait or an :class:`~openstack.placement.v1.trait.Trait`, instance. :returns: An instance of :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory` :raises: :class:`~openstack.exceptions.NotFoundException` when no trait matching the criteria could be found. """ return self._get(_trait.Trait, trait) def traits(self, **query): """Retrieve a generator of traits :param query: Optional query parameters to be sent to limit the resources being returned. 
:returns: A generator of trait objects """ return self._list(_trait.Trait, **query) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/placement/v1/resource_class.py0000664000175000017500000000167700000000000024115 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ResourceClass(resource.Resource): resource_key = None resources_key = 'resource_classes' base_path = '/resource_classes' # Capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Added in 1.2 _max_microversion = '1.2' name = resource.Body('name', alternate_id=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/placement/v1/resource_provider.py0000664000175000017500000000774300000000000024642 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class ResourceProvider(resource.Resource): resource_key = None resources_key = 'resource_providers' base_path = '/resource_providers' # Capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Filters _query_mapping = resource.QueryParameters( 'name', 'member_of', 'resources', 'in_tree', 'required', id='uuid', ) # The parent_provider_uuid and root_provider_uuid fields were introduced in # 1.14 # The required query parameter was added in 1.18 # The create operation started returning a body in 1.20 _max_microversion = '1.20' # Properties #: Aggregates aggregates = resource.Body('aggregates', type=list, list_type=str) #: The UUID of a resource provider. id = resource.Body('uuid', alternate_id=True) #: A consistent view marker that assists with the management of concurrent #: resource provider updates. generation = resource.Body('generation') #: Links pertaining to this flavor. This is a list of dictionaries, #: each including keys ``href`` and ``rel``. links = resource.Body('links') #: The name of this resource provider. name = resource.Body('name') #: The UUID of the immediate parent of the resource provider. parent_provider_id = resource.Body('parent_provider_uuid') #: Read-only UUID of the top-most provider in this provider tree. 
root_provider_id = resource.Body('root_provider_uuid') def fetch_aggregates(self, session): """List aggregates set on the resource provider :param session: The session to use for making this request :return: The resource provider with aggregates populated """ url = utils.urljoin(self.base_path, self.id, 'aggregates') microversion = self._get_microversion(session, action='fetch') response = session.get(url, microversion=microversion) exceptions.raise_from_response(response) data = response.json() updates = {'aggregates': data['aggregates']} if utils.supports_microversion(session, '1.19'): updates['generation'] = data['resource_provider_generation'] self._body.attributes.update(updates) return self def set_aggregates(self, session, aggregates=None): """Replaces aggregates on the resource provider :param session: The session to use for making this request :param list aggregates: List of aggregates :return: The resource provider with updated aggregates populated """ url = utils.urljoin(self.base_path, self.id, 'aggregates') microversion = self._get_microversion(session, action='commit') body = { 'aggregates': aggregates or [], } if utils.supports_microversion(session, '1.19'): body['resource_provider_generation'] = self.generation response = session.put(url, json=body, microversion=microversion) exceptions.raise_from_response(response) data = response.json() updates = {'aggregates': data['aggregates']} if 'resource_provider_generation' in data: updates['resource_provider_generation'] = data[ 'resource_provider_generation' ] self._body.attributes.update(updates) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/placement/v1/resource_provider_inventory.py0000664000175000017500000001547200000000000026755 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource class ResourceProviderInventory(resource.Resource): resource_key = None resources_key = None base_path = '/resource_providers/%(resource_provider_id)s/inventories' _query_mapping = resource.QueryParameters( include_pagination_defaults=False ) # Capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True # Properties #: The UUID of a resource provider. resource_provider_id = resource.URI('resource_provider_id') #: The name of the resource class. resource_class = resource.Body('resource_class', alternate_id=True) #: A consistent view marker that assists with the management of concurrent #: resource provider updates. resource_provider_generation = resource.Body( 'resource_provider_generation', type=int, ) #: It is used in determining whether consumption of the resource of the #: provider can exceed physical constraints. allocation_ratio = resource.Body('allocation_ratio', type=float) #: A maximum amount any single allocation against an inventory can have. max_unit = resource.Body('max_unit', type=int) #: A minimum amount any single allocation against an inventory can have. min_unit = resource.Body('min_unit', type=int) #: The amount of the resource a provider has reserved for its own use. reserved = resource.Body('reserved', type=int) #: A representation of the divisible amount of the resource that may be #: requested. For example, step_size = 5 means that only values divisible #: by 5 (5, 10, 15, etc.) can be requested. 
step_size = resource.Body('step_size', type=int) #: The actual amount of the resource that the provider can accommodate. total = resource.Body('total', type=int) def commit( self, session, prepend_key=True, has_body=True, retry_on_conflict=None, base_path=None, *, microversion=None, **kwargs, ): # resource_provider_generation must always be provided on update, but # it will appear to be identical (by design) so we strip it. Prevent # tihs happening. self._body._dirty.add('resource_provider_generation') return super().commit( session, prepend_key=prepend_key, has_body=has_body, retry_on_conflict=retry_on_conflict, base_path=base_path, microversion=microversion, **kwargs, ) # TODO(stephenfin): It would be nicer if we could do this in Resource # itself since the logic is also found elsewhere (e.g. # openstack.identity.v2.extension.Extension) but that code is a bit of a # rat's nest right now and needs a spring clean @classmethod def list( cls, session, paginated=True, base_path=None, allow_unknown_params=False, *, microversion=None, **params, ): """This method is a generator which yields resource objects. A re-implementation of :meth:`~openstack.resource.Resource.list` that handles placement's single, unpaginated list implementation. Refer to :meth:`~openstack.resource.Resource.list` for full documentation including parameter, exception and return type documentation. 
""" session = cls._get_session(session) if microversion is None: microversion = cls._get_microversion(session, action='list') if base_path is None: base_path = cls.base_path # There is no server-side filtering, only client-side client_filters = {} # Gather query parameters which are not supported by the server for k, v in params.items(): if ( # Known attr hasattr(cls, k) # Is real attr property and isinstance(getattr(cls, k), resource.Body) # not included in the query_params and k not in cls._query_mapping._mapping.keys() ): client_filters[k] = v uri = base_path % params uri_params = {} for k, v in params.items(): # We need to gather URI parts to set them on the resource later if hasattr(cls, k) and isinstance(getattr(cls, k), resource.URI): uri_params[k] = v def _dict_filter(f, d): """Dict param based filtering""" if not d: return False for key in f.keys(): if isinstance(f[key], dict): if not _dict_filter(f[key], d.get(key, None)): return False elif d.get(key, None) != f[key]: return False return True response = session.get( uri, headers={"Accept": "application/json"}, params={}, microversion=microversion, ) exceptions.raise_from_response(response) data = response.json() for resource_class, resource_data in data['inventories'].items(): resource_inventory = { 'resource_class': resource_class, 'resource_provider_generation': data[ 'resource_provider_generation' ], # noqa: E501 **resource_data, **uri_params, } value = cls.existing( microversion=microversion, connection=session._get_connection(), **resource_inventory, ) filters_matched = True # Iterate over client filters and return only if matching for key in client_filters.keys(): if isinstance(client_filters[key], dict): if not _dict_filter( client_filters[key], value.get(key, None), ): filters_matched = False break elif value.get(key, None) != client_filters[key]: filters_matched = False break if filters_matched: yield value return None ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/placement/v1/trait.py0000664000175000017500000001051400000000000022212 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource class Trait(resource.Resource): resource_key = None resources_key = None base_path = '/traits' # Capabilities allow_create = True allow_fetch = True allow_delete = True allow_list = True create_method = 'PUT' # Added in 1.6 _max_microversion = '1.6' _query_mapping = resource.QueryParameters( 'name', 'associated', include_pagination_defaults=False, ) name = resource.Body('name', alternate_id=True) @classmethod def list( cls, session, paginated=True, base_path=None, allow_unknown_params=False, *, microversion=None, **params, ): """This method is a generator which yields resource objects. A re-implementation of :meth:`~openstack.resource.Resource.list` that handles the list of strings (as opposed to a list of objects) that this call returns. Refer to :meth:`~openstack.resource.Resource.list` for full documentation including parameter, exception and return type documentation. 
""" session = cls._get_session(session) if microversion is None: microversion = cls._get_microversion(session, action='list') if base_path is None: base_path = cls.base_path # There is no server-side filtering, only client-side client_filters = {} # Gather query parameters which are not supported by the server for k, v in params.items(): if ( # Known attr hasattr(cls, k) # Is real attr property and isinstance(getattr(cls, k), resource.Body) # not included in the query_params and k not in cls._query_mapping._mapping.keys() ): client_filters[k] = v uri = base_path % params uri_params = {} for k, v in params.items(): # We need to gather URI parts to set them on the resource later if hasattr(cls, k) and isinstance(getattr(cls, k), resource.URI): uri_params[k] = v def _dict_filter(f, d): """Dict param based filtering""" if not d: return False for key in f.keys(): if isinstance(f[key], dict): if not _dict_filter(f[key], d.get(key, None)): return False elif d.get(key, None) != f[key]: return False return True response = session.get( uri, headers={"Accept": "application/json"}, params={}, microversion=microversion, ) exceptions.raise_from_response(response) data = response.json() for trait_name in data['traits']: trait = { 'name': trait_name, **uri_params, } value = cls.existing( microversion=microversion, connection=session._get_connection(), **trait, ) filters_matched = True # Iterate over client filters and return only if matching for key in client_filters.keys(): if isinstance(client_filters[key], dict): if not _dict_filter( client_filters[key], value.get(key, None), ): filters_matched = False break elif value.get(key, None) != client_filters[key]: filters_matched = False break if filters_matched: yield value return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/proxy.py0000664000175000017500000010332600000000000017756 0ustar00zuulzuul00000000000000# Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import typing as ty import urllib from urllib.parse import urlparse try: import simplejson JSONDecodeError = simplejson.scanner.JSONDecodeError except ImportError: JSONDecodeError = ValueError # type: ignore import iso8601 import jmespath from keystoneauth1 import adapter from openstack import _log from openstack import exceptions from openstack import resource ResourceType = ty.TypeVar('ResourceType', bound=resource.Resource) # The _check_resource decorator is used on Proxy methods to ensure that # the `actual` argument is in fact the type of the `expected` argument. # It does so under two cases: # 1. When strict=False, if and only if `actual` is a Resource instance, # it is checked to see that it's an instance of the `expected` class. # This allows `actual` to be other types, such as strings, when it makes # sense to accept a raw id value. # 2. When strict=True, `actual` must be an instance of the `expected` class. 
def _check_resource(strict=False): def wrap(method): def check(self, expected, actual=None, *args, **kwargs): if ( strict and actual is not None and not isinstance(actual, resource.Resource) ): raise ValueError("A %s must be passed" % expected.__name__) elif isinstance(actual, resource.Resource) and not isinstance( actual, expected ): raise ValueError( "Expected %s but received %s" % (expected.__name__, actual.__class__.__name__) ) return method(self, expected, actual, *args, **kwargs) return check return wrap def normalize_metric_name(name): name = name.replace('.', '_') name = name.replace(':', '_') return name class Proxy(adapter.Adapter): """Represents a service.""" retriable_status_codes: ty.Optional[ty.List[int]] = None """HTTP status codes that should be retried by default. The number of retries is defined by the configuration in parameters called ``_status_code_retries``. """ _resource_registry: ty.Dict[str, ty.Type[resource.Resource]] = {} """Registry of the supported resourses. Dictionary of resource names (key) types (value). """ def __init__( self, session, statsd_client=None, statsd_prefix=None, prometheus_counter=None, prometheus_histogram=None, influxdb_config=None, influxdb_client=None, *args, **kwargs, ): # NOTE(dtantsur): keystoneauth defaults retriable_status_codes to None, # override it with a class-level value. 
kwargs.setdefault( 'retriable_status_codes', self.retriable_status_codes ) super().__init__(session=session, *args, **kwargs) self._statsd_client = statsd_client self._statsd_prefix = statsd_prefix self._prometheus_counter = prometheus_counter self._prometheus_histogram = prometheus_histogram self._influxdb_client = influxdb_client self._influxdb_config = influxdb_config if self.service_type: log_name = f'openstack.{self.service_type}' else: log_name = 'openstack' self.log = _log.setup_logging(log_name) def _get_cache_key_prefix(self, url): """Calculate cache prefix for the url""" name_parts = self._extract_name( url, self.service_type, self.session.get_project_id() ) return '.'.join([self.service_type] + name_parts) def _invalidate_cache(self, conn, key_prefix): """Invalidate all cache entries starting with given prefix""" for k in set(conn._api_cache_keys): if k.startswith(key_prefix): conn._cache.delete(k) conn._api_cache_keys.remove(k) def request( self, url, method, error_message=None, raise_exc=False, connect_retries=1, global_request_id=None, *args, **kwargs, ): conn = self._get_connection() if not global_request_id: # Per-request setting should take precedence global_request_id = conn._global_request_id key = None key_prefix = self._get_cache_key_prefix(url) # The caller might want to force cache bypass. skip_cache = kwargs.pop('skip_cache', False) if conn.cache_enabled: # Construct cache key. 
It consists of: # service.name_parts.URL.str(kwargs) key = '.'.join([key_prefix, url, str(kwargs)]) # Track cache key for invalidating possibility conn._api_cache_keys.add(key) try: if conn.cache_enabled and not skip_cache and method == 'GET': # Get the object expiration time from config # default to 0 to disable caching for this resource type expiration_time = int( conn._cache_expirations.get(key_prefix, 0) ) # Get from cache or execute and cache response = conn._cache.get_or_create( key=key, creator=super().request, creator_args=( [url, method], dict( connect_retries=connect_retries, raise_exc=raise_exc, global_request_id=global_request_id, **kwargs, ), ), expiration_time=expiration_time, ) else: # invalidate cache if we send modification request or user # asked for cache bypass self._invalidate_cache(conn, key_prefix) # Pass through the API request bypassing cache response = super().request( url, method, connect_retries=connect_retries, raise_exc=raise_exc, global_request_id=global_request_id, **kwargs, ) for h in response.history: self._report_stats(h) self._report_stats(response) return response except Exception as e: # If we want metrics to be generated we also need to generate some # in case of exceptions as well, so that timeouts and connection # problems (especially when called from ansible) are being # generated as well. self._report_stats(None, url, method, e) raise # TODO(stephenfin): service_type is unused and should be dropped @functools.lru_cache(maxsize=256) def _extract_name(self, url, service_type=None, project_id=None): """Produce a key name to use in logging/metrics from the URL path. We want to be able to logic/metric sane general things, so we pull the url apart to generate names. 
The function returns a list because there are two different ways in which the elements want to be combined below (one for logging, one for statsd) Some examples are likely useful:: /servers -> ['servers'] /servers/{id} -> ['server'] /servers/{id}/os-security-groups -> ['server', 'os-security-groups'] /v2.0/networks.json -> ['networks'] """ url_path = urllib.parse.urlparse(url).path.strip() # Remove / from the beginning to keep the list indexes of interesting # things consistent if url_path.startswith('/'): url_path = url_path[1:] # Special case for neutron, which puts .json on the end of urls if url_path.endswith('.json'): url_path = url_path[: -len('.json')] # Split url into parts and exclude potential project_id in some urls url_parts = [ x for x in url_path.split('/') if ( x != project_id and ( not project_id or (project_id and x != 'AUTH_' + project_id) ) ) ] if url_parts[-1] == 'detail': # Special case detail calls # GET /servers/detail # returns ['servers', 'detail'] name_parts = url_parts[-2:] else: # Strip leading version piece so that # GET /v2.0/networks # returns ['networks'] if ( url_parts[0] and url_parts[0][0] == 'v' and url_parts[0][1] and url_parts[0][1].isdigit() ): url_parts = url_parts[1:] name_parts = self._extract_name_consume_url_parts(url_parts) # Keystone Token fetching is a special case, so we name it "tokens" # NOTE(gtema): there is no metric triggered for regular authorization # with openstack.connect(), since it bypassed SDK and goes directly to # keystoneauth1. If you need to measure performance of the token # fetching - trigger a separate call. if url_path.endswith('tokens'): name_parts = ['tokens'] if not name_parts: name_parts = ['discovery'] # Strip out anything that's empty or None return [part for part in name_parts if part] def _extract_name_consume_url_parts(self, url_parts): """Pull out every other URL portion. For example, ``GET /servers/{id}/os-security-groups`` returns ``['server', 'os-security-groups']``. 
""" name_parts = [] for idx in range(0, len(url_parts)): if not idx % 2 and url_parts[idx]: # If we are on first segment and it end with 's' stip this 's' # to differentiate LIST and GET_BY_ID if ( len(url_parts) > idx + 1 and url_parts[idx][-1] == 's' and url_parts[idx][-2:] != 'is' ): name_parts.append(url_parts[idx][:-1]) else: name_parts.append(url_parts[idx]) return name_parts def _report_stats(self, response, url=None, method=None, exc=None): if self._statsd_client: self._report_stats_statsd(response, url, method, exc) if self._prometheus_counter and self._prometheus_histogram: self._report_stats_prometheus(response, url, method, exc) if self._influxdb_client: self._report_stats_influxdb(response, url, method, exc) def _report_stats_statsd(self, response, url=None, method=None, exc=None): try: if response is not None and not url: url = response.request.url if response is not None and not method: method = response.request.method name_parts = [ normalize_metric_name(f) for f in self._extract_name( url, self.service_type, self.session.get_project_id() ) ] key = '.'.join( [ self._statsd_prefix, normalize_metric_name(self.service_type), method, '_'.join(name_parts), ] ) with self._statsd_client.pipeline() as pipe: if response is not None: duration = int(response.elapsed.total_seconds() * 1000) metric_name = '{}.{}'.format( key, str(response.status_code) ) pipe.timing(metric_name, duration) pipe.incr(metric_name) if duration > 1000: pipe.incr('%s.over_1000' % key) elif exc is not None: pipe.incr('%s.failed' % key) pipe.incr('%s.attempted' % key) except Exception: # We do not want errors in metric reporting ever break client self.log.exception("Exception reporting metrics") def _report_stats_prometheus( self, response, url=None, method=None, exc=None ): if response is not None and not url: url = response.request.url if response is not None and not method: method = response.request.method parsed_url = urlparse(url) endpoint = "{}://{}{}".format( parsed_url.scheme, 
parsed_url.netloc, parsed_url.path ) if response is not None: labels = dict( method=method, endpoint=endpoint, service_type=self.service_type, status_code=response.status_code, ) self._prometheus_counter.labels(**labels).inc() self._prometheus_histogram.labels(**labels).observe( response.elapsed.total_seconds() * 1000 ) def _report_stats_influxdb( self, response, url=None, method=None, exc=None ): # NOTE(gtema): status_code is saved both as tag and field to give # ability showing it as a value and not only as a legend. # However Influx is not ok with having same name in tags and fields, # therefore use different names. if response is not None and not url: url = response.request.url if response is not None and not method: method = response.request.method tags = dict( method=method, name='_'.join( [ normalize_metric_name(f) for f in self._extract_name( url, self.service_type, self.session.get_project_id() ) ] ), ) fields = dict(attempted=1) if response is not None: fields['duration'] = int(response.elapsed.total_seconds() * 1000) tags['status_code'] = str(response.status_code) # Note(gtema): emit also status_code as a value (counter) fields[str(response.status_code)] = 1 fields[f'{method}.{response.status_code}'] = 1 # Note(gtema): status_code field itself is also very helpful on the # graphs to show what was the code, instead of counting its # occurences fields['status_code_val'] = response.status_code elif exc: fields['failed'] = 1 if 'additional_metric_tags' in self._influxdb_config: tags.update(self._influxdb_config['additional_metric_tags']) measurement = ( self._influxdb_config.get('measurement', 'openstack_api') if self._influxdb_config else 'openstack_api' ) # Note(gtema) append service name into the measurement name measurement = f'{measurement}.{self.service_type}' data = [dict(measurement=measurement, tags=tags, fields=fields)] try: self._influxdb_client.write_points(data) except Exception: self.log.exception('Error writing statistics to InfluxDB') def 
_get_connection(self): """Get the Connection object associated with this Proxy. When the Session is created, a reference to the Connection is attached to the ``_sdk_connection`` attribute. We also add a reference to it directly on ourselves. Use one of them. """ return getattr( self, '_connection', getattr(self.session, '_sdk_connection', None) ) def _get_resource( self, resource_type: ty.Type[ResourceType], value, **attrs ) -> ResourceType: """Get a resource object to work on :param resource_type: The type of resource to operate on. This should be a subclass of :class:`~openstack.resource.Resource` with a ``from_id`` method. :param value: The ID of a resource or an object of ``resource_type`` class if using an existing instance, or ``utils.Munch``, or None to create a new instance. :param attrs: A dict containing arguments for forming the request URL, if needed. """ conn = self._get_connection() if value is None: # Create a bare resource res = resource_type.new(connection=conn, **attrs) elif isinstance(value, dict) and not isinstance( value, resource.Resource ): res = resource_type._from_munch(value, connection=conn) res._update(**attrs) elif not isinstance(value, resource_type): # Create from an ID res = resource_type.new(id=value, connection=conn, **attrs) else: # An existing resource instance res = value res._update(**attrs) return res def _get_uri_attribute(self, child, parent, name): """Get a value to be associated with a URI attribute `child` will not be None here as it's a required argument on the proxy method. `parent` is allowed to be None if `child` is an actual resource, but when an ID is given for the child one must also be provided for the parent. An example of this is that a parent is a Server and a child is a ServerInterface. 
""" if parent is None: value = getattr(child, name) else: value = resource.Resource._get_id(parent) return value @ty.overload def _find( self, resource_type: ty.Type[ResourceType], name_or_id: str, ignore_missing: ty.Literal[True] = True, **attrs, ) -> ty.Optional[ResourceType]: ... @ty.overload def _find( self, resource_type: ty.Type[ResourceType], name_or_id: str, ignore_missing: ty.Literal[False], **attrs, ) -> ResourceType: ... # excuse the duplication here: it's mypy's fault # https://github.com/python/mypy/issues/14764 @ty.overload def _find( self, resource_type: ty.Type[ResourceType], name_or_id: str, ignore_missing: bool, **attrs, ) -> ty.Optional[ResourceType]: ... def _find( self, resource_type: ty.Type[ResourceType], name_or_id: str, ignore_missing: bool = True, **attrs, ) -> ty.Optional[ResourceType]: """Find a resource :param name_or_id: The name or ID of a resource to find. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict attrs: Attributes to be passed onto the :meth:`~openstack.resource.Resource.find` method, such as query parameters. :returns: An instance of ``resource_type`` or None """ return resource_type.find( self, name_or_id, ignore_missing=ignore_missing, **attrs ) @_check_resource(strict=False) def _delete( self, resource_type: ty.Type[ResourceType], value, ignore_missing=True, **attrs, ) -> ty.Optional[ResourceType]: """Delete a resource :param resource_type: The type of resource to delete. This should be a :class:`~openstack.resource.Resource` subclass with a ``from_id`` method. :param value: The value to delete. Can be either the ID of a resource or a :class:`~openstack.resource.Resource` subclass. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent resource. :param dict attrs: Attributes to be used to form the request URL such as the ID of a parent resource. :returns: The result of the ``delete`` :raises: ``ValueError`` if ``value`` is a :class:`~openstack.resource.Resource` that doesn't match the ``resource_type``. :class:`~openstack.exceptions.NotFoundException` when ignore_missing if ``False`` and a nonexistent resource is attempted to be deleted. """ res = self._get_resource(resource_type, value, **attrs) try: rv = res.delete(self) except exceptions.NotFoundException: if ignore_missing: return None raise return rv @_check_resource(strict=False) def _update( self, resource_type: ty.Type[ResourceType], value, base_path=None, **attrs, ) -> ResourceType: """Update a resource :param resource_type: The type of resource to update. :type resource_type: :class:`~openstack.resource.Resource` :param value: The resource to update. This must either be a :class:`~openstack.resource.Resource` or an id that corresponds to a resource. :param str base_path: Base part of the URI for updating resources, if different from :data:`~openstack.resource.Resource.base_path`. :param dict attrs: Attributes to be passed onto the :meth:`~openstack.resource.Resource.update` method to be updated. These should correspond to either :class:`~openstack.resource.Body` or :class:`~openstack.resource.Header` values on this resource. :returns: The result of the ``update`` :rtype: :class:`~openstack.resource.Resource` """ res = self._get_resource(resource_type, value, **attrs) return res.commit(self, base_path=base_path) def _create( self, resource_type: ty.Type[ResourceType], base_path=None, **attrs, ) -> ResourceType: """Create a resource from attributes :param resource_type: The type of resource to create. 
:type resource_type: :class:`~openstack.resource.Resource` :param str base_path: Base part of the URI for creating resources, if different from :data:`~openstack.resource.Resource.base_path`. :param path_args: A dict containing arguments for forming the request URL, if needed. :param dict attrs: Attributes to be passed onto the :meth:`~openstack.resource.Resource.create` method to be created. These should correspond to either :class:`~openstack.resource.Body` or :class:`~openstack.resource.Header` values on this resource. :returns: The result of the ``create`` :rtype: :class:`~openstack.resource.Resource` """ # Check for attributes whose names conflict with the parameters # specified in the method. conflicting_attrs = attrs.get('__conflicting_attrs', {}) if conflicting_attrs: for k, v in conflicting_attrs.items(): attrs[k] = v attrs.pop('__conflicting_attrs') conn = self._get_connection() res = resource_type.new(connection=conn, **attrs) return res.create(self, base_path=base_path) def _bulk_create( self, resource_type: ty.Type[ResourceType], data, base_path=None, ) -> ty.Generator[ResourceType, None, None]: """Create a resource from attributes :param resource_type: The type of resource to create. :type resource_type: :class:`~openstack.resource.Resource` :param list data: List of attributes dicts to be passed onto the :meth:`~openstack.resource.Resource.create` method to be created. These should correspond to either :class:`~openstack.resource.Body` or :class:`~openstack.resource.Header` values on this resource. :param str base_path: Base part of the URI for creating resources, if different from :data:`~openstack.resource.Resource.base_path`. :returns: A generator of Resource objects. 
:rtype: :class:`~openstack.resource.Resource` """ return resource_type.bulk_create(self, data, base_path=base_path) @_check_resource(strict=False) def _get( self, resource_type: ty.Type[ResourceType], value=None, requires_id=True, base_path=None, skip_cache=False, **attrs, ) -> ResourceType: """Fetch a resource :param resource_type: The type of resource to get. :type resource_type: :class:`~openstack.resource.Resource` :param value: The value to get. Can be either the ID of a resource or a :class:`~openstack.resource.Resource` subclass. :param str base_path: Base part of the URI for fetching resources, if different from :data:`~openstack.resource.Resource.base_path`. :param bool skip_cache: A boolean indicating whether optional API cache should be skipped for this invocation. :param dict attrs: Attributes to be passed onto the :meth:`~openstack.resource.Resource.get` method. These should correspond to either :class:`~openstack.resource.Body` or :class:`~openstack.resource.Header` values on this resource. :returns: The result of the ``fetch`` :rtype: :class:`~openstack.resource.Resource` """ res = self._get_resource(resource_type, value, **attrs) return res.fetch( self, requires_id=requires_id, base_path=base_path, skip_cache=skip_cache, error_message="No {resource_type} found for {value}".format( resource_type=resource_type.__name__, value=value ), ) def _list( self, resource_type: ty.Type[ResourceType], paginated=True, base_path=None, jmespath_filters=None, **attrs, ) -> ty.Generator[ResourceType, None, None]: """List a resource :param resource_type: The type of resource to list. This should be a :class:`~openstack.resource.Resource` subclass with a ``from_id`` method. :param bool paginated: When set to ``False``, expect all of the data to be returned in one response. When set to ``True``, the resource supports data being returned across multiple pages. 
:param str base_path: Base part of the URI for listing resources, if different from :data:`~openstack.resource.Resource.base_path`. :param str jmespath_filters: A string containing a jmespath expression for further filtering. :param dict attrs: Attributes to be passed onto the :meth:`~openstack.resource.Resource.list` method. These should correspond to either :class:`~openstack.resource.URI` values or appear in :data:`~openstack.resource.Resource._query_mapping`. :returns: A generator of Resource objects. :raises: ``ValueError`` if ``value`` is a :class:`~openstack.resource.Resource` that doesn't match the ``resource_type``. """ data = resource_type.list( self, paginated=paginated, base_path=base_path, **attrs ) if jmespath_filters and isinstance(jmespath_filters, str): return jmespath.search(jmespath_filters, data) return data def _head( self, resource_type: ty.Type[ResourceType], value=None, base_path=None, **attrs, ) -> ResourceType: """Retrieve a resource's header :param resource_type: The type of resource to retrieve. :type resource_type: :class:`~openstack.resource.Resource` :param value: The value of a specific resource to retreive headers for. Can be either the ID of a resource, a :class:`~openstack.resource.Resource` subclass, or ``None``. :param str base_path: Base part of the URI for heading resources, if different from :data:`~openstack.resource.Resource.base_path`. :param dict attrs: Attributes to be passed onto the :meth:`~openstack.resource.Resource.head` method. These should correspond to :class:`~openstack.resource.URI` values. 
:returns: The result of the ``head`` call :rtype: :class:`~openstack.resource.Resource` """ res = self._get_resource(resource_type, value, **attrs) return res.head(self, base_path=base_path) def _get_cleanup_dependencies(self): return None def _service_cleanup( self, dry_run=True, client_status_queue=None, identified_resources=None, filters=None, resource_evaluation_fn=None, skip_resources=None, ): return None def _service_cleanup_del_res( self, del_fn, obj, dry_run=True, client_status_queue=None, identified_resources=None, filters=None, resource_evaluation_fn=None, ): need_delete = False try: if resource_evaluation_fn and callable(resource_evaluation_fn): # Ask a user-provided evaluation function if we need to delete # the resource need_del = resource_evaluation_fn( obj, filters, identified_resources ) if isinstance(need_del, bool): # Just double check function returned bool need_delete = need_del else: need_delete = ( self._service_cleanup_resource_filters_evaluation( obj, filters=filters ) ) if need_delete: if client_status_queue: # Put into queue for client status info client_status_queue.put(obj) if identified_resources is not None: # Put into internal dict shared between threads so that # other services might know which other resources were # identified identified_resources[obj.id] = obj if not dry_run: del_fn(obj) except Exception as e: self.log.exception('Cannot delete resource %s: %s', obj, str(e)) return need_delete def _service_cleanup_resource_filters_evaluation(self, obj, filters=None): part_cond = [] if filters is not None and isinstance(filters, dict): for k, v in filters.items(): try: res_val = None if k == 'created_at' and hasattr(obj, 'created_at'): res_val = getattr(obj, 'created_at') if k == 'updated_at' and hasattr(obj, 'updated_at'): res_val = getattr(obj, 'updated_at') if res_val: res_date = iso8601.parse_date(res_val) cmp_date = iso8601.parse_date(v) if res_date and cmp_date and res_date <= cmp_date: part_cond.append(True) else: 
part_cond.append(False) else: # There are filters set, but we can't get required # attribute, so skip the resource self.log.debug( 'Requested cleanup attribute %s is not ' 'available on the resource' % k ) part_cond.append(False) except Exception: self.log.exception('Error during condition evaluation') if all(part_cond): return True else: return False def should_skip_resource_cleanup(self, resource=None, skip_resources=None): if resource is None or skip_resources is None: return False resource_name = f"{self.service_type.replace('-', '_')}.{resource}" if resource_name in skip_resources: self.log.debug( f"Skipping resource {resource_name} in project cleanup" ) return True return False # TODO(stephenfin): Remove this and all users. Use of this generally indicates # a missing Resource type. def _json_response(response, result_key=None, error_message=None): """Temporary method to use to bridge from ShadeAdapter to SDK calls.""" exceptions.raise_from_response(response, error_message=error_message) if not response.content: # This doesn't have any content return response # Some REST calls do not return json content. Don't decode it. if 'application/json' not in response.headers.get('Content-Type'): return response try: result_json = response.json() except JSONDecodeError: return response return result_json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/py.typed0000664000175000017500000000000000000000000017703 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/resource.py0000664000175000017500000027341000000000000020426 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base resource class. The :class:`~openstack.resource.Resource` class is a base class that represent a remote resource. The attributes that comprise a request or response for this resource are specified as class members on the Resource subclass where their values are of a component type, including :class:`~openstack.resource.Body`, :class:`~openstack.resource.Header`, and :class:`~openstack.resource.URI`. For update management, :class:`~openstack.resource.Resource` employs a series of :class:`~openstack.resource._ComponentManager` instances to look after the attributes of that particular component type. This is particularly useful for Body and Header types, so that only the values necessary are sent in requests to the server. When making requests, each of the managers are looked at to gather the necessary URI, body, and header data to build a request to be sent via keystoneauth's sessions. Responses from keystoneauth are then converted into this Resource class' appropriate components and types and then returned to the caller. 
""" import abc import collections import inspect import itertools import operator import typing as ty import urllib.parse import warnings import jsonpatch from keystoneauth1 import adapter from keystoneauth1 import discover from requests import structures from openstack import _log from openstack import exceptions from openstack import format from openstack import utils from openstack import warnings as os_warnings _SEEN_FORMAT = '{name}_seen' LOG = _log.setup_logging(__name__) def _convert_type(value, data_type, list_type=None): # This should allow handling list of dicts that have their own # Component type directly. See openstack/compute/v2/limits.py # and the RateLimit type for an example. if not data_type: return value if issubclass(data_type, list): if isinstance(value, (list, tuple, set)): if not list_type: return value ret = [] for raw in value: ret.append(_convert_type(raw, list_type)) return ret elif list_type: return [_convert_type(value, list_type)] # "if-match" in Object is a good example of the need here return [value] elif isinstance(value, data_type): return value if not isinstance(value, data_type): if issubclass(data_type, format.Formatter): return data_type.deserialize(value) # This should allow handling sub-dicts that have their own # Component type directly. See openstack/compute/v2/limits.py # and the AbsoluteLimits type for an example. if isinstance(value, dict): return data_type(**value) try: return data_type(value) except ValueError: # If we can not convert data to the expected type return empty # instance of the expected type. # This is necessary to handle issues like with flavor.swap where # empty string means "0". return data_type() class _BaseComponent(abc.ABC): # The name this component is being tracked as in the Resource key: str # The class to be used for mappings _map_cls: ty.Type[ty.Mapping] = dict #: Marks the property as deprecated. 
deprecated = False #: Deprecation reason message used to warn users when deprecated == True deprecation_reason = None #: Control field used to manage the deprecation warning. We want to warn #: only once when the attribute is retrieved in the code. already_warned_deprecation = False def __init__( self, name, type=None, default=None, alias=None, aka=None, alternate_id=False, list_type=None, coerce_to_default=False, deprecated=False, deprecation_reason=None, **kwargs, ): """A typed descriptor for a component that makes up a Resource :param name: The name this component exists as on the server :param type: The type this component is expected to be by the server. By default this is None, meaning any value you specify will work. If you specify type=dict and then set a component to a string, __set__ will fail, for example. :param default: Typically None, but any other default can be set. :param alias: If set, alternative attribute on object to return. :param aka: If set, additional name attribute would be available under. :param alternate_id: When `True`, this property is known internally as a value that can be sent with requests that require an ID but when `id` is not a name the Resource has. This is a relatively uncommon case, and this setting should only be used once per Resource. :param list_type: If type is `list`, list_type designates what the type of the elements of the list should be. :param coerce_to_default: If the Component is None or not present, force the given default to be used. If a default is not given but a type is given, construct an empty version of the type in question. :param deprecated: Indicates if the option is deprecated. If it is, we display a warning message to the user. :param deprecation_reason: Custom deprecation message. 
""" self.name = name self.type = type if type is not None and coerce_to_default and not default: self.default = type() else: self.default = default self.alias = alias self.aka = aka self.alternate_id = alternate_id self.list_type = list_type self.coerce_to_default = coerce_to_default self.deprecated = deprecated self.deprecation_reason = deprecation_reason def __get__(self, instance, owner): if instance is None: return self attributes = getattr(instance, self.key) try: value = attributes[self.name] except KeyError: value = self.default if self.alias: # Resource attributes can be aliased to each other. If neither # of them exist, then simply doing a # getattr(instance, self.alias) here sends things into # infinite recursion (this _get method is what gets called # when getattr(instance) is called. # To combat that, we set a flag on the instance saying that # we have seen the current name, and we check before trying # to resolve the alias if there is already a flag set for that # alias name. We then remove the seen flag for ourselves after # we exit the alias getattr to clean up after ourselves for # the next time. alias_flag = _SEEN_FORMAT.format(name=self.alias) if not getattr(instance, alias_flag, False): seen_flag = _SEEN_FORMAT.format(name=self.name) # Prevent infinite recursion setattr(instance, seen_flag, True) value = getattr(instance, self.alias) delattr(instance, seen_flag) self.warn_if_deprecated_property(value) return value # self.type() should not be called on None objects. if value is None: return None # This warning are pretty intruisive. Every time attribute is accessed # a warning is being thrown. In neutron clients we have way too many # places that still refer to tenant_id even though they may also # properly support project_id. For now we silence tenant_id warnings. 
if self.name != "tenant_id": self.warn_if_deprecated_property(value) return _convert_type(value, self.type, self.list_type) def warn_if_deprecated_property(self, value): deprecated = object.__getattribute__(self, 'deprecated') deprecation_reason = object.__getattribute__( self, 'deprecation_reason', ) if value and deprecated: warnings.warn( "The field %r has been deprecated. %s" % (self.name, deprecation_reason or "Avoid usage."), os_warnings.RemovedFieldWarning, ) return value def __set__(self, instance, value): if self.coerce_to_default and value is None: value = self.default if value != self.default: value = _convert_type(value, self.type, self.list_type) attributes = getattr(instance, self.key) attributes[self.name] = value def __delete__(self, instance): try: attributes = getattr(instance, self.key) del attributes[self.name] except KeyError: pass class Body(_BaseComponent): """Body attributes""" key = "_body" class Header(_BaseComponent): """Header attributes""" key = "_header" _map_cls = structures.CaseInsensitiveDict class URI(_BaseComponent): """URI attributes""" key = "_uri" class Computed(_BaseComponent): """Computed attributes""" key = "_computed" class _ComponentManager(collections.abc.MutableMapping): """Storage of a component type""" attributes: ty.Dict[str, ty.Any] def __init__(self, attributes=None, synchronized=False): self.attributes = dict() if attributes is None else attributes.copy() self._dirty = set() if synchronized else set(self.attributes.keys()) def __getitem__(self, key): return self.attributes[key] def __setitem__(self, key, value): try: orig = self.attributes[key] except KeyError: changed = True else: changed = orig != value if changed: self.attributes[key] = value self._dirty.add(key) def __delitem__(self, key): del self.attributes[key] self._dirty.add(key) def __iter__(self): return iter(self.attributes) def __len__(self): return len(self.attributes) @property def dirty(self): """Return a dict of modified attributes""" return {key: 
self.attributes.get(key, None) for key in self._dirty} def clean(self, only=None): """Signal that the resource no longer has modified attributes. :param only: an optional set of attributes to no longer consider changed """ if only: self._dirty = self._dirty - set(only) else: self._dirty = set() class _Request: """Prepared components that go into a KSA request""" def __init__(self, url, body, headers): self.url = url self.body = body self.headers = headers class QueryParameters: def __init__( self, *names, include_pagination_defaults=True, **mappings, ): """Create a dict of accepted query parameters :param names: List of strings containing client-side query parameter names. Each name in the list maps directly to the name expected by the server. :param mappings: Key-value pairs where the key is the client-side name we'll accept here and the value is the name the server expects, e.g, ``changes_since=changes-since``. Additionally, a value can be a dict with optional keys: - ``name`` - server-side name, - ``type`` - callable to convert from client to server representation :param include_pagination_defaults: If true, include default pagination parameters, ``limit`` and ``marker``. These are the most common query parameters used for listing resources in OpenStack APIs. """ self._mapping: ty.Dict[str, ty.Union[str, ty.Dict]] = {} if include_pagination_defaults: self._mapping.update({"limit": "limit", "marker": "marker"}) self._mapping.update({name: name for name in names}) self._mapping.update(mappings) def _validate(self, query, base_path=None, allow_unknown_params=False): """Check that supplied query keys match known query mappings :param dict query: Collection of key-value pairs where each key is the client-side parameter name or server side name. :param base_path: Formatted python string of the base url path for the resource. :param allow_unknown_params: Exclude query params not known by the resource. 
:returns: Filtered collection of the supported QueryParameters """ expected_params = list(self._mapping) expected_params.extend( value.get('name', key) if isinstance(value, dict) else value for key, value in self._mapping.items() ) if base_path: expected_params += utils.get_string_format_keys(base_path) invalid_keys = set(query) - set(expected_params) if not invalid_keys: return query else: if not allow_unknown_params: raise exceptions.InvalidResourceQuery( message="Invalid query params: %s" % ",".join(invalid_keys), extra_data=invalid_keys, ) else: known_keys = set(query).intersection(set(expected_params)) return {k: query[k] for k in known_keys} def _transpose(self, query, resource_type): """Transpose the keys in query based on the mapping If a query is supplied with its server side name, we will still use it, but take preference to the client-side name when both are supplied. :param dict query: Collection of key-value pairs where each key is the client-side parameter name to be transposed to its server side name. :param resource_type: Class of a resource. """ result = {} for client_side, server_side in self._mapping.items(): if isinstance(server_side, dict): name = server_side.get('name', client_side) type_ = server_side.get('type') else: name = server_side type_ = None # NOTE(dtantsur): a small hack to be compatible with both # single-argument (like int) and double-argument type functions. 
try: provide_resource_type = ( len(inspect.getfullargspec(type_).args) > 1 ) except TypeError: provide_resource_type = False if client_side in query: value = query[client_side] elif name in query: value = query[name] else: continue if type_ is not None: if provide_resource_type: result[name] = type_(value, resource_type) else: result[name] = type_(value) else: result[name] = value return result class Resource(dict): # TODO(mordred) While this behaves mostly like a munch for the purposes # we need, sub-resources, such as Server.security_groups, which is a list # of dicts, will contain lists of real dicts, not lists of munch-like dict # objects. We should probably figure out a Resource class, perhaps # SubResource or something, that we can use to define the data-model of # complex object attributes where those attributes are not already covered # by a different resource such as Server.image which should ultimately # be an Image. We subclass dict so that things like json.dumps and pprint # will work properly. #: Singular form of key for resource. resource_key: ty.Optional[str] = None #: Plural form of key for resource. resources_key: ty.Optional[str] = None #: Key used for pagination links pagination_key: ty.Optional[str] = None #: The ID of this resource. id = Body("id") #: The name of this resource. name: ty.Union[Body, URI] = Body("name") #: The OpenStack location of this resource. location: ty.Union[Computed, Body, Header] = Computed('location') #: Mapping of accepted query parameter names. _query_mapping = QueryParameters() #: The base part of the URI for this resource. base_path: str = "" #: Allow create operation for this resource. allow_create = False #: Allow get operation for this resource. allow_fetch = False #: Allow update operation for this resource. allow_commit = False #: Allow delete operation for this resource. allow_delete = False #: Allow list operation for this resource. allow_list = False #: Allow head operation for this resource. 
allow_head = False #: Allow patch operation for this resource. allow_patch = False #: Commits happen without header or body being dirty. allow_empty_commit = False #: Method for committing a resource (PUT, PATCH, POST) commit_method = "PUT" #: Method for creating a resource (POST, PUT) create_method = "POST" #: Whether commit uses JSON patch format. commit_jsonpatch = False #: Do calls for this resource require an id requires_id = True #: Whether create requires an ID (determined from method if None). create_requires_id: ty.Optional[bool] = None #: Whether create should exclude ID in the body of the request. create_exclude_id_from_body = False #: Do responses for this resource have bodies has_body = True #: Does create returns a body (if False requires ID), defaults to has_body create_returns_body: ty.Optional[bool] = None #: Maximum microversion to use for getting/creating/updating the Resource _max_microversion: ty.Optional[str] = None #: API microversion (string or None) this Resource was loaded with microversion = None _connection = None _body: _ComponentManager _header: _ComponentManager _uri: _ComponentManager _computed: _ComponentManager _original_body: ty.Dict[str, ty.Any] = {} _store_unknown_attrs_as_properties = False _allow_unknown_attrs_in_body = False _unknown_attrs_in_body: ty.Dict[str, ty.Any] = {} # Placeholder for aliases as dict of {__alias__:__original} _attr_aliases: ty.Dict[str, str] = {} def __init__(self, _synchronized=False, connection=None, **attrs): """The base resource :param bool _synchronized: This is not intended to be used directly. See :meth:`~openstack.resource.Resource.new` and :meth:`~openstack.resource.Resource.existing`. :param openstack.connection.Connection connection: Reference to the Connection being used. Defaults to None to allow Resource objects to be used without an active Connection, such as in unit tests. Use of ``self._connection`` in Resource code should protect itself with a check for None. 
""" self._connection = connection self.microversion = attrs.pop('microversion', None) self._unknown_attrs_in_body = {} # NOTE: _collect_attrs modifies **attrs in place, removing # items as they match up with any of the body, header, # or uri mappings. body, header, uri, computed = self._collect_attrs(attrs) if self._allow_unknown_attrs_in_body: self._unknown_attrs_in_body.update(attrs) self._body = _ComponentManager( attributes=body, synchronized=_synchronized ) self._header = _ComponentManager( attributes=header, synchronized=_synchronized ) self._uri = _ComponentManager( attributes=uri, synchronized=_synchronized ) self._computed = _ComponentManager( attributes=computed, synchronized=_synchronized ) if self.commit_jsonpatch or self.allow_patch: # We need the original body to compare against if _synchronized: self._original_body = self._body.attributes.copy() elif self.id: # Never record ID as dirty. self._original_body = {self._alternate_id() or 'id': self.id} else: self._original_body = {} if self._store_unknown_attrs_as_properties: # When storing of unknown attributes is requested - ensure # we have properties attribute (with type=None) self._store_unknown_attrs_as_properties = ( hasattr(self.__class__, 'properties') and self.__class__.properties.type is None ) self._update_location() for attr, component in self._attributes_iterator(): if component.aka: # Register alias for the attribute (local name) self._attr_aliases[component.aka] = attr # TODO(mordred) This is terrible, but is a hack at the moment to ensure # json.dumps works. The json library does basically if not obj: and # obj.items() ... but I think the if not obj: is short-circuiting down # in the C code and thus since we don't store the data in self[] it's # always False even if we override __len__ or __bool__. 
dict.update(self, self.to_dict()) @classmethod def _attributes_iterator(cls, components=tuple([Body, Header])): """Iterator over all Resource attributes""" # isinstance stricly requires this to be a tuple # Since we're looking at class definitions we need to include # subclasses, so check the whole MRO. for klass in cls.__mro__: for attr, component in klass.__dict__.items(): if isinstance(component, components): yield attr, component def __repr__(self): pairs = [ "{}={}".format(k, v if v is not None else 'None') for k, v in dict( itertools.chain( self._body.attributes.items(), self._header.attributes.items(), self._uri.attributes.items(), self._computed.attributes.items(), ) ).items() ] args = ", ".join(pairs) return "{}.{}({})".format( self.__module__, self.__class__.__name__, args ) def __eq__(self, comparand): """Return True if another resource has the same contents""" if not isinstance(comparand, Resource): return False return all( [ self._body.attributes == comparand._body.attributes, self._header.attributes == comparand._header.attributes, self._uri.attributes == comparand._uri.attributes, self._computed.attributes == comparand._computed.attributes, ] ) def __getattribute__(self, name): """Return an attribute on this instance This is mostly a pass-through except for a specialization on the 'id' name, as this can exist under a different name via the `alternate_id` argument to resource.Body. """ if name == "id": if name in self._body: return self._body[name] else: key = self._alternate_id() if key: return self._body.get(key) else: try: return object.__getattribute__(self, name) except AttributeError as e: if name in self._attr_aliases: # Hmm - not found. But hey, the alias exists... return object.__getattribute__( self, self._attr_aliases[name] ) if self._allow_unknown_attrs_in_body: # Last chance, maybe it's in body as attribute which isn't # in the mapping at all... 
if name in self._unknown_attrs_in_body: return self._unknown_attrs_in_body[name] raise e def __getitem__(self, name): """Provide dictionary access for elements of the data model.""" # Check the class, since BaseComponent is a descriptor and thus # behaves like its wrapped content. If we get it on the class, # it returns the BaseComponent itself, not the results of __get__. real_item = getattr(self.__class__, name, None) if not real_item and name in self._attr_aliases: # Not found? But we know an alias exists. name = self._attr_aliases[name] real_item = getattr(self.__class__, name, None) if isinstance(real_item, _BaseComponent): return getattr(self, name) if not real_item: # In order to maintain backwards compatibility where we were # returning Munch (and server side names) and Resource object with # normalized attributes we can offer dict access via server side # names. for attr, component in self._attributes_iterator(tuple([Body])): if component.name == name: warnings.warn( "Access to '%s[%s]' is deprecated. " "Use '%s.%s' attribute instead" % (self.__class__, name, self.__class__, attr), os_warnings.LegacyAPIWarning, ) return getattr(self, attr) if self._allow_unknown_attrs_in_body: if name in self._unknown_attrs_in_body: return self._unknown_attrs_in_body[name] raise KeyError(name) def __delitem__(self, name): delattr(self, name) def __setitem__(self, name, value): real_item = getattr(self.__class__, name, None) if isinstance(real_item, _BaseComponent): self.__setattr__(name, value) else: if self._allow_unknown_attrs_in_body: self._unknown_attrs_in_body[name] = value return raise KeyError( "{name} is not found. 
{module}.{cls} objects do not support" " setting arbitrary keys through the" " dict interface.".format( module=self.__module__, cls=self.__class__.__name__, name=name, ) ) def _attributes( self, remote_names=False, components=None, include_aliases=True ): """Generate list of supported attributes""" attributes = [] if not components: components = tuple([Body, Header, Computed, URI]) for attr, component in self._attributes_iterator(components): key = attr if not remote_names else component.name attributes.append(key) if include_aliases and component.aka: attributes.append(component.aka) return attributes def keys(self): # NOTE(mordred) In python2, dict.keys returns a list. In python3 it # returns a dict_keys view. For 2, we can return a list from the # itertools chain. In 3, return the chain so it's at least an iterator. # It won't strictly speaking be an actual dict_keys, so it's possible # we may want to get more clever, but for now let's see how far this # will take us. # NOTE(gtema) For now let's return list of 'public' attributes and not # remotes or "unknown" return self._attributes() def items(self): # This method is critically required for Ansible "jsonify" # NOTE(gtema) For some reason when running from SDK itself the native # implementation of the method is absolutely sifficient, when called # from Ansible - the values are often empty. Even integrating all # Ansible internal methods did not help to find the root cause. Another # fact is that under Py2 everything is fine, while under Py3 it fails. # There is currently no direct test for Ansible-SDK issue. It is tested # implicitely in the keypair role for ansible module, where an assert # verifies presence of attributes. 
res = [] for attr in self._attributes(): # Append key, value tuple to result list res.append((attr, self[attr])) return res def _update(self, **attrs): """Given attributes, update them on this instance This is intended to be used from within the proxy layer when updating instances that may have already been created. """ self.microversion = attrs.pop('microversion', None) body, header, uri, computed = self._collect_attrs(attrs) self._body.update(body) self._header.update(header) self._uri.update(uri) self._computed.update(computed) self._update_location() # TODO(mordred) This is terrible, but is a hack at the moment to ensure # json.dumps works. The json library does basically if not obj: and # obj.items() ... but I think the if not obj: is short-circuiting down # in the C code and thus since we don't store the data in self[] it's # always False even if we override __len__ or __bool__. dict.update(self, self.to_dict()) def _collect_attrs(self, attrs): """Given attributes, return a dict per type of attribute This method splits up **attrs into separate dictionaries that correspond to the relevant body, header, and uri attributes that exist on this class. """ body = self._consume_body_attrs(attrs) header = self._consume_header_attrs(attrs) uri = self._consume_uri_attrs(attrs) if attrs: if self._allow_unknown_attrs_in_body: body.update(attrs) elif self._store_unknown_attrs_as_properties: # Keep also remaining (unknown) attributes body = self._pack_attrs_under_properties(body, attrs) if any([body, header, uri]): attrs = self._compute_attributes(body, header, uri) body.update(self._consume_attrs(self._body_mapping(), attrs)) header.update(self._consume_attrs(self._header_mapping(), attrs)) uri.update(self._consume_attrs(self._uri_mapping(), attrs)) computed = self._consume_attrs(self._computed_mapping(), attrs) # TODO(mordred) We should make a Location Resource and add it here # instead of just the dict. 
if self._connection: computed.setdefault('location', self._connection.current_location) return body, header, uri, computed def _update_location(self): """Update location to include resource project/zone information. Location should describe the location of the resource. For some resources, where the resource doesn't have any such baked-in notion we assume the resource exists in the same project as the logged-in user's token. However, if a resource contains a project_id, then that project is where the resource lives, and the location should reflect that. """ if not self._connection: return kwargs = {} if hasattr(self, 'project_id'): kwargs['project_id'] = self.project_id if hasattr(self, 'availability_zone'): kwargs['zone'] = self.availability_zone if kwargs: self.location = self._connection._get_current_location(**kwargs) def _compute_attributes(self, body, header, uri): """Compute additional attributes from the remote resource.""" return {} def _consume_body_attrs(self, attrs): return self._consume_mapped_attrs(Body, attrs) def _consume_header_attrs(self, attrs): return self._consume_mapped_attrs(Header, attrs) def _consume_uri_attrs(self, attrs): return self._consume_mapped_attrs(URI, attrs) def _update_from_body_attrs(self, attrs): body = self._consume_body_attrs(attrs) self._body.attributes.update(body) self._body.clean() def _update_from_header_attrs(self, attrs): headers = self._consume_header_attrs(attrs) self._header.attributes.update(headers) self._header.clean() def _update_uri_from_attrs(self, attrs): uri = self._consume_uri_attrs(attrs) self._uri.attributes.update(uri) self._uri.clean() def _consume_mapped_attrs(self, mapping_cls, attrs): mapping = self._get_mapping(mapping_cls) return self._consume_attrs(mapping, attrs) def _consume_attrs(self, mapping, attrs): """Given a mapping and attributes, return relevant matches This method finds keys in attrs that exist in the mapping, then both transposes them to their server-side equivalent key name to be 
returned, and finally pops them out of attrs. This allows us to only calculate their place and existence in a particular type of Resource component one time, rather than looking at the same source dict several times. """ relevant_attrs = {} consumed_keys = [] for key, value in attrs.items(): # We want the key lookup in mapping to be case insensitive if the # mapping is, thus the use of get. We want value to be exact. # If we find a match, we then have to loop over the mapping for # to find the key to return, as there isn't really a "get me the # key that matches this other key". We lower() in the inner loop # because we've already done case matching in the outer loop. if key in mapping.values() or mapping.get(key): for map_key, map_value in mapping.items(): if key.lower() in (map_key.lower(), map_value.lower()): relevant_attrs[map_key] = value consumed_keys.append(key) continue for key in consumed_keys: attrs.pop(key) return relevant_attrs def _clean_body_attrs(self, attrs): """Mark the attributes as up-to-date.""" self._body.clean(only=attrs) if self.commit_jsonpatch or self.allow_patch: for attr in attrs: if attr in self._body: self._original_body[attr] = self._body[attr] @classmethod def _get_mapping(cls, component): """Return a dict of attributes of a given component on the class""" mapping = component._map_cls() ret = component._map_cls() for key, value in cls._attributes_iterator(component): # Make sure base classes don't end up overwriting # mappings we've found previously in subclasses. if key not in mapping: # Make it this way first, to get MRO stuff correct. 
mapping[key] = value.name for k, v in mapping.items(): ret[v] = k return ret @classmethod def _body_mapping(cls): """Return all Body members of this class""" return cls._get_mapping(Body) @classmethod def _header_mapping(cls): """Return all Header members of this class""" return cls._get_mapping(Header) @classmethod def _uri_mapping(cls): """Return all URI members of this class""" return cls._get_mapping(URI) @classmethod def _computed_mapping(cls): """Return all URI members of this class""" return cls._get_mapping(Computed) @classmethod def _alternate_id(cls): """Return the name of any value known as an alternate_id NOTE: This will only ever return the first such alternate_id. Only one alternate_id should be specified. Returns an empty string if no name exists, as this method is consumed by _get_id and passed to getattr. """ for value in cls.__dict__.values(): if isinstance(value, Body): if value.alternate_id: return value.name return "" @staticmethod def _get_id(value): """If a value is a Resource, return the canonical ID This will return either the value specified by `id` or `alternate_id` in that order if `value` is a Resource. If `value` is anything other than a Resource, likely to be a string already representing an ID, it is returned. """ if isinstance(value, Resource): return value.id else: return value @classmethod def new(cls, **kwargs): """Create a new instance of this resource. When creating the instance set the ``_synchronized`` parameter of :class:`Resource` to ``False`` to indicate that the resource does not yet exist on the server side. This marks all attributes passed in ``**kwargs`` as "dirty" on the resource, and thusly tracked as necessary in subsequent calls such as :meth:`update`. :param dict kwargs: Each of the named arguments will be set as attributes on the resulting Resource object. """ return cls(_synchronized=False, **kwargs) @classmethod def existing(cls, connection=None, **kwargs): """Create an instance of an existing remote resource. 
When creating the instance set the ``_synchronized`` parameter of :class:`Resource` to ``True`` to indicate that it represents the state of an existing server-side resource. As such, all attributes passed in ``**kwargs`` are considered "clean", such that an immediate :meth:`update` call would not generate a body of attributes to be modified on the server. :param dict kwargs: Each of the named arguments will be set as attributes on the resulting Resource object. """ return cls(_synchronized=True, connection=connection, **kwargs) @classmethod def _from_munch(cls, obj, synchronized=True, connection=None): """Create an instance from a ``utils.Munch`` object. This is intended as a temporary measure to convert between shade-style Munch objects and original openstacksdk resources. :param obj: a ``utils.Munch`` object to convert from. :param bool synchronized: whether this object already exists on server Must be set to ``False`` for newly created objects. """ return cls(_synchronized=synchronized, connection=connection, **obj) def _attr_to_dict(self, attr, to_munch): """For a given attribute, convert it into a form suitable for a dict value. :param bool attr: Attribute name to convert :return: A dictionary of key/value pairs where keys are named as they exist as attributes of this class. :param bool _to_munch: Converts subresources to munch instead of dict. 
""" value = getattr(self, attr, None) if isinstance(value, Resource): return value.to_dict(_to_munch=to_munch) elif isinstance(value, dict) and to_munch: return utils.Munch(value) elif value and isinstance(value, list): converted = [] for raw in value: if isinstance(raw, Resource): converted.append(raw.to_dict(_to_munch=to_munch)) elif isinstance(raw, dict) and to_munch: converted.append(utils.Munch(raw)) else: converted.append(raw) return converted return value def to_dict( self, body=True, headers=True, computed=True, ignore_none=False, original_names=False, _to_munch=False, ): """Return a dictionary of this resource's contents :param bool body: Include the :class:`~openstack.resource.Body` attributes in the returned dictionary. :param bool headers: Include the :class:`~openstack.resource.Header` attributes in the returned dictionary. :param bool computed: Include the :class:`~openstack.resource.Computed` attributes in the returned dictionary. :param bool ignore_none: When True, exclude key/value pairs where the value is None. This will exclude attributes that the server hasn't returned. :param bool original_names: When True, use attribute names as they were received from the server. :param bool _to_munch: For internal use only. Converts to `utils.Munch` instead of dict. :return: A dictionary of key/value pairs where keys are named as they exist as attributes of this class. 
""" mapping: ty.Union[utils.Munch, ty.Dict] if _to_munch: mapping = utils.Munch() else: mapping = {} components: ty.List[ty.Type[_BaseComponent]] = [] if body: components.append(Body) if headers: components.append(Header) if computed: components.append(Computed) if not components: raise ValueError( "At least one of `body`, `headers` or `computed` must be True" ) if body and self._allow_unknown_attrs_in_body: for key in self._unknown_attrs_in_body: converted = self._attr_to_dict( key, to_munch=_to_munch, ) if not ignore_none or converted is not None: mapping[key] = converted # NOTE: This is similar to the implementation in _get_mapping # but is slightly different in that we're looking at an instance # and we're mapping names on this class to their actual stored # values. # NOTE: isinstance stricly requires components to be a tuple for attr, component in self._attributes_iterator(tuple(components)): if original_names: key = component.name else: key = attr for key in filter(None, (key, component.aka)): # Make sure base classes don't end up overwriting # mappings we've found previously in subclasses. 
            if key not in mapping:
                converted = self._attr_to_dict(
                    attr,
                    to_munch=_to_munch,
                )
                # Optionally drop attributes that resolved to None
                if ignore_none and converted is None:
                    continue
                mapping[key] = converted
        return mapping

    # Compatibility with the utils.Munch.toDict method
    toDict = to_dict

    # Make the munch copy method use to_dict
    copy = to_dict

    def _to_munch(self, original_names=True):
        """Convert this resource into a Munch compatible with shade."""
        return self.to_dict(
            body=True,
            headers=False,
            original_names=original_names,
            _to_munch=True,
        )

    def _unpack_properties_to_resource_root(self, body):
        """Move keys nested under a 'properties' dict to the body root.

        A plain-string ``properties`` value is kept under its original
        key. Returns ``None`` for a falsy body.
        """
        if not body:
            return
        # We do not want to modify caller
        body = body.copy()
        props = body.pop('properties', {})
        if props and isinstance(props, dict):
            # unpack dict of properties back to the root of the resource
            body.update(props)
        elif props and isinstance(props, str):
            # A string value only - bring it back
            body['properties'] = props
        return body

    def _pack_attrs_under_properties(self, body, attrs):
        """Inverse of _unpack_properties_to_resource_root: nest attrs
        under the body's 'properties' key."""
        props = body.get('properties', {})
        if not isinstance(props, dict):
            # Preserve a non-dict 'properties' value inside the new dict
            props = {'properties': props}
        props.update(attrs)
        body['properties'] = props
        return body

    def _prepare_request_body(
        self,
        patch,
        prepend_key,
        *,
        resource_request_key=None,
    ):
        """Build the JSON body for a request.

        With ``patch=True`` this is a JSON-patch list computed between the
        original and current body; otherwise it is the dict of dirty
        attributes, optionally wrapped under the resource key.
        """
        body: ty.Union[ty.Dict[str, ty.Any], ty.List[ty.Any]]
        if patch:
            if not self._store_unknown_attrs_as_properties:
                # Default case
                new = self._body.attributes
                original_body = self._original_body
            else:
                new = self._unpack_properties_to_resource_root(
                    self._body.attributes
                )
                original_body = self._unpack_properties_to_resource_root(
                    self._original_body
                )

            # NOTE(gtema) sort result, since we might need validate it in tests
            body = sorted(
                list(jsonpatch.make_patch(original_body, new).patch),
                key=operator.itemgetter('path'),
            )
        else:
            if not self._store_unknown_attrs_as_properties:
                # Default case
                body = self._body.dirty
            else:
                body = self._unpack_properties_to_resource_root(
                    self._body.dirty
                )

            if prepend_key:
                # An explicit request key takes precedence over resource_key
                if resource_request_key is not None:
                    body = {resource_request_key: body}
                elif self.resource_key is not None:
                    body = {self.resource_key: body}
        return body

    def _prepare_request(
        self,
        requires_id=None,
        prepend_key=False,
        patch=False,
        base_path=None,
        params=None,
        *,
        resource_request_key=None,
        **kwargs,
    ):
        """Prepare a request to be sent to the server

        Create operations don't require an ID, but all others do,
        so only try to append an ID when it's needed with
        requires_id. Create and update operations sometimes require
        their bodies to be contained within an dict -- if the
        instance contains a resource_key and prepend_key=True,
        the body will be wrapped in a dict with that key.
        If patch=True, a JSON patch is prepared instead of the full body.

        Return a _Request object that contains the constructed URI
        as well a body and headers that are ready to send.
        Only dirty body and header contents will be returned.
        """
        if requires_id is None:
            requires_id = self.requires_id

        # Conditionally construct arguments for _prepare_request_body
        request_kwargs = {"patch": patch, "prepend_key": prepend_key}
        if resource_request_key is not None:
            request_kwargs['resource_request_key'] = resource_request_key
        body = self._prepare_request_body(**request_kwargs)

        # TODO(mordred) Ensure headers have string values better than this
        headers = {}
        for k, v in self._header.dirty.items():
            if isinstance(v, list):
                headers[k] = ", ".join(v)
            else:
                headers[k] = str(v)

        if base_path is None:
            base_path = self.base_path
        uri = base_path % self._uri.attributes
        if requires_id:
            if self.id is None:
                raise exceptions.InvalidRequest(
                    "Request requires an ID but none was found"
                )

            uri = utils.urljoin(uri, self.id)

        if params:
            query_params = urllib.parse.urlencode(params)
            uri += '?' + query_params

        return _Request(uri, body, headers)

    def _translate_response(
        self,
        response,
        has_body=None,
        error_message=None,
        *,
        resource_response_key=None,
    ):
        """Given a KSA response, inflate this instance with its data

        DELETE operations don't return a body, so only try to work
        with a body when has_body is True.
This method updates attributes that correspond to headers and body on this instance and clears the dirty set. """ if has_body is None: has_body = self.has_body exceptions.raise_from_response(response, error_message=error_message) if has_body: try: body = response.json() if resource_response_key and resource_response_key in body: body = body[resource_response_key] elif self.resource_key and self.resource_key in body: body = body[self.resource_key] # Do not allow keys called "self" through. Glance chose # to name a key "self", so we need to pop it out because # we can't send it through cls.existing and into the # Resource initializer. "self" is already the first # argument and is practically a reserved word. body.pop("self", None) body_attrs = self._consume_body_attrs(body) if self._allow_unknown_attrs_in_body: body_attrs.update(body) self._unknown_attrs_in_body.update(body) elif self._store_unknown_attrs_as_properties: body_attrs = self._pack_attrs_under_properties( body_attrs, body ) self._body.attributes.update(body_attrs) self._body.clean() if self.commit_jsonpatch or self.allow_patch: # We need the original body to compare against self._original_body = body_attrs.copy() except ValueError: # Server returned not parse-able response (202, 204, etc) # Do simply nothing pass headers = self._consume_header_attrs(response.headers) self._header.attributes.update(headers) self._header.clean() self._update_location() dict.update(self, self.to_dict()) @classmethod def _get_session(cls, session): """Attempt to get an Adapter from a raw session. Some older code used conn.session has the session argument to Resource methods. That does not work anymore, as Resource methods expect an Adapter not a session. We've hidden an _sdk_connection on the Session stored on the connection. If we get something that isn't an Adapter, pull the connection from the Session and look up the adapter by service_type. 
""" # TODO(mordred) We'll need to do this for every method in every # Resource class that is calling session.$something to be complete. if isinstance(session, adapter.Adapter): return session raise ValueError( "The session argument to Resource methods requires either an" " instance of an openstack.proxy.Proxy object or at the very least" " a raw keystoneauth1.adapter.Adapter." ) @classmethod def _get_microversion(cls, session, *, action): """Get microversion to use for the given action. The base version uses the following logic: 1. If the session has a default microversion for the current service, just use it. 2. If ``self._max_microversion`` is not ``None``, use minimum between it and the maximum microversion supported by the server. 3. Otherwise use ``None``. Subclasses can override this method if more complex logic is needed. :param session: The session to use for making the request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param action: One of "fetch", "commit", "create", "delete", "patch". :type action: str :return: Microversion as string or ``None`` """ if action not in { 'list', 'fetch', 'commit', 'create', 'delete', 'patch', }: raise ValueError('Invalid action: %s' % action) if session.default_microversion: return session.default_microversion return utils.maximum_supported_microversion( session, cls._max_microversion ) def _assert_microversion_for( self, session, action, expected, error_message=None, maximum=None, ): """Enforce that the microversion for action satisfies the requirement. :param session: :class`keystoneauth1.adapter.Adapter` :param action: One of "fetch", "commit", "create", "delete". :param expected: Expected microversion. :param error_message: Optional error message with details. Will be prepended to the message generated here. :param maximum: Maximum microversion. :return: resulting microversion as string. :raises: :exc:`~openstack.exceptions.NotSupported` if the version used for the action is lower than the expected one. 
""" def _raise(message): if error_message: error_message.rstrip('.') message = f'{error_message}. {message}' raise exceptions.NotSupported(message) actual = self._get_microversion(session, action=action) if actual is None: message = ( "API version %s is required, but the default " "version will be used." ) % expected _raise(message) actual_n = discover.normalize_version_number(actual) if expected is not None: expected_n = discover.normalize_version_number(expected) if actual_n < expected_n: message = ( "API version %(expected)s is required, but %(actual)s " "will be used." ) % {'expected': expected, 'actual': actual} _raise(message) if maximum is not None: maximum_n = discover.normalize_version_number(maximum) # Assume that if a service supports higher versions, it also # supports lower ones. Breaks for services that remove old API # versions (which is not something they should do). if actual_n > maximum_n: return maximum return actual def create( self, session, prepend_key=True, base_path=None, *, resource_request_key=None, resource_response_key=None, microversion=None, **params, ): """Create a remote resource based on this instance. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param prepend_key: A boolean indicating whether the resource_key should be prepended in a resource creation request. Default to True. :param str base_path: Base part of the URI for creating resources, if different from :data:`~openstack.resource.Resource.base_path`. :param str resource_request_key: Overrides the usage of self.resource_key when prepending a key to the request body. Ignored if `prepend_key` is false. :param str resource_response_key: Overrides the usage of self.resource_key when processing response bodies. Ignored if `prepend_key` is false. :param str microversion: API version to override the negotiated one. :param dict params: Additional params to pass. :return: This :class:`Resource` instance. 
:raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_create` is not set to ``True``. """ if not self.allow_create: raise exceptions.MethodNotSupported(self, 'create') session = self._get_session(session) if microversion is None: microversion = self._get_microversion(session, action='create') requires_id = ( self.create_requires_id if self.create_requires_id is not None else self.create_method == 'PUT' ) # Construct request arguments. request_kwargs = { "requires_id": requires_id, "prepend_key": prepend_key, "base_path": base_path, } if resource_request_key is not None: request_kwargs['resource_request_key'] = resource_request_key if self.create_exclude_id_from_body: self._body._dirty.discard("id") if self.create_method == 'PUT': request = self._prepare_request(**request_kwargs) response = session.put( request.url, json=request.body, headers=request.headers, microversion=microversion, params=params, ) elif self.create_method == 'POST': request = self._prepare_request(**request_kwargs) response = session.post( request.url, json=request.body, headers=request.headers, microversion=microversion, params=params, ) else: raise exceptions.ResourceFailure( "Invalid create method: %s" % self.create_method ) has_body = ( self.has_body if self.create_returns_body is None else self.create_returns_body ) self.microversion = microversion response_kwargs = { "has_body": has_body, } if resource_response_key is not None: response_kwargs['resource_response_key'] = resource_response_key self._translate_response(response, **response_kwargs) # direct comparision to False since we need to rule out None if self.has_body and self.create_returns_body is False: # fetch the body if it's required but not returned by create fetch_kwargs = {} if resource_response_key is not None: fetch_kwargs = {'resource_response_key': resource_response_key} return self.fetch(session, **fetch_kwargs) return self @classmethod def bulk_create( cls, session, data, prepend_key=True, 
        base_path=None,
        *,
        microversion=None,
        **params,
    ):
        """Create multiple remote resources based on this class and data.

        :param session: The session to use for making this request.
        :type session: :class:`~keystoneauth1.adapter.Adapter`
        :param data: list of dicts, which represent resources to create.
        :param prepend_key: A boolean indicating whether the resource_key
            should be prepended in a resource creation request. Default to
            True.
        :param str base_path: Base part of the URI for creating resources, if
            different from :data:`~openstack.resource.Resource.base_path`.
        :param str microversion: API version to override the negotiated one.
        :param dict params: Additional params to pass.

        :return: A generator of :class:`Resource` objects.
        :raises: :exc:`~openstack.exceptions.MethodNotSupported` if
            :data:`Resource.allow_create` is not set to ``True``.
        """
        if not cls.allow_create:
            raise exceptions.MethodNotSupported(cls, 'create')
        # data must be a non-empty list of dicts
        if not (
            data
            and isinstance(data, list)
            and all([isinstance(x, dict) for x in data])
        ):
            raise ValueError('Invalid data passed: %s' % data)

        session = cls._get_session(session)
        if microversion is None:
            microversion = cls._get_microversion(session, action='create')
        requires_id = (
            cls.create_requires_id
            if cls.create_requires_id is not None
            else cls.create_method == 'PUT'
        )
        if cls.create_method == 'PUT':
            method = session.put
        elif cls.create_method == 'POST':
            method = session.post
        else:
            raise exceptions.ResourceFailure(
                "Invalid create method: %s" % cls.create_method
            )

        _body: ty.List[ty.Any] = []
        resources = []
        for attrs in data:
            # NOTE(gryf): we need to create resource objects, since
            # _prepare_request only works on instances, not classes.
            # Those objects will be used in case where request doesn't return
            # JSON data representing created resource, and yet it's required
            # to return newly created resource objects.
            resource = cls.new(connection=session._get_connection(), **attrs)
            resources.append(resource)
            request = resource._prepare_request(
                requires_id=requires_id, base_path=base_path
            )
            _body.append(request.body)

        body: ty.Union[ty.Dict[str, ty.Any], ty.List[ty.Any]] = _body
        if prepend_key:
            assert cls.resources_key
            body = {cls.resources_key: body}

        # NOTE(review): url/headers come from the last request prepared in
        # the loop above; all entries share the same collection URL.
        response = method(
            request.url,
            json=body,
            headers=request.headers,
            microversion=microversion,
            params=params,
        )
        exceptions.raise_from_response(response)
        data = response.json()

        if cls.resources_key:
            data = data[cls.resources_key]

        if not isinstance(data, list):
            data = [data]

        has_body = (
            cls.has_body
            if cls.create_returns_body is None
            else cls.create_returns_body
        )
        if has_body and cls.create_returns_body is False:
            # server didn't return bodies; fetch each created resource
            return (r.fetch(session) for r in resources)
        else:
            return (
                cls.existing(
                    microversion=microversion,
                    connection=session._get_connection(),
                    **res_dict,
                )
                for res_dict in data
            )

    def fetch(
        self,
        session,
        requires_id=True,
        base_path=None,
        error_message=None,
        skip_cache=False,
        *,
        resource_response_key=None,
        microversion=None,
        **params,
    ):
        """Get a remote resource based on this instance.

        :param session: The session to use for making this request.
        :type session: :class:`~keystoneauth1.adapter.Adapter`
        :param boolean requires_id: A boolean indicating whether resource ID
            should be part of the requested URI.
        :param str base_path: Base part of the URI for fetching resources, if
            different from :data:`~openstack.resource.Resource.base_path`.
        :param str error_message: An Error message to be returned if
            requested object does not exist.
        :param bool skip_cache: A boolean indicating whether optional API
            cache should be skipped for this invocation.
        :param str resource_response_key: Overrides the usage of
            self.resource_key when processing the response body.
        :param str microversion: API version to override the negotiated one.
        :param dict params: Additional parameters that can be consumed.

        :return: This :class:`Resource` instance.
        :raises: :exc:`~openstack.exceptions.MethodNotSupported` if
            :data:`Resource.allow_fetch` is not set to ``True``.
        :raises: :exc:`~openstack.exceptions.NotFoundException` if
            the resource was not found.
        """
        if not self.allow_fetch:
            raise exceptions.MethodNotSupported(self, 'fetch')

        request = self._prepare_request(
            requires_id=requires_id,
            base_path=base_path,
        )
        session = self._get_session(session)
        if microversion is None:
            microversion = self._get_microversion(session, action='fetch')

        response = session.get(
            request.url,
            microversion=microversion,
            params=params,
            skip_cache=skip_cache,
        )
        kwargs = {}
        if error_message:
            kwargs['error_message'] = error_message

        self.microversion = microversion

        if resource_response_key is not None:
            kwargs['resource_response_key'] = resource_response_key
        self._translate_response(response, **kwargs)
        return self

    def head(self, session, base_path=None, *, microversion=None):
        """Get headers from a remote resource based on this instance.

        :param session: The session to use for making this request.
        :type session: :class:`~keystoneauth1.adapter.Adapter`
        :param str base_path: Base part of the URI for fetching resources, if
            different from :data:`~openstack.resource.Resource.base_path`.
        :param str microversion: API version to override the negotiated one.

        :return: This :class:`Resource` instance.
        :raises: :exc:`~openstack.exceptions.MethodNotSupported` if
            :data:`Resource.allow_head` is not set to ``True``.
        :raises: :exc:`~openstack.exceptions.NotFoundException` if
            the resource was not found.
        """
        if not self.allow_head:
            raise exceptions.MethodNotSupported(self, 'head')

        session = self._get_session(session)
        if microversion is None:
            # HEAD shares the negotiation rules of fetch
            microversion = self._get_microversion(session, action='fetch')

        request = self._prepare_request(base_path=base_path)

        response = session.head(request.url, microversion=microversion)

        self.microversion = microversion
        self._translate_response(response, has_body=False)
        return self

    @property
    def requires_commit(self):
        """Whether the next commit() call will do anything."""
        return (
            self._body.dirty or self._header.dirty or self.allow_empty_commit
        )

    def commit(
        self,
        session,
        prepend_key=True,
        has_body=True,
        retry_on_conflict=None,
        base_path=None,
        *,
        microversion=None,
        **kwargs,
    ):
        """Commit the state of the instance to the remote resource.

        :param session: The session to use for making this request.
        :type session: :class:`~keystoneauth1.adapter.Adapter`
        :param prepend_key: A boolean indicating whether the resource_key
            should be prepended in a resource update request.
            Default to True.
        :param bool retry_on_conflict: Whether to enable retries on HTTP
            CONFLICT (409). Value of ``None`` leaves the `Adapter` defaults.
        :param str base_path: Base part of the URI for modifying resources, if
            different from :data:`~openstack.resource.Resource.base_path`.
        :param str microversion: API version to override the negotiated one.
        :param dict kwargs: Parameters that will be passed to
            _prepare_request()

        :return: This :class:`Resource` instance.
        :raises: :exc:`~openstack.exceptions.MethodNotSupported` if
            :data:`Resource.allow_commit` is not set to ``True``.
        """
        if not self.allow_commit:
            raise exceptions.MethodNotSupported(self, 'commit')

        # The id cannot be dirty for a commit
        self._body._dirty.discard("id")

        # Only try to update if we actually have anything to commit.
        if not self.requires_commit:
            return self

        # Avoid providing patch unconditionally to avoid breaking subclasses
        # without it.
if self.commit_jsonpatch: kwargs['patch'] = True request = self._prepare_request( prepend_key=prepend_key, base_path=base_path, **kwargs, ) if microversion is None: microversion = self._get_microversion(session, action='commit') return self._commit( session, request, self.commit_method, microversion, has_body=has_body, retry_on_conflict=retry_on_conflict, ) def _commit( self, session, request, method, microversion, has_body=True, retry_on_conflict=None, ): session = self._get_session(session) kwargs = {} retriable_status_codes = set(session.retriable_status_codes or ()) if retry_on_conflict: kwargs['retriable_status_codes'] = retriable_status_codes | {409} elif retry_on_conflict is not None and retriable_status_codes: # The baremetal proxy defaults to retrying on conflict, allow # overriding it via an explicit retry_on_conflict=False. kwargs['retriable_status_codes'] = retriable_status_codes - {409} try: call = getattr(session, method.lower()) except AttributeError: raise exceptions.ResourceFailure( "Invalid commit method: %s" % method ) response = call( request.url, json=request.body, headers=request.headers, microversion=microversion, **kwargs, ) self.microversion = microversion self._translate_response(response, has_body=has_body) return self def _convert_patch(self, patch): if not isinstance(patch, list): patch = [patch] converted = [] for item in patch: try: path = item['path'] parts = path.lstrip('/').split('/', 1) field = parts[0] except (KeyError, IndexError): raise ValueError("Malformed or missing path in %s" % item) try: component = getattr(self.__class__, field) except AttributeError: server_field = field else: server_field = component.name if len(parts) > 1: new_path = f'/{server_field}/{parts[1]}' else: new_path = '/%s' % server_field converted.append(dict(item, path=new_path)) return converted def patch( self, session, patch=None, prepend_key=True, has_body=True, retry_on_conflict=None, base_path=None, *, microversion=None, ): """Patch the remote 
 resource.

        Allows modifying the resource by providing a list of JSON patches to
        apply to it. The patches can use both the original (server-side) and
        SDK field names.

        :param session: The session to use for making this request.
        :type session: :class:`~keystoneauth1.adapter.Adapter`
        :param patch: Additional JSON patch as a list or one patch item.
            If provided, it is applied on top of any changes to the current
            resource.
        :param prepend_key: A boolean indicating whether the resource_key
            should be prepended in a resource update request.
            Default to True.
        :param bool retry_on_conflict: Whether to enable retries on HTTP
            CONFLICT (409). Value of ``None`` leaves the `Adapter` defaults.
        :param str base_path: Base part of the URI for modifying resources, if
            different from :data:`~openstack.resource.Resource.base_path`.
        :param str microversion: API version to override the negotiated one.

        :return: This :class:`Resource` instance.
        :raises: :exc:`~openstack.exceptions.MethodNotSupported` if
            :data:`Resource.allow_patch` is not set to ``True``.
        """
        if not self.allow_patch:
            raise exceptions.MethodNotSupported(self, 'patch')

        # The id cannot be dirty for a commit
        self._body._dirty.discard("id")

        # Only try to update if we actually have anything to commit.
        if not patch and not self.requires_commit:
            return self

        request = self._prepare_request(
            prepend_key=prepend_key,
            base_path=base_path,
            patch=True,
        )
        if microversion is None:
            microversion = self._get_microversion(session, action='patch')
        if patch:
            # Append the explicit patch items after the dirty-attr patch
            request.body += self._convert_patch(patch)

        return self._commit(
            session,
            request,
            'PATCH',
            microversion,
            has_body=has_body,
            retry_on_conflict=retry_on_conflict,
        )

    def delete(
        self, session, error_message=None, *, microversion=None, **kwargs
    ):
        """Delete the remote resource based on this instance.

        :param session: The session to use for making this request.
        :type session: :class:`~keystoneauth1.adapter.Adapter`
        :param str microversion: API version to override the negotiated one.
        :param dict kwargs: Parameters that will be passed to
            _prepare_request()

        :return: This :class:`Resource` instance.
        :raises: :exc:`~openstack.exceptions.MethodNotSupported` if
            :data:`Resource.allow_commit` is not set to ``True``.
        :raises: :exc:`~openstack.exceptions.NotFoundException` if
            the resource was not found.
        """
        response = self._raw_delete(
            session, microversion=microversion, **kwargs
        )
        kwargs = {}
        if error_message:
            kwargs['error_message'] = error_message

        self._translate_response(response, has_body=False, **kwargs)
        return self

    def _raw_delete(self, session, microversion=None, **kwargs):
        # Issue the DELETE call and return the raw response without
        # translating it onto this instance.
        if not self.allow_delete:
            raise exceptions.MethodNotSupported(self, 'delete')

        request = self._prepare_request(**kwargs)
        session = self._get_session(session)
        if microversion is None:
            microversion = self._get_microversion(session, action='delete')

        return session.delete(
            request.url,
            headers=request.headers,
            microversion=microversion,
        )

    @classmethod
    def list(
        cls,
        session,
        paginated=True,
        base_path=None,
        allow_unknown_params=False,
        *,
        microversion=None,
        headers=None,
        **params,
    ):
        """This method is a generator which yields resource objects.

        This resource object list generator handles pagination and takes query
        params for response filtering.

        :param session: The session to use for making this request.
        :type session: :class:`~keystoneauth1.adapter.Adapter`
        :param bool paginated: ``True`` if a GET to this resource returns
            a paginated series of responses, or ``False`` if a GET returns only
            one page of data. **When paginated is False only one page of data
            will be returned regardless of the API's support of pagination.**
        :param str base_path: Base part of the URI for listing resources, if
            different from :data:`~openstack.resource.Resource.base_path`.
        :param bool allow_unknown_params: ``True`` to accept, but discard
            unknown query parameters. This allows getting list of 'filters' and
            passing everything known to the server.
            ``False`` will result in validation exception when unknown query
            parameters are passed.
        :param str microversion: API version to override the negotiated one.
        :param dict headers: Additional headers to inject into the HTTP
            request.
        :param dict params: These keyword arguments are passed through the
            :meth:`~openstack.resource.QueryParamter._transpose` method
            to find if any of them match expected query parameters to be sent
            in the *params* argument to
            :meth:`~keystoneauth1.adapter.Adapter.get`. They are additionally
            checked against the :data:`~openstack.resource.Resource.base_path`
            format string to see if any path fragments need to be filled in by
            the contents of this argument.
            Parameters supported as filters by the server side are passed in
            the API call, remaining parameters are applied as filters to the
            retrieved results.

        :return: A generator of :class:`Resource` objects.
        :raises: :exc:`~openstack.exceptions.MethodNotSupported` if
            :data:`Resource.allow_list` is not set to ``True``.
        :raises: :exc:`~openstack.exceptions.InvalidResourceQuery` if query
            contains invalid params.
        """
        if not cls.allow_list:
            raise exceptions.MethodNotSupported(cls, 'list')

        session = cls._get_session(session)
        if microversion is None:
            microversion = cls._get_microversion(session, action='list')

        if base_path is None:
            base_path = cls.base_path

        api_filters = cls._query_mapping._validate(
            params,
            base_path=base_path,
            allow_unknown_params=True,
        )
        client_filters = {}
        # Gather query parameters which are not supported by the server
        for k, v in params.items():
            if (
                # Known attr
                hasattr(cls, k)
                # Is real attr property
                and isinstance(getattr(cls, k), Body)
                # not included in the query_params
                and k not in cls._query_mapping._mapping.keys()
            ):
                client_filters[k] = v
        query_params = cls._query_mapping._transpose(api_filters, cls)
        uri = base_path % params
        uri_params = {}

        limit = query_params.get('limit')

        for k, v in params.items():
            # We need to gather URI parts to set them on the resource later
            if hasattr(cls, k) and isinstance(getattr(cls, k), URI):
                uri_params[k] = v

        def _dict_filter(f, d):
            """Dict param based filtering"""
            if not d:
                return False
            for key in f.keys():
                if isinstance(f[key], dict):
                    if not _dict_filter(f[key], d.get(key, None)):
                        return False
                elif d.get(key, None) != f[key]:
                    return False
            return True

        headers_final = {"Accept": "application/json"}
        if headers:
            # caller-supplied headers win on conflict
            headers_final = {**headers_final, **headers}

        # Track the total number of resources yielded so we can paginate
        # swift objects
        total_yielded = 0
        while uri:
            # Copy query_params due to weird mock unittest interactions
            response = session.get(
                uri,
                headers=headers_final,
                params=query_params.copy(),
                microversion=microversion,
            )
            exceptions.raise_from_response(response)
            data = response.json()

            # Discard any existing pagination keys
            last_marker = query_params.pop('marker', None)
            query_params.pop('limit', None)

            if cls.resources_key:
                resources = data[cls.resources_key]
            else:
                resources = data

            if not isinstance(resources, list):
                resources = [resources]

            marker = None
            for raw_resource in resources:
                # Do not allow keys called "self" through. Glance chose
                # to name a key "self", so we need to pop it out because
                # we can't send it through cls.existing and into the
                # Resource initializer. "self" is already the first
                # argument and is practically a reserved word.
                raw_resource.pop("self", None)
                # We want that URI props are available on the resource
                raw_resource.update(uri_params)

                value = cls.existing(
                    microversion=microversion,
                    connection=session._get_connection(),
                    **raw_resource,
                )
                marker = value.id
                filters_matched = True
                # Iterate over client filters and return only if matching
                for key in client_filters.keys():
                    if isinstance(client_filters[key], dict):
                        if not _dict_filter(
                            client_filters[key], value.get(key, None)
                        ):
                            filters_matched = False
                            break
                    elif value.get(key, None) != client_filters[key]:
                        filters_matched = False
                        break

                if filters_matched:
                    yield value
                    total_yielded += 1

            if resources and paginated:
                uri, next_params = cls._get_next_link(
                    uri, response, data, marker, limit, total_yielded
                )
                try:
                    if next_params['marker'] == last_marker:
                        # If next page marker is same as what we were just
                        # asked something went terribly wrong. Some ancient
                        # services had bugs.
                        raise exceptions.SDKException(
                            'Endless pagination loop detected, aborting'
                        )
                except KeyError:
                    # do nothing, exception handling is cheaper then "if"
                    pass
                query_params.update(next_params)
            else:
                return

    @classmethod
    def _get_next_link(cls, uri, response, data, marker, limit, total_yielded):
        # Work out where the next page lives, trying (in order): link dicts
        # in the body, RFC5988 Link headers, and Swift-style total counts.
        next_link = None
        params = {}

        if isinstance(data, dict):
            pagination_key = cls.pagination_key

            if not pagination_key and 'links' in data:
                # api-wg guidelines are for a links dict in the main body
                pagination_key = 'links'

            if not pagination_key and cls.resources_key:
                # Nova has a {key}_links dict in the main body
                pagination_key = f'{cls.resources_key}_links'

            if pagination_key:
                links = data.get(pagination_key, {})
                # keystone might return a dict
                if isinstance(links, dict):
                    links = ({k: v} for k, v in links.items())

                for item in links:
                    if item.get('rel') == 'next' and 'href' in item:
                        next_link = item['href']
                        break

            # Glance has a next field in the main body
            next_link = next_link or data.get('next')
            if next_link and next_link.startswith('/v'):
                # strip the version prefix from an absolute path
                next_link = next_link[next_link.find('/', 1) :]

        if not next_link and 'next' in response.links:
            # RFC5988 specifies Link headers and requests parses them if they
            # are there. We prefer link dicts in resource body, but if those
            # aren't there and Link headers are, use them.
            next_link = response.links['next']['uri']

        # Swift provides a count of resources in a header and a list body
        if not next_link and cls.pagination_key:
            total_count = response.headers.get(cls.pagination_key)
            if total_count:
                total_count = int(total_count)
                if total_count > total_yielded:
                    params['marker'] = marker
                    if limit:
                        params['limit'] = limit
                    next_link = uri

        # Parse params from Link (next page URL) into params.
        # This prevents duplication of query parameters that with large
        # number of pages result in HTTP 414 error eventually.
if next_link: parts = urllib.parse.urlparse(next_link) query_params = urllib.parse.parse_qs(parts.query) params.update(query_params) next_link = urllib.parse.urljoin(next_link, parts.path) # If we still have no link, and limit was given and is non-zero, # and the number of records yielded equals the limit, then the user # is playing pagination ball so we should go ahead and try once more. if not next_link and limit: next_link = uri params['marker'] = marker params['limit'] = limit return next_link, params @classmethod def _get_one_match(cls, name_or_id, results): """Given a list of results, return the match""" the_result = None for maybe_result in results: id_value = cls._get_id(maybe_result) name_value = maybe_result.name if (id_value == name_or_id) or (name_value == name_or_id): # Only allow one resource to be found. If we already # found a match, raise an exception to show it. if the_result is None: the_result = maybe_result else: msg = "More than one %s exists with the name '%s'." msg = msg % (cls.__name__, name_or_id) raise exceptions.DuplicateResource(msg) return the_result @ty.overload @classmethod def find( cls, session, name_or_id: str, ignore_missing: ty.Literal[True] = True, list_base_path: ty.Optional[str] = None, *, microversion: ty.Optional[str] = None, all_projects: ty.Optional[bool] = None, **params, ) -> ty.Optional['Resource']: ... @ty.overload @classmethod def find( cls, session, name_or_id: str, ignore_missing: ty.Literal[False], list_base_path: ty.Optional[str] = None, *, microversion: ty.Optional[str] = None, all_projects: ty.Optional[bool] = None, **params, ) -> 'Resource': ... # excuse the duplication here: it's mypy's fault # https://github.com/python/mypy/issues/14764 @ty.overload @classmethod def find( cls, session, name_or_id: str, ignore_missing: bool, list_base_path: ty.Optional[str] = None, *, microversion: ty.Optional[str] = None, all_projects: ty.Optional[bool] = None, **params, ): ... 
@classmethod def find( cls, session, name_or_id: str, ignore_missing: bool = True, list_base_path: ty.Optional[str] = None, *, microversion: ty.Optional[str] = None, all_projects: ty.Optional[bool] = None, **params, ): """Find a resource by its name or id. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param name_or_id: This resource's identifier, if needed by the request. The default is ``None``. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param str list_base_path: base_path to be used when need listing resources. :param str microversion: API version to override the negotiated one. :param dict params: Any additional parameters to be passed into underlying methods, such as to :meth:`~openstack.resource.Resource.existing` in order to pass on URI parameters. :return: The :class:`Resource` object matching the given name or id or None if nothing matches. :raises: :class:`openstack.exceptions.DuplicateResource` if more than one resource is found for this request. :raises: :class:`openstack.exceptions.NotFoundException` if nothing is found and ignore_missing is ``False``. """ session = cls._get_session(session) # Try to short-circuit by looking directly for a matching ID. try: match = cls.existing( id=name_or_id, connection=session._get_connection(), **params, ) return match.fetch(session, microversion=microversion, **params) except ( exceptions.NotFoundException, exceptions.BadRequestException, exceptions.ForbiddenException, ): # NOTE(gtema): There are few places around openstack that return # 400 if we try to GET resource and it doesn't exist. pass if list_base_path: params['base_path'] = list_base_path # all_projects is a special case that is used by multiple services. 
We # handle it here since it doesn't make sense to pass it to the .fetch # call above if all_projects is not None: params['all_projects'] = all_projects if ( 'name' in cls._query_mapping._mapping.keys() and 'name' not in params ): params['name'] = name_or_id data = cls.list(session, **params) result = cls._get_one_match(name_or_id, data) if result is not None: return result if ignore_missing: return None raise exceptions.NotFoundException( f"No {cls.__name__} found for {name_or_id}" ) def _normalize_status(status): if status is not None: status = status.lower() return status def wait_for_status( session, resource, status, failures, interval=None, wait=None, attribute='status', callback=None, ): """Wait for the resource to be in a particular status. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param resource: The resource to wait on to reach the status. The resource must have a status attribute specified via ``attribute``. :type resource: :class:`~openstack.resource.Resource` :param status: Desired status of the resource. :param list failures: Statuses that would indicate the transition failed such as 'ERROR'. Defaults to ['ERROR']. :param interval: Number of seconds to wait between checks. Set to ``None`` to use the default interval. :param wait: Maximum number of seconds to wait for transition. Set to ``None`` to wait forever. :param attribute: Name of the resource attribute that contains the status. :param callback: A callback function. This will be called with a single value, progress. This is API specific but is generally a percentage value from 0-100. :return: The updated resource. :raises: :class:`~openstack.exceptions.ResourceTimeout` transition to status failed to occur in wait seconds. :raises: :class:`~openstack.exceptions.ResourceFailure` resource transitioned to one of the failure states. 
:raises: :class:`~AttributeError` if the resource does not have a status attribute """ current_status = getattr(resource, attribute) if _normalize_status(current_status) == _normalize_status(status): return resource if failures is None: failures = ['ERROR'] failures = [f.lower() for f in failures] name = f"{resource.__class__.__name__}:{resource.id}" msg = "Timeout waiting for {name} to transition to {status}".format( name=name, status=status ) for count in utils.iterate_timeout( timeout=wait, message=msg, wait=interval ): resource = resource.fetch(session, skip_cache=True) if not resource: raise exceptions.ResourceFailure( "{name} went away while waiting for {status}".format( name=name, status=status ) ) new_status = getattr(resource, attribute) normalized_status = _normalize_status(new_status) if normalized_status == _normalize_status(status): return resource elif normalized_status in failures: raise exceptions.ResourceFailure( "{name} transitioned to failure state {status}".format( name=name, status=new_status ) ) LOG.debug( 'Still waiting for resource %s to reach state %s, ' 'current state is %s', name, status, new_status, ) if callback: progress = getattr(resource, 'progress', None) or 0 callback(progress) def wait_for_delete(session, resource, interval, wait, callback=None): """Wait for the resource to be deleted. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param resource: The resource to wait on to be deleted. :type resource: :class:`~openstack.resource.Resource` :param interval: Number of seconds to wait between checks. :param wait: Maximum number of seconds to wait for the delete. :param callback: A callback function. This will be called with a single value, progress. This is API specific but is generally a percentage value from 0-100. :return: Method returns self on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` transition to status failed to occur in wait seconds. 
""" orig_resource = resource for count in utils.iterate_timeout( timeout=wait, message="Timeout waiting for {res}:{id} to delete".format( res=resource.__class__.__name__, id=resource.id ), wait=interval, ): try: resource = resource.fetch(session, skip_cache=True) if not resource: return orig_resource # Some resources like VolumeAttachment don't have status field. if hasattr(resource, 'status'): if resource.status.lower() == 'deleted': return resource except exceptions.NotFoundException: return orig_resource if callback: progress = getattr(resource, 'progress', None) or 0 callback(progress) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/service_description.py0000664000175000017500000003511700000000000022642 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty import warnings import os_service_types from openstack import _log from openstack import exceptions from openstack import proxy as proxy_mod from openstack import warnings as os_warnings __all__ = [ 'ServiceDescription', ] _logger = _log.setup_logging('openstack') _service_type_manager = os_service_types.ServiceTypes() class _ServiceDisabledProxyShim: def __init__(self, service_type, reason): self.service_type = service_type self.reason = reason def __getattr__(self, item): raise exceptions.ServiceDisabledException( "Service '{service_type}' is disabled because its configuration " "could not be loaded. {reason}".format( service_type=self.service_type, reason=self.reason or '' ) ) class ServiceDescription: #: Dictionary of supported versions and proxy classes for that version supported_versions: ty.Dict[str, ty.Type[proxy_mod.Proxy]] = {} #: main service_type to use to find this service in the catalog service_type: str #: list of aliases this service might be registered as aliases: ty.List[str] = [] def __init__(self, service_type, supported_versions=None, aliases=None): """Class describing how to interact with a REST service. Each service in an OpenStack cloud needs to be found by looking for it in the catalog. Once the endpoint is found, REST calls can be made, but a Proxy class and some Resource objects are needed to provide an object interface. Instances of ServiceDescription can be passed to `openstack.connection.Connection.add_service`, or a list can be passed to the `openstack.connection.Connection` constructor in the ``extra_services`` argument. All three parameters can be provided at instantation time, or a service-specific subclass can be used that sets the attributes directly. :param string service_type: service_type to look for in the keystone catalog :param list aliases: Optional list of aliases, if there is more than one name that might be used to register the service in the catalog. 
""" self.service_type = service_type or self.service_type self.supported_versions = ( supported_versions or self.supported_versions or {} ) self.aliases = aliases or self.aliases self.all_types = [service_type] + self.aliases def __get__(self, instance, owner): if instance is None: return self if self.service_type not in instance._proxies: proxy = self._make_proxy(instance) if not isinstance(proxy, _ServiceDisabledProxyShim): # The keystone proxy has a method called get_endpoint # that is about managing keystone endpoints. This is # unfortunate. try: endpoint = proxy_mod.Proxy.get_endpoint(proxy) except IndexError: # It's best not to look to closely here. This is # to support old placement. # There was a time when it had no status entry # in its version discovery doc (OY) In this case, # no endpoints get through version discovery # filtering. In order to deal with that, catch # the IndexError thrown by keystoneauth and # set an endpoint_override for the user to the # url in the catalog and try again. self._set_override_from_catalog(instance.config) proxy = self._make_proxy(instance) endpoint = proxy_mod.Proxy.get_endpoint(proxy) if instance._strict_proxies: self._validate_proxy(proxy, endpoint) proxy._connection = instance instance._proxies[self.service_type] = proxy return instance._proxies[self.service_type] def _set_override_from_catalog(self, config): override = config._get_endpoint_from_catalog( self.service_type, proxy_mod.Proxy, ) config.set_service_value( 'endpoint_override', self.service_type, override, ) def _validate_proxy(self, proxy, endpoint): exc = None service_url = getattr(proxy, 'skip_discovery', None) try: # Don't go too wild for e.g. 
swift if service_url is None: service_url = proxy.get_endpoint_data().service_url except Exception as e: exc = e if exc or not endpoint or not service_url: raise exceptions.ServiceDiscoveryException( "Failed to create a working proxy for service {service_type}: " "{message}".format( service_type=self.service_type, message=exc or "No valid endpoint was discoverable.", ) ) def _make_proxy(self, instance): """Create a Proxy for the service in question. :param instance: The `openstack.connection.Connection` we're working with. """ config = instance.config if not config.has_service(self.service_type): return _ServiceDisabledProxyShim( self.service_type, config.get_disabled_reason(self.service_type), ) # We don't know anything about this service, so the user is # explicitly just using us for a passthrough REST adapter. # Skip all the lower logic. if not self.supported_versions: temp_client = config.get_session_client( self.service_type, allow_version_hack=True, ) return temp_client # Check to see if we've got config that matches what we # understand in the SDK. version_string = config.get_api_version(self.service_type) endpoint_override = config.get_endpoint(self.service_type) # If the user doesn't give a version in config, but we only support # one version, then just use that version. if not version_string and len(self.supported_versions) == 1: version_string = list(self.supported_versions)[0] proxy_obj = None if endpoint_override and version_string: # Both endpoint override and version_string are set, we don't # need to do discovery - just trust the user. proxy_class = self.supported_versions.get(version_string[0]) if proxy_class: proxy_obj = config.get_session_client( self.service_type, constructor=proxy_class, ) else: warnings.warn( f"The configured version, {version_string} for service " f"{self.service_type} is not known or supported by " f"openstacksdk. 
The resulting Proxy object will only " f"have direct passthrough REST capabilities.", category=os_warnings.UnsupportedServiceVersion, ) elif endpoint_override: temp_adapter = config.get_session_client(self.service_type) api_version = temp_adapter.get_endpoint_data().api_version proxy_class = self.supported_versions.get(str(api_version[0])) if proxy_class: proxy_obj = config.get_session_client( self.service_type, constructor=proxy_class, ) else: warnings.warn( f"Service {self.service_type} has an endpoint override " f"set but the version discovered at that endpoint, " f"{api_version}, is not supported by openstacksdk. " f"The resulting Proxy object will only have direct " f"passthrough REST capabilities.", category=os_warnings.UnsupportedServiceVersion, ) if proxy_obj: if getattr(proxy_obj, 'skip_discovery', False): # Some services, like swift, don't have discovery. While # keystoneauth will behave correctly and handle such # scenarios, it's not super efficient as it involves trying # and falling back a few times. return proxy_obj data = proxy_obj.get_endpoint_data() if not data and instance._strict_proxies: raise exceptions.ServiceDiscoveryException( "Failed to create a working proxy for service " "{service_type}: No endpoint data found.".format( service_type=self.service_type ) ) # If we've gotten here with a proxy object it means we have # an endpoint_override in place. If the catalog_url and # service_url don't match, which can happen if there is a # None plugin and auth.endpoint like with standalone ironic, # we need to be explicit that this service has an endpoint_override # so that subsequent discovery calls don't get made incorrectly. 
if data.catalog_url != data.service_url: ep_key = '{service_type}_endpoint_override'.format( service_type=self.service_type.replace('-', '_') ) config.config[ep_key] = data.service_url proxy_obj = config.get_session_client( self.service_type, constructor=proxy_class, ) return proxy_obj # Make an adapter to let discovery take over version_kwargs = {} supported_versions = sorted([int(f) for f in self.supported_versions]) if version_string: version_kwargs['version'] = version_string if getattr( self.supported_versions[str(supported_versions[0])], 'skip_discovery', False, ): # Requested service does not support version discovery # In this case it is more efficient to set the # endpoint_override to the current catalog endpoint value, # otherwise next request will try to perform discovery. temp_adapter = config.get_session_client(self.service_type) ep_override = temp_adapter.get_endpoint(skip_discovery=True) ep_key = '{service_type}_endpoint_override'.format( service_type=self.service_type.replace('-', '_') ) config.config[ep_key] = ep_override return config.get_session_client( self.service_type, allow_version_hack=True, constructor=self.supported_versions[ str(supported_versions[0]) ], version=version_string, ) else: version_kwargs['min_version'] = str(supported_versions[0]) version_kwargs['max_version'] = '{version}.latest'.format( version=str(supported_versions[-1]) ) temp_adapter = config.get_session_client( self.service_type, allow_version_hack=True, **version_kwargs ) found_version = temp_adapter.get_api_major_version() if found_version is None: region_name = instance.config.get_region_name(self.service_type) if version_kwargs: raise exceptions.NotSupported( "The {service_type} service for {cloud}:{region_name}" " exists but does not have any supported versions.".format( service_type=self.service_type, cloud=instance.name, region_name=region_name, ) ) else: raise exceptions.NotSupported( "The {service_type} service for {cloud}:{region_name}" " exists but no version 
was discoverable.".format( service_type=self.service_type, cloud=instance.name, region_name=region_name, ) ) proxy_class = self.supported_versions.get(str(found_version[0])) if proxy_class: return config.get_session_client( self.service_type, allow_version_hack=True, constructor=proxy_class, **version_kwargs, ) # No proxy_class # Maybe openstacksdk is being used for the passthrough # REST API proxy layer for an unknown service in the # service catalog that also doesn't have any useful # version discovery? warnings.warn( "Service {service_type} has no discoverable version. " "The resulting Proxy object will only have direct " "passthrough REST capabilities.".format( service_type=self.service_type ), category=os_warnings.UnsupportedServiceVersion, ) return temp_adapter def __set__(self, instance, value): raise AttributeError('Service Descriptors cannot be set') def __delete__(self, instance): # NOTE(gtema) Some clouds are not very fast (or interested at all) # in bringing their changes upstream. If there are incompatible changes # downstream we need to allow overriding default implementation by # deleting service_type attribute of the connection and then # "add_service" with new implementation. 
# This is implemented explicitely not very comfortable to use # to show how bad it is not to contribute changes back for service_type in self.all_types: if service_type in instance._proxies: del instance._proxies[service_type] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3213499 openstacksdk-4.0.0/openstack/shared_file_system/0000775000175000017500000000000000000000000022067 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/__init__.py0000664000175000017500000000000000000000000024166 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/shared_file_system_service.py0000664000175000017500000000146000000000000030033 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import service_description from openstack.shared_file_system.v2 import _proxy class SharedFilesystemService(service_description.ServiceDescription): """The shared file systems service.""" supported_versions = { '2': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.325352 openstacksdk-4.0.0/openstack/shared_file_system/v2/0000775000175000017500000000000000000000000022416 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/__init__.py0000664000175000017500000000000000000000000024515 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/_proxy.py0000664000175000017500000014322400000000000024316 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack import proxy from openstack import resource from openstack.shared_file_system.v2 import ( availability_zone as _availability_zone, ) from openstack.shared_file_system.v2 import limit as _limit from openstack.shared_file_system.v2 import quota_class_set as _quota_class_set from openstack.shared_file_system.v2 import resource_locks as _resource_locks from openstack.shared_file_system.v2 import share as _share from openstack.shared_file_system.v2 import share_group as _share_group from openstack.shared_file_system.v2 import ( share_group_snapshot as _share_group_snapshot, ) from openstack.shared_file_system.v2 import ( share_access_rule as _share_access_rule, ) from openstack.shared_file_system.v2 import ( share_export_locations as _share_export_locations, ) from openstack.shared_file_system.v2 import share_instance as _share_instance from openstack.shared_file_system.v2 import share_network as _share_network from openstack.shared_file_system.v2 import ( share_network_subnet as _share_network_subnet, ) from openstack.shared_file_system.v2 import share_snapshot as _share_snapshot from openstack.shared_file_system.v2 import ( share_snapshot_instance as _share_snapshot_instance, ) from openstack.shared_file_system.v2 import storage_pool as _storage_pool from openstack.shared_file_system.v2 import user_message as _user_message class Proxy(proxy.Proxy): _resource_registry = { "availability_zone": _availability_zone.AvailabilityZone, "share_snapshot": _share_snapshot.ShareSnapshot, "storage_pool": _storage_pool.StoragePool, "user_message": _user_message.UserMessage, "limit": _limit.Limit, "share": _share.Share, "share_network": _share_network.ShareNetwork, "share_network_subnet": _share_network_subnet.ShareNetworkSubnet, "share_snapshot_instance": _share_snapshot_instance.ShareSnapshotInstance, # noqa: E501 "share_instance": _share_instance.ShareInstance, "share_export_locations": _share_export_locations.ShareExportLocation, 
"share_access_rule": _share_access_rule.ShareAccessRule, "share_group": _share_group.ShareGroup, "share_group_snapshot": _share_group_snapshot.ShareGroupSnapshot, "resource_locks": _resource_locks.ResourceLock, "quota_class_set": _quota_class_set.QuotaClassSet, } def availability_zones(self): """Retrieve shared file system availability zones :returns: A generator of availability zone resources :rtype: :class:`~openstack.shared_file_system.v2.availability_zone.AvailabilityZone` """ return self._list(_availability_zone.AvailabilityZone) def shares(self, details=True, **query): """Lists all shares with details :param kwargs query: Optional query parameters to be sent to limit the shares being returned. Available parameters include: * status: Filters by a share status * share_server_id: The UUID of the share server. * metadata: One or more metadata key and value pairs as a url encoded dictionary of strings. * extra_specs: The extra specifications as a set of one or more key-value pairs. * share_type_id: The UUID of a share type to query resources by. * name: The user defined name of the resource to filter resources by. * snapshot_id: The UUID of the share’s base snapshot to filter the request based on. * host: The host name of the resource to query with. * share_network_id: The UUID of the share network to filter resources by. * project_id: The ID of the project that owns the resource. * is_public: A boolean query parameter that, when set to true, allows retrieving public resources that belong to all projects. * share_group_id: The UUID of a share group to filter resource. * export_location_id: The export location UUID that can be used to filter shares or share instances. * export_location_path: The export location path that can be used to filter shares or share instances. * name~: The name pattern that can be used to filter shares, share snapshots, share networks or share groups. 
* description~: The description pattern that can be used to filter shares, share snapshots, share networks or share groups. * with_count: Whether to show count in API response or not, default is False. * limit: The maximum number of shares to return. * offset: The offset to define start point of share or share group listing. * sort_key: The key to sort a list of shares. * sort_dir: The direction to sort a list of shares. A valid value is asc, or desc. :returns: Details of shares resources :rtype: :class:`~openstack.shared_file_system.v2.share.Share` """ base_path = '/shares/detail' if details else None return self._list(_share.Share, base_path=base_path, **query) def find_share(self, name_or_id, ignore_missing=True, **query): """Find a single share :param name_or_id: The name or ID of a share. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict query: Any additional parameters to be passed into underlying methods. such as query filters. :returns: One :class:`~openstack.shared_file_system.v2.share.Share` or None """ return self._find( _share.Share, name_or_id, ignore_missing=ignore_missing, **query ) def get_share(self, share_id): """Lists details of a single share :param share: The ID of the share to get :returns: Details of the identified share :rtype: :class:`~openstack.shared_file_system.v2.share.Share` """ return self._get(_share.Share, share_id) def delete_share(self, share, ignore_missing=True): """Deletes a single share :param share: The ID of the share to delete :returns: Result of the ``delete`` :rtype: ``None`` """ self._delete(_share.Share, share, ignore_missing=ignore_missing) def update_share(self, share_id, **attrs): """Updates details of a single share. 
:param share: The ID of the share to update :param dict attrs: The attributes to update on the share :returns: the updated share :rtype: :class:`~openstack.shared_file_system.v2.share.Share` """ return self._update(_share.Share, share_id, **attrs) def create_share(self, **attrs): """Creates a share from attributes :returns: Details of the new share :param dict attrs: Attributes which will be used to create a :class:`~openstack.shared_file_system.v2.shares.Shares`, comprised of the properties on the Shares class. 'size' and 'share' are required to create a share. :rtype: :class:`~openstack.shared_file_system.v2.share.Share` """ return self._create(_share.Share, **attrs) def revert_share_to_snapshot(self, share_id, snapshot_id): """Reverts a share to the specified snapshot, which must be the most recent one known to manila. :param share_id: The ID of the share to revert :param snapshot_id: The ID of the snapshot to revert to :returns: Result of the ``revert`` :rtype: ``None`` """ res = self._get(_share.Share, share_id) res.revert_to_snapshot(self, snapshot_id) def manage_share(self, protocol, export_path, service_host, **params): """Manage a share. :param str protocol: The shared file systems protocol of this share. :param str export_path: The export path formatted according to the protocol. :param str service_host: The manage-share service host. :param kwargs params: Optional parameters to be sent. Available parameters include: * name: The user defined name of the resource. * share_type: The name or ID of the share type to be used to create the resource. * driver_options: A set of one or more key and value pairs, as a dictionary of strings, that describe driver options. * is_public: The level of visibility for the share. * description: The user defiend description of the resource. * share_server_id: The UUID of the share server. :returns: The share that was managed. 
""" share = _share.Share() return share.manage( self, protocol, export_path, service_host, **params ) def unmanage_share(self, share_id): """Unmanage the share with the given share ID. :param share_id: The ID of the share to unmanage. :returns: ``None`` """ share_to_unmanage = self._get(_share.Share, share_id) share_to_unmanage.unmanage(self) def resize_share( self, share_id, new_size, no_shrink=False, no_extend=False, force=False ): """Resizes a share, extending/shrinking the share as needed. :param share_id: The ID of the share to resize :param new_size: The new size of the share in GiBs. If new_size is the same as the current size, then nothing is done. :param bool no_shrink: If set to True, the given share is not shrunk, even if shrinking the share is required to get the share to the given size. This could be useful for extending shares to a minimum size, while not shrinking shares to the given size. This defaults to False. :param bool no_extend: If set to True, the given share is not extended, even if extending the share is required to get the share to the given size. This could be useful for shrinking shares to a maximum size, while not extending smaller shares to that maximum size. This defaults to False. :param bool force: Whether or not force should be used, in the case where the share should be extended. :returns: ``None`` """ res = self._get(_share.Share, share_id) if new_size > res.size and no_extend is not True: res.extend_share(self, new_size, force) elif new_size < res.size and no_shrink is not True: res.shrink_share(self, new_size) def share_groups(self, **query): """Lists all share groups. :param kwargs query: Optional query parameters to be sent to limit the share groups being returned. Available parameters include: * status: Filters by a share group status. * name: The user defined name of the resource to filter resources by. * description: The user defined description text that can be used to filter resources. 
* project_id: The project ID of the user or service. * share_server_id: The UUID of the share server. * snapshot_id: The UUID of the share’s base snapshot to filter the request based on. * host: The host name for the back end. * share_network_id: The UUID of the share network to filter resources by. * share_group_type_id: The share group type ID to filter share groups. * share_group_snapshot_id: The source share group snapshot ID to list the share group. * share_types: A list of one or more share type IDs. Allows filtering share groups. * limit: The maximum number of share groups members to return. * offset: The offset to define start point of share or share group listing. * sort_key: The key to sort a list of shares. * sort_dir: The direction to sort a list of shares * name~: The name pattern that can be used to filter shares, share snapshots, share networks or share groups. * description~: The description pattern that can be used to filter shares, share snapshots, share networks or share groups. :returns: A generator of manila share group resources :rtype: :class:`~openstack.shared_file_system.v2. share_group.ShareGroup` """ return self._list(_share_group.ShareGroup, **query) def get_share_group(self, share_group_id): """Lists details for a share group. :param share: The ID of the share group to get :returns: Details of the identified share group :rtype: :class:`~openstack.shared_file_system.v2. share_group.ShareGroup` """ return self._get(_share_group.ShareGroup, share_group_id) def find_share_group(self, name_or_id, ignore_missing=True): """Finds a single share group :param name_or_id: The name or ID of a share group. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.shared_file_system.v2. 
share_group.ShareGroup` or None """ return self._find( _share_group.ShareGroup, name_or_id, ignore_missing=ignore_missing ) def create_share_group(self, **attrs): """Creates a share group from attributes :returns: Details of the new share group :rtype: :class:`~openstack.shared_file_system.v2. share_group.ShareGroup` """ return self._create(_share_group.ShareGroup, **attrs) def update_share_group(self, share_group_id, **kwargs): """Updates details of a single share group :param share: The ID of the share group :returns: Updated details of the identified share group :rtype: :class:`~openstack.shared_file_system.v2. share_group.ShareGroup` """ return self._update(_share_group.ShareGroup, share_group_id, **kwargs) def delete_share_group(self, share_group_id, ignore_missing=True): """Deletes a single share group :param share: The ID of the share group :returns: Result of the "delete" on share group :rtype: :class:`~openstack.shared_file_system.v2. share_group.ShareGroup` """ return self._delete( _share_group.ShareGroup, share_group_id, ignore_missing=ignore_missing, ) def wait_for_status( self, res, status='active', failures=None, interval=2, wait=120, status_attr_name='status', ): """Wait for a resource to be in a particular status. :param res: The resource to wait on to reach the specified status. The resource must have a ``status`` attribute. :type resource: A :class:`~openstack.resource.Resource` object. :param status: Desired status. :param failures: Statuses that would be interpreted as failures. :type failures: :py:class:`list` :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :param status_attr_name: name of the attribute to reach the desired status. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to the desired status failed to occur in specified seconds. 
:raises: :class:`~openstack.exceptions.ResourceFailure` if the resource has transited to one of the failure statuses. :raises: :class:`~AttributeError` if the resource does not have a ``status`` attribute. """ failures = [] if failures is None else failures return resource.wait_for_status( self, res, status, failures, interval, wait, attribute=status_attr_name, ) def storage_pools(self, details=True, **query): """Lists all back-end storage pools with details :param kwargs query: Optional query parameters to be sent to limit the storage pools being returned. Available parameters include: * pool_name: The pool name for the back end. * host_name: The host name for the back end. * backend_name: The name of the back end. * capabilities: The capabilities for the storage back end. * share_type: The share type name or UUID. :returns: A generator of manila storage pool resources :rtype: :class:`~openstack.shared_file_system.v2.storage_pool.StoragePool` """ base_path = '/scheduler-stats/pools/detail' if details else None return self._list( _storage_pool.StoragePool, base_path=base_path, **query ) def user_messages(self, **query): """List shared file system user messages :param kwargs query: Optional query parameters to be sent to limit the messages being returned. Available parameters include: * action_id: The ID of the action during which the message was created. * detail_id: The ID of the message detail. * limit: The maximum number of shares to return. * message_level: The message level. * offset: The offset to define start point of share or share group listing. * sort_key: The key to sort a list of messages. * sort_dir: The direction to sort a list of shares. * project_id: The ID of the project for which the message was created. * request_id: The ID of the request during which the message was created. * resource_id: The UUID of the resource for which the message was created. * resource_type: The type of the resource for which the message was created. 
:returns: A generator of user message resources :rtype: :class:`~openstack.shared_file_system.v2.user_message.UserMessage` """ return self._list(_user_message.UserMessage, **query) def get_user_message(self, message_id): """List details of a single user message :param message_id: The ID of the user message :returns: Details of the identified user message :rtype: :class:`~openstack.shared_file_system.v2.user_message.UserMessage` """ return self._get(_user_message.UserMessage, message_id) def delete_user_message(self, message_id, ignore_missing=True): """Deletes a single user message :param message_id: The ID of the user message :returns: Result of the "delete" on the user message :rtype: :class:`~openstack.shared_file_system.v2.user_message.UserMessage` """ return self._delete( _user_message.UserMessage, message_id, ignore_missing=ignore_missing, ) def limits(self, **query): """Lists all share limits. :param kwargs query: Optional query parameters to be sent to limit the share limits being returned. :returns: A generator of manila share limits resources :rtype: :class:`~openstack.shared_file_system.v2.limit.Limit` """ return self._list(_limit.Limit, **query) def share_snapshots(self, details=True, **query): """Lists all share snapshots with details. :param kwargs query: Optional query parameters to be sent to limit the snapshots being returned. Available parameters include: * project_id: The ID of the user or service making the API request. 
:returns: A generator of manila share snapshot resources :rtype: :class:`~openstack.shared_file_system.v2.share_snapshot.ShareSnapshot` """ base_path = '/snapshots/detail' if details else None return self._list( _share_snapshot.ShareSnapshot, base_path=base_path, **query ) def get_share_snapshot(self, snapshot_id): """Lists details of a single share snapshot :param snapshot_id: The ID of the snapshot to get :returns: Details of the identified share snapshot :rtype: :class:`~openstack.shared_file_system.v2.share_snapshot.ShareSnapshot` """ return self._get(_share_snapshot.ShareSnapshot, snapshot_id) def create_share_snapshot(self, **attrs): """Creates a share snapshot from attributes :returns: Details of the new share snapshot :rtype: :class:`~openstack.shared_file_system.v2.share_snapshot.ShareSnapshot` """ return self._create(_share_snapshot.ShareSnapshot, **attrs) def update_share_snapshot(self, snapshot_id, **attrs): """Updates details of a single share. :param snapshot_id: The ID of the snapshot to update :pram dict attrs: The attributes to update on the snapshot :returns: the updated share snapshot :rtype: :class:`~openstack.shared_file_system.v2.share_snapshot.ShareSnapshot` """ return self._update( _share_snapshot.ShareSnapshot, snapshot_id, **attrs ) def delete_share_snapshot(self, snapshot_id, ignore_missing=True): """Deletes a single share snapshot :param snapshot_id: The ID of the snapshot to delete :returns: Result of the ``delete`` :rtype: ``None`` """ self._delete( _share_snapshot.ShareSnapshot, snapshot_id, ignore_missing=ignore_missing, ) # ========= Network Subnets ========== def share_network_subnets(self, share_network_id): """Lists all share network subnets with details. :param share_network_id: The id of the share network for which Share Network Subnets should be listed. 
:returns: A generator of manila share network subnets :rtype: :class:`~openstack.shared_file_system.v2.share_network_subnet.ShareNetworkSubnet` """ return self._list( _share_network_subnet.ShareNetworkSubnet, share_network_id=share_network_id, ) def get_share_network_subnet( self, share_network_id, share_network_subnet_id, ): """Lists details of a single share network subnet. :param share_network_id: The id of the share network associated with the Share Network Subnet. :param share_network_subnet_id: The id of the Share Network Subnet to retrieve. :returns: Details of the identified share network subnet :rtype: :class:`~openstack.shared_file_system.v2.share_network_subnet.ShareNetworkSubnet` """ return self._get( _share_network_subnet.ShareNetworkSubnet, share_network_subnet_id, share_network_id=share_network_id, ) def create_share_network_subnet(self, share_network_id, **attrs): """Creates a share network subnet from attributes :param share_network_id: The id of the share network wthin which the the Share Network Subnet should be created. :param dict attrs: Attributes which will be used to create a share network subnet. :returns: Details of the new share network subnet. :rtype: :class:`~openstack.shared_file_system.v2.share_network_subnet.ShareNetworkSubnet` """ return self._create( _share_network_subnet.ShareNetworkSubnet, **attrs, share_network_id=share_network_id, ) def delete_share_network_subnet( self, share_network_id, share_network_subnet, ignore_missing=True ): """Deletes a share network subnet. :param share_network_id: The id of the Share Network associated with the Share Network Subnet. :param share_network_subnet: The id of the Share Network Subnet which should be deleted. 
:returns: Result of the ``delete`` :rtype: None """ self._delete( _share_network_subnet.ShareNetworkSubnet, share_network_subnet, share_network_id=share_network_id, ignore_missing=ignore_missing, ) def wait_for_delete(self, res, interval=2, wait=120): """Wait for a resource to be deleted. :param res: The resource to wait on to be deleted. :type resource: A :class:`~openstack.resource.Resource` object. :param interval: Number of seconds to wait before to consecutive checks. Default to 2. :param wait: Maximum number of seconds to wait before the change. Default to 120. :returns: The resource is returned on success. :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition to delete failed to occur in the specified seconds. """ return resource.wait_for_delete(self, res, interval, wait) def share_snapshot_instances(self, details=True, **query): """Lists all share snapshot instances with details. :param bool details: Whether to fetch detailed resource descriptions. Defaults to True. :param kwargs query: Optional query parameters to be sent to limit the share snapshot instance being returned. Available parameters include: * snapshot_id: The UUID of the share’s base snapshot to filter the request based on. * project_id: The project ID of the user or service making the request. :returns: A generator of share snapshot instance resources :rtype: :class:`~openstack.shared_file_system.v2. share_snapshot_instance.ShareSnapshotInstance` """ base_path = '/snapshot-instances/detail' if details else None return self._list( _share_snapshot_instance.ShareSnapshotInstance, base_path=base_path, **query, ) def get_share_snapshot_instance(self, snapshot_instance_id): """Lists details of a single share snapshot instance :param snapshot_instance_id: The ID of the snapshot instance to get :returns: Details of the identified snapshot instance :rtype: :class:`~openstack.shared_file_system.v2. 
share_snapshot_instance.ShareSnapshotInstance` """ return self._get( _share_snapshot_instance.ShareSnapshotInstance, snapshot_instance_id, ) def share_networks(self, details=True, **query): """Lists all share networks with details. :param dict query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * name~: The user defined name of the resource to filter resources by. * project_id: The ID of the user or service making the request. * description~: The description pattern that can be used to filter shares, share snapshots, share networks or share groups. * all_projects: (Admin only). Defines whether to list the requested resources for all projects. :returns: Details of shares networks :rtype: :class:`~openstack.shared_file_system.v2. share_network.ShareNetwork` """ base_path = '/share-networks/detail' if details else None return self._list( _share_network.ShareNetwork, base_path=base_path, **query ) def get_share_network(self, share_network_id): """Lists details of a single share network :param share_network: The ID of the share network to get :returns: Details of the identified share network :rtype: :class:`~openstack.shared_file_system.v2. share_network.ShareNetwork` """ return self._get(_share_network.ShareNetwork, share_network_id) def delete_share_network(self, share_network_id, ignore_missing=True): """Deletes a single share network :param share_network_id: The ID of the share network to delete :rtype: ``None`` """ self._delete( _share_network.ShareNetwork, share_network_id, ignore_missing=ignore_missing, ) def update_share_network(self, share_network_id, **attrs): """Updates details of a single share network. :param share_network_id: The ID of the share network to update :pram dict attrs: The attributes to update on the share network :returns: the updated share network :rtype: :class:`~openstack.shared_file_system.v2. 
share_network.ShareNetwork` """ return self._update( _share_network.ShareNetwork, share_network_id, **attrs ) def create_share_network(self, **attrs): """Creates a share network from attributes :returns: Details of the new share network :param dict attrs: Attributes which will be used to create a :class:`~openstack.shared_file_system.v2. share_network.ShareNetwork`,comprised of the properties on the ShareNetwork class. :rtype: :class:`~openstack.shared_file_system.v2. share_network.ShareNetwork` """ return self._create(_share_network.ShareNetwork, **attrs) def share_instances(self, **query): """Lists all share instances. :param kwargs query: Optional query parameters to be sent to limit the share instances being returned. Available parameters include: * export_location_id: The export location UUID that can be used to filter share instances. * export_location_path: The export location path that can be used to filter share instances. :returns: Details of share instances resources :rtype: :class:`~openstack.shared_file_system.v2. share_instance.ShareInstance` """ return self._list(_share_instance.ShareInstance, **query) def get_share_instance(self, share_instance_id): """Shows details for a single share instance :param share_instance_id: The UUID of the share instance to get :returns: Details of the identified share instance :rtype: :class:`~openstack.shared_file_system.v2. share_instance.ShareInstance` """ return self._get(_share_instance.ShareInstance, share_instance_id) def reset_share_instance_status(self, share_instance_id, status): """Explicitly updates the state of a share instance. :param share_instance_id: The UUID of the share instance to reset. :param status: The share or share instance status to be set. 
:returns: ``None`` """ res = self._get_resource( _share_instance.ShareInstance, share_instance_id ) res.reset_status(self, status) def delete_share_instance(self, share_instance_id): """Force-deletes a share instance :param share_instance: The ID of the share instance to delete :returns: ``None`` """ res = self._get_resource( _share_instance.ShareInstance, share_instance_id ) res.force_delete(self) def export_locations(self, share_id): """List all export locations with details :param share_id: The ID of the share to list export locations from :returns: List of export locations :rtype: List of :class:`~openstack.shared_filesystem_storage.v2. share_export_locations.ShareExportLocations` """ return self._list( _share_export_locations.ShareExportLocation, share_id=share_id ) def get_export_location(self, export_location, share_id): """List details of export location :param export_location: The export location resource to get :param share_id: The ID of the share to get export locations from :returns: Details of identified export location :rtype: :class:`~openstack.shared_filesystem_storage.v2. share_export_locations.ShareExportLocations` """ export_location_id = resource.Resource._get_id(export_location) return self._get( _share_export_locations.ShareExportLocation, export_location_id, share_id=share_id, ) def access_rules(self, share, **query): """Lists the access rules on a share. :returns: A generator of the share access rules. :rtype: :class:`~openstack.shared_file_system.v2. share_access_rules.ShareAccessRules` """ share = self._get_resource(_share.Share, share) return self._list( _share_access_rule.ShareAccessRule, share_id=share.id, **query ) def get_access_rule(self, access_id): """List details of an access rule. :param access_id: The id of the access rule to get :returns: Details of the identified access rule. :rtype: :class:`~openstack.shared_file_system.v2. 
share_access_rules.ShareAccessRules` """ return self._get(_share_access_rule.ShareAccessRule, access_id) def create_access_rule(self, share_id, **attrs): """Creates an access rule from attributes :returns: Details of the new access rule :param share_id: The ID of the share :param dict attrs: Attributes which will be used to create a :class:`~openstack.shared_file_system.v2. share_access_rules.ShareAccessRules`, comprised of the properties on the ShareAccessRules class. :rtype: :class:`~openstack.shared_file_system.v2. share_access_rules.ShareAccessRules` """ base_path = f"/shares/{share_id}/action" return self._create( _share_access_rule.ShareAccessRule, base_path=base_path, **attrs ) def delete_access_rule( self, access_id, share_id, ignore_missing=True, *, unrestrict=False ): """Deletes an access rule :param access_id: The id of the access rule to get :param share_id: The ID of the share :param unrestrict: If Manila must attempt removing locks while deleting :rtype: ``requests.models.Response`` HTTP response from internal requests client """ res = self._get_resource(_share_access_rule.ShareAccessRule, access_id) return res.delete( self, share_id, ignore_missing=ignore_missing, unrestrict=unrestrict, ) def share_group_snapshots(self, details=True, **query): """Lists all share group snapshots. :param kwargs query: Optional query parameters to be sent to limit the share group snapshots being returned. Available parameters include: * project_id: The ID of the project that owns the resource. * name: The user defined name of the resource to filter resources. * description: The user defined description text that can be used to filter resources. * status: Filters by a share status * share_group_id: The UUID of a share group to filter resource. * limit: The maximum number of share group snapshot members to return. * offset: The offset to define start point of share or share group listing. * sort_key: The key to sort a list of shares. 
* sort_dir: The direction to sort a list of shares. A valid value is asc, or desc. :returns: Details of share group snapshots resources :rtype: :class:`~openstack.shared_file_system.v2. share_group_snapshot.ShareGroupSnapshot` """ base_path = '/share-group-snapshots/detail' if details else None return self._list( _share_group_snapshot.ShareGroupSnapshot, base_path=base_path, **query, ) def share_group_snapshot_members(self, group_snapshot_id): """Lists all share group snapshots members. :param group_snapshot_id: The ID of the group snapshot to get :returns: List of the share group snapshot members, which are share snapshots. :rtype: dict containing attributes of the share snapshot members. """ res = self._get( _share_group_snapshot.ShareGroupSnapshot, group_snapshot_id, ) response = res.get_members(self) return response def get_share_group_snapshot(self, group_snapshot_id): """Show share group snapshot details :param group_snapshot_id: The ID of the group snapshot to get :returns: Details of the group snapshot :rtype: :class:`~openstack.shared_file_system.v2. share_group_snapshot.ShareGroupSnapshot` """ return self._get( _share_group_snapshot.ShareGroupSnapshot, group_snapshot_id ) def create_share_group_snapshot(self, share_group_id, **attrs): """Creates a point-in-time snapshot copy of a share group. :returns: Details of the new snapshot :param dict attrs: Attributes which will be used to create a :class:`~openstack.shared_file_system.v2. share_group_snapshots.ShareGroupSnapshots`, :param 'share_group_id': ID of the share group to have the snapshot taken. :rtype: :class:`~openstack.shared_file_system.v2. share_group_snapshot.ShareGroupSnapshot` """ return self._create( _share_group_snapshot.ShareGroupSnapshot, share_group_id=share_group_id, **attrs, ) def reset_share_group_snapshot_status(self, group_snapshot_id, status): """Reset share group snapshot state. 
:param group_snapshot_id: The ID of the share group snapshot to reset :param status: The state of the share group snapshot to be set, A valid value is "creating", "error", "available", "deleting", "error_deleting". :rtype: ``None`` """ res = self._get( _share_group_snapshot.ShareGroupSnapshot, group_snapshot_id ) res.reset_status(self, status) def update_share_group_snapshot(self, group_snapshot_id, **attrs): """Updates a share group snapshot. :param group_snapshot_id: The ID of the share group snapshot to update :param dict attrs: The attributes to update on the share group snapshot :returns: the updated share group snapshot :rtype: :class:`~openstack.shared_file_system.v2. share_group_snapshot.ShareGroupSnapshot` """ return self._update( _share_group_snapshot.ShareGroupSnapshot, group_snapshot_id, **attrs, ) def delete_share_group_snapshot( self, group_snapshot_id, ignore_missing=True ): """Deletes a share group snapshot. :param group_snapshot_id: The ID of the share group snapshot to delete :rtype: ``None`` """ self._delete( _share_group_snapshot.ShareGroupSnapshot, group_snapshot_id, ignore_missing=ignore_missing, ) # ========= Share Metadata ========== def get_share_metadata(self, share_id): """Lists all metadata for a share. :param share_id: The ID of the share :returns: A :class:`~openstack.shared_file_system.v2.share.Share` with the share's metadata. :rtype: :class:`~openstack.shared_file_system.v2.share.Share` """ share = self._get_resource(_share.Share, share_id) return share.fetch_metadata(self) def get_share_metadata_item(self, share_id, key): """Retrieves a specific metadata item from a share by its key. :param share_id: The ID of the share :param key: The key of the share metadata :returns: A :class:`~openstack.shared_file_system.v2.share.Share` with the share's metadata. 
:rtype: :class:`~openstack.shared_file_system.v2.share.Share` """ share = self._get_resource(_share.Share, share_id) return share.get_metadata_item(self, key) def create_share_metadata(self, share_id, **metadata): """Creates share metadata as key-value pairs. :param share_id: The ID of the share :param metadata: The metadata to be created :returns: A :class:`~openstack.shared_file_system.v2.share.Share` with the share's metadata. :rtype: :class:`~openstack.shared_file_system.v2.share.Share` """ share = self._get_resource(_share.Share, share_id) return share.set_metadata(self, metadata=metadata) def update_share_metadata(self, share_id, metadata, replace=False): """Updates metadata of given share. :param share_id: The ID of the share :param metadata: The metadata to be created :param replace: Boolean for whether the preexisting metadata should be replaced :returns: A :class:`~openstack.shared_file_system.v2.share.Share` with the share's updated metadata. :rtype: :class:`~openstack.shared_file_system.v2.share.Share` """ share = self._get_resource(_share.Share, share_id) return share.set_metadata(self, metadata=metadata, replace=replace) def delete_share_metadata(self, share_id, keys, ignore_missing=True): """Deletes a single metadata item on a share, idetified by its key. :param share_id: The ID of the share :param keys: The list of share metadata keys to be deleted :param ignore_missing: Boolean indicating if missing keys should be ignored. 
:returns: None :rtype: None """ share = self._get_resource(_share.Share, share_id) keys_failed_to_delete = [] for key in keys: try: share.delete_metadata_item(self, key) except exceptions.NotFoundException: if not ignore_missing: self._connection.log.info("Key %s not found.", key) keys_failed_to_delete.append(key) except exceptions.ForbiddenException: self._connection.log.info("Key %s cannot be deleted.", key) keys_failed_to_delete.append(key) except exceptions.SDKException: self._connection.log.info("Failed to delete key %s.", key) keys_failed_to_delete.append(key) if keys_failed_to_delete: raise exceptions.SDKException( "Some keys failed to be deleted %s" % keys_failed_to_delete ) def resource_locks(self, **query): """Lists all resource locks. :param kwargs query: Optional query parameters to be sent to limit the resource locks being returned. Available parameters include: * project_id: The project ID of the user that the lock is created for. * user_id: The ID of a user to filter resource locks by. * all_projects: list locks from all projects (Admin Only) * resource_id: The ID of the resource that the locks pertain to filter resource locks by. * resource_action: The action prevented by the filtered resource locks. * resource_type: The type of the resource that the locks pertain to filter resource locks by. * lock_context: The lock creator’s context to filter locks by. * lock_reason: The lock reason that can be used to filter resource locks. (Inexact search is also available with lock_reason~) * created_since: Search for the list of resources that were created after the specified date. The date is in ‘yyyy-mm-dd’ format. * created_before: Search for the list of resources that were created prior to the specified date. The date is in ‘yyyy-mm-dd’ format. * limit: The maximum number of resource locks to return. * offset: The offset to define start point of resource lock listing. * sort_key: The key to sort a list of shares. 
* sort_dir: The direction to sort a list of shares * with_count: Whether to show count in API response or not, default is False. This query parameter is useful with pagination. :returns: A generator of manila resource locks :rtype: :class:`~openstack.shared_file_system.v2. resource_locks.ResourceLock` """ return self._list(_resource_locks.ResourceLock, **query) def get_resource_lock(self, resource_lock): """Show details of a resource lock. :param resource_lock: The ID of a resource lock or a :class:`~openstack.shared_file_system.v2. resource_locks.ResourceLock` instance. :returns: Details of the identified resource lock. :rtype: :class:`~openstack.shared_file_system.v2. resource_locks.ResourceLock` """ return self._get(_resource_locks.ResourceLock, resource_lock) def update_resource_lock(self, resource_lock, **attrs): """Updates details of a single resource lock. :param resource_lock: The ID of a resource lock or a :class:`~openstack.shared_file_system.v2. resource_locks.ResourceLock` instance. :param dict attrs: The attributes to update on the resource lock :returns: the updated resource lock :rtype: :class:`~openstack.shared_file_system.v2. resource_locks.ResourceLock` """ return self._update( _resource_locks.ResourceLock, resource_lock, **attrs ) def delete_resource_lock(self, resource_lock, ignore_missing=True): """Deletes a single resource lock :param resource_lock: The ID of a resource lock or a :class:`~openstack.shared_file_system.v2. resource_locks.ResourceLock` instance. :returns: Result of the ``delete`` :rtype: ``None`` """ return self._delete( _resource_locks.ResourceLock, resource_lock, ignore_missing=ignore_missing, ) def create_resource_lock(self, **attrs): """Locks a resource. :param dict attrs: Attributes which will be used to create a :class:`~openstack.shared_file_system.v2. resource_locks.ResourceLock`, comprised of the properties on the ResourceLock class. Available parameters include: * ``resource_id``: ID of the resource to be locked. 
* ``resource_type``: type of the resource (share, access_rule). * ``resource_action``: action to be locked (delete, show). * ``lock_reason``: reason why you're locking the resource (Optional). :returns: Details of the lock :rtype: :class:`~openstack.shared_file_system.v2. resource_locks.ResourceLock` """ if attrs.get('resource_type'): # The _create method has a parameter named resource_type, which # refers to the type of resource to be created, so we need to avoid # a conflict of parameters we are sending to the method. attrs['__conflicting_attrs'] = { 'resource_type': attrs.get('resource_type') } attrs.pop('resource_type') return self._create(_resource_locks.ResourceLock, **attrs) def get_quota_class_set(self, quota_class_name): """Get quota class set. :param quota_class_name: The name of the quota class :returns: A :class:`~openstack.shared_file_system.v2 .quota_class_set.QuotaClassSet` """ return self._get(_quota_class_set.QuotaClassSet, quota_class_name) def update_quota_class_set(self, quota_class_name, **attrs): """Update quota class set. :param quota_class_name: The name of the quota class :param attrs: The attributes to update on the quota class set :returns: the updated quota class set :rtype: :class:`~openstack.shared_file_system.v2 .quota_class_set.QuotaClassSet` """ return self._update( _quota_class_set.QuotaClassSet, quota_class_name, **attrs ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/availability_zone.py0000664000175000017500000000242000000000000026473 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class AvailabilityZone(resource.Resource): resource_key = "availability_zone" resources_key = "availability_zones" base_path = "/availability-zones" # capabilities allow_create = False allow_fetch = False allow_commit = False allow_delete = False allow_list = True #: Properties #: The ID of the availability zone id = resource.Body("id", type=str) #: The name of the availability zone. name = resource.Body("name", type=str) #: Date and time the availability zone was created at. created_at = resource.Body("created_at", type=str) #: Date and time the availability zone was last updated at. updated_at = resource.Body("updated_at", type=str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/limit.py0000664000175000017500000000601300000000000024106 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Limit(resource.Resource): resources_key = "limits" base_path = "/limits" # capabilities allow_create = False allow_fetch = False allow_commit = False allow_delete = False allow_list = True allow_head = False #: Properties #: The maximum number of replica gigabytes that are allowed #: in a project. maxTotalReplicaGigabytes = resource.Body( "maxTotalReplicaGigabytes", type=int ) #: The total maximum number of shares that are allowed in a project. maxTotalShares = resource.Body("maxTotalShares", type=int) #: The total maximum number of share gigabytes that are allowed in a #: project. maxTotalShareGigabytes = resource.Body("maxTotalShareGigabytes", type=int) #: The total maximum number of share-networks that are allowed in a #: project. maxTotalShareNetworks = resource.Body("maxTotalShareNetworks", type=int) #: The total maximum number of share snapshots that are allowed in a #: project. maxTotalShareSnapshots = resource.Body("maxTotalShareSnapshots", type=int) #: The maximum number of share replicas that is allowed. maxTotalShareReplicas = resource.Body("maxTotalShareReplicas", type=int) #: The total maximum number of snapshot gigabytes that are allowed #: in a project. maxTotalSnapshotGigabytes = resource.Body( "maxTotalSnapshotGigabytes", type=int ) #: The total number of replica gigabytes used in a project by #: share replicas. totalReplicaGigabytesUsed = resource.Body( "totalReplicaGigabytesUsed", type=int ) #: The total number of gigabytes used in a project by shares. totalShareGigabytesUsed = resource.Body( "totalShareGigabytesUsed", type=int ) #: The total number of created shares in a project. totalSharesUsed = resource.Body("totalSharesUsed", type=int) #: The total number of created share-networks in a project. totalShareNetworksUsed = resource.Body("totalShareNetworksUsed", type=int) #: The total number of created share snapshots in a project. 
totalShareSnapshotsUsed = resource.Body( "totalShareSnapshotsUsed", type=int ) #: The total number of gigabytes used in a project by snapshots. totalSnapshotGigabytesUsed = resource.Body( "totalSnapshotGigabytesUsed", type=int ) #: The total number of created share replicas in a project. totalShareReplicasUsed = resource.Body("totalShareReplicasUsed", type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/quota_class_set.py0000664000175000017500000000541400000000000026165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class QuotaClassSet(resource.Resource): base_path = '/quota-class-sets' resource_key = 'quota_class_set' allow_create = False allow_fetch = True allow_commit = True allow_delete = False allow_list = False allow_head = False _query_mapping = resource.QueryParameters("quota_class_name", "project_id") #: Properties #: A quota_class_set id. id = resource.Body("id", type=str) #: The maximum number of share groups. share_groups = resource.Body("share_groups", type=int) #: The maximum number of share group snapshots. share_group_snapshots = resource.Body("share_group_snapshots", type=int) #: The total maximum number of shares that are allowed in a project. snapshots = resource.Body("snapshots", type=int) #: The maximum number of snapshot gigabytes that are allowed in a project. 
snapshot_gigabytes = resource.Body("snapshot_gigabytes", type=int) #: The total maximum number of snapshot gigabytes that are allowed in a project. shares = resource.Body("shares", type=int) #: The maximum number of share-networks that are allowed in a project. share_networks = resource.Body("share_networks", type=int) #: The maximum number of share replicas that is allowed. share_replicas = resource.Body("share_replicas", type=int) #: The total maximum number of share gigabytes that are allowed in a project. #: You cannot request a share that exceeds the allowed gigabytes quota. gigabytes = resource.Body("gigabytes", type=int) #: The maximum number of replica gigabytes that are allowed in a project. #: You cannot create a share, share replica, manage a share or extend a share #: if it is going to exceed the allowed replica gigabytes quota. replica_gigabytes = resource.Body("replica_gigabytes", type=int) #: The number of gigabytes per share allowed in a project. per_share_gigabytes = resource.Body("per_share_gigabytes", type=int) #: The total maximum number of share backups that are allowed in a project. backups = resource.Body("backups", type=int) #: The total maximum number of backup gigabytes that are allowed in a project. backup_gigabytes = resource.Body("backup_gigabytes", type=int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/resource_locks.py0000664000175000017500000000516600000000000026022 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ResourceLock(resource.Resource): resource_key = "resource_lock" resources_key = "resource_locks" base_path = "/resource-locks" # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_head = False _query_mapping = resource.QueryParameters( "project_id", "created_since", "created_before", "limit", "offset", "id", "resource_id", "resource_type", "resource_action", "user_id", "lock_context", "lock_reason", "lock_reason~", "sort_key", "sort_dir", "with_count", "all_projects", ) # The resource was introduced in this microversion, so it is the minimum # version to use it. Openstacksdk currently doesn't allow to set # minimum microversions. _max_microversion = '2.81' #: Properties #: The date and time stamp when the resource was created within the #: service’s database. created_at = resource.Body("created_at", type=str) #: The date and time stamp when the resource was last modified within the #: service’s database. updated_at = resource.Body("updated_at", type=str) #: The ID of the user that owns the lock user_id = resource.Body("user_id", type=str) #: The ID of the project that owns the lock. project_id = resource.Body("project_id", type=str) #: The type of the resource that is locked, i.e.: share, access rule. resource_type = resource.Body("resource_type", type=str) #: The UUID of the resource that is locked. resource_id = resource.Body("resource_id", type=str) #: What action is currently locked, i.e.: deletion, visibility of fields. 
resource_action = resource.Body("resource_action", type=str) #: The reason specified while the lock was being placed. lock_reason = resource.Body("lock_reason", type=str) #: The context that placed the lock (user, admin or service). lock_context = resource.Body("lock_context", type=str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/share.py0000664000175000017500000002006100000000000024071 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.common import metadata from openstack import exceptions from openstack import resource from openstack import utils class Share(resource.Resource, metadata.MetadataMixin): resource_key = "share" resources_key = "shares" base_path = "/shares" # capabilities allow_create = True allow_fetch = True allow_commit = True allow_list = True allow_head = False allow_delete = True #: Properties #: The share instance access rules status. A valid value is active, #: error, or syncing. access_rules_status = resource.Body("access_rules_status", type=str) #: The availability zone. availability_zone = resource.Body("availability_zone", type=str) #: The date and time stamp when the resource was created within the #: service’s database. created_at = resource.Body("created_at", type=str) #: The user defined description of the resource. description = resource.Body("description", type=str) #: The share host name. 
host = resource.Body("host", type=str) #: The level of visibility for the share. is_public = resource.Body("is_public", type=bool) #: Whether or not this share supports snapshots that can be #: cloned into new shares. is_creating_new_share_from_snapshot_supported = resource.Body( "create_share_from_snapshot_support", type=bool ) #: Whether the share's snapshots can be mounted directly and access #: controlled independently or not. is_mounting_snapshot_supported = resource.Body( "mount_snapshot_support", type=bool ) #: Whether the share can be reverted to its latest snapshot or not. is_reverting_to_snapshot_supported = resource.Body( "revert_to_snapshot_support", type=bool ) #: An extra specification that filters back ends by whether the share #: supports snapshots or not. is_snapshot_supported = resource.Body("snapshot_support", type=bool) #: Indicates whether the share has replicas or not. is_replicated = resource.Body("has_replicas", type=bool) #: One or more metadata key and value pairs as a dictionary of strings. metadata = resource.Body("metadata", type=dict) #: The progress of the share creation. progress = resource.Body("progress", type=str) #: The ID of the project that owns the resource. project_id = resource.Body("project_id", type=str) #: The share replication type. Valid values are none, readable, #: writable and dr. replication_type = resource.Body("replication_type", type=str) #: The UUID of the share group that this shares belongs to. share_group_id = resource.Body("share_group_id", type=str) #: The share network ID. share_network_id = resource.Body("share_network_id", type=str) #: The Shared File Systems protocol. A valid value is NFS, #: CIFS, GlusterFS, HDFS, CephFS, MAPRFS share_protocol = resource.Body("share_proto", type=str) #: The UUID of the share server. share_server_id = resource.Body("share_server_id", type=str) #: The UUID of the share type. In minor versions, this parameter is a #: share type name, as a string. 
share_type = resource.Body("share_type", type=str) #: Name of the share type. share_type_name = resource.Body("share_type_name", type=str) #: The share size, in GiBs. size = resource.Body("size", type=int) #: The UUID of the snapshot that was used to create the #: share. snapshot_id = resource.Body("snapshot_id", type=str) #: The ID of the group snapshot instance that was used to create #: this share. source_share_group_snapshot_member_id = resource.Body( "source_share_group_snapshot_member_id", type=str ) #: The share status status = resource.Body("status", type=str) #: For the share migration, the migration task state. task_state = resource.Body("task_state", type=str) #: ID of the user that the share was created by. user_id = resource.Body("user_id", type=str) #: Display name for updating name display_name = resource.Body("display_name", type=str) #: Display description for updating description display_description = resource.Body("display_description", type=str) def _action(self, session, body, action='patch', microversion=None): """Perform share instance actions given the message body""" url = utils.urljoin(self.base_path, self.id, 'action') headers = {'Accept': ''} if microversion is None: microversion = self._get_microversion(session, action=action) response = session.post( url, json=body, headers=headers, microversion=microversion ) exceptions.raise_from_response(response) return response def extend_share(self, session, new_size, force=False): """Extend the share size. :param float new_size: The new size of the share in GiB. :param bool force: Whether or not to use force, bypassing the scheduler. Requires admin privileges. Defaults to False. :returns: The result of the action. :rtype: ``None`` """ extend_body = {"new_size": new_size} if force is True: extend_body['force'] = True body = {"extend": extend_body} self._action(session, body) def shrink_share(self, session, new_size): """Shrink the share size. 
:param float new_size: The new size of the share in GiB. :returns: ``None`` """ body = {"shrink": {'new_size': new_size}} self._action(session, body) def revert_to_snapshot(self, session, snapshot_id): """Revert the share to the given snapshot. :param str snapshot_id: The id of the snapshot to revert to. :returns: ``None`` """ body = {"revert": {"snapshot_id": snapshot_id}} self._action(session, body) def manage(self, session, protocol, export_path, service_host, **params): """Manage a share. :param session: A session object used for sending request. :param str protocol: The shared file systems protocol of this share. :param str export_path: The export path formatted according to the protocol. :param str service_host: The manage-share service host. :param kwargs params: Optional parameters to be sent. Available parameters include: * name: The user defined name of the resource. * share_type: The name or ID of the share type to be used to create the resource. * driver_options: A set of one or more key and value pairs, as a dictionary of strings, that describe driver options. * is_public: The level of visibility for the share. * description: The user defiend description of the resource. * share_server_id: The UUID of the share server. :returns: The share that was managed. """ path = 'manage' attrs = { 'share': { 'protocol': protocol, 'export_path': export_path, 'service_host': service_host, } } attrs['share'].update(params) url = utils.urljoin(self.base_path, path) resp = session.post(url, json=attrs) self._translate_response(resp) return self def unmanage(self, session): """Unmanage a share. :param session: A session object used for sending request. 
:returns: ``None`` """ body = {'unmanage': None} self._action(session, body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/share_access_rule.py0000664000175000017500000000727400000000000026454 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class ShareAccessRule(resource.Resource): resource_key = "access" resources_key = "access_list" base_path = "/share-access-rules" # capabilities allow_create = True allow_fetch = True allow_commit = False allow_delete = True allow_list = True allow_head = False _query_mapping = resource.QueryParameters("share_id") # Restricted access rules became available in 2.82 _max_microversion = '2.82' #: Properties #: The access credential of the entity granted share access. access_key = resource.Body("access_key", type=str) #: The access level to the share. access_level = resource.Body("access_level", type=str) #: The object of the access rule. access_list = resource.Body("access_list", type=str) #: The value that defines the access. access_to = resource.Body("access_to", type=str) #: The access rule type. access_type = resource.Body("access_type", type=str) #: The date and time stamp when the resource was created within the #: service’s database. 
created_at = resource.Body("created_at", type=str) #: One or more access rule metadata key and value pairs as a dictionary #: of strings. metadata = resource.Body("metadata", type=dict) #: The UUID of the share to which you are granted or denied access. share_id = resource.Body("share_id", type=str) #: The state of the access rule. state = resource.Body("state", type=str) #: The date and time stamp when the resource was last updated within #: the service’s database. updated_at = resource.Body("updated_at", type=str) #: Whether the visibility of some sensitive fields is restricted or not lock_visibility = resource.Body("lock_visibility", type=bool) #: Whether the deletion of the access rule should be restricted or not lock_deletion = resource.Body("lock_deletion", type=bool) #: Reason for placing the loc lock_reason = resource.Body("lock_reason", type=bool) def _action(self, session, body, url, action='patch', microversion=None): headers = {'Accept': ''} if microversion is None: microversion = self._get_microversion(session, action=action) return session.post( url, json=body, headers=headers, microversion=microversion ) def create(self, session, **kwargs): return super().create( session, resource_request_key='allow_access', resource_response_key='access', **kwargs ) def delete( self, session, share_id, ignore_missing=True, *, unrestrict=False ): body = {"deny_access": {"access_id": self.id}} if unrestrict: body['deny_access']['unrestrict'] = True url = utils.urljoin("/shares", share_id, "action") response = self._action(session, body, url) try: response = self._action(session, body, url) self._translate_response(response) except exceptions.NotFoundException: if not ignore_missing: raise return response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/share_export_locations.py0000664000175000017500000000326700000000000027556 0ustar00zuulzuul00000000000000# 
Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ShareExportLocation(resource.Resource): resource_key = "export_location" resources_key = "export_locations" base_path = "/shares/%(share_id)s/export_locations" # capabilities allow_list = True allow_fetch = True allow_create = False allow_commit = False allow_delete = False allow_head = False _max_microversion = '2.47' #: Properties # The share ID, part of the URI for export locations share_id = resource.URI("share_id", type='str') #: The path of the export location. path = resource.Body("path", type=str) #: Indicate if export location is preferred. is_preferred = resource.Body("preferred", type=bool) #: The share instance ID of the export location. share_instance_id = resource.Body("share_instance_id", type=str) #: Indicate if export location is admin only. is_admin = resource.Body("is_admin_only", type=bool) #: Indicate when the export location is created at created_at = resource.Body("created_at", type=str) #: Indicate when the export location is updated at updated_at = resource.Body("updated_at", type=str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/share_group.py0000664000175000017500000000413700000000000025313 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ShareGroup(resource.Resource): resource_key = "share_group" resources_key = "share_groups" base_path = "/share-groups" _query_mapping = resource.QueryParameters("share_group_id") # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_head = False #: Properties #: The availability zone ID that the share group exists within. availability_zone = resource.Body("availability_zone", type=str) #: The consistency snapshot support. consistent_snapshot_support = resource.Body( "consistent_snapshot_support", type=str ) #: The date and time stamp when the resource was created within the #: service’s database. created_at = resource.Body("created_at", type=str) #: The user defined description of the resource. description = resource.Body("description", type=str) #: The ID of the project that owns the resource. project_id = resource.Body("project_id", type=str) #: The share group snapshot ID. share_group_snapshot_id = resource.Body( "share_group_snapshot_id", type=str ) #: The share group type ID. share_group_type_id = resource.Body("share_group_type_id", type=str) #: The share network ID where the resource is exported to. share_network_id = resource.Body("share_network_id", type=str) #: A list of share type IDs. 
share_types = resource.Body("share_types", type=list) #: The share status status = resource.Body("status", type=str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/share_group_snapshot.py0000664000175000017500000000633400000000000027233 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource from openstack import utils class ShareGroupSnapshot(resource.Resource): resource_key = "share_group_snapshot" resources_key = "share_group_snapshots" base_path = "/share-group-snapshots" # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_head = False _query_mapping = resource.QueryParameters( 'project_id', 'all_tenants', 'name', 'description', 'status', 'share_group_id', 'limit', 'offset', 'sort_key', 'sort_dir', ) #: Properties #: The ID of the project that owns the resource. project_id = resource.Body("project_id", type=str) #: Filters by a share group snapshot status. A valid value is creating, #: error, available, deleting, error_deleting. status = resource.Body("status", type=str) #: The UUID of the share group. share_group_id = resource.Body("share_group_id", type=str) #: The user defined description of the resource. description = resource.Body("description", type=str) #: The date and time stamp when the resource was created. 
created_at = resource.Body("created_at", type=str) #: The share group snapshot members. members = resource.Body("members", type=str) #: The snapshot size, in GiBs. size = resource.Body("size", type=int) #: NFS, CIFS, GlusterFS, HDFS, CephFS or MAPRFS. share_protocol = resource.Body("share_proto", type=str) def _action(self, session, body, action='patch', microversion=None): """Perform ShareGroupSnapshot actions given the message body.""" # NOTE: This is using ShareGroupSnapshot.base_path instead of # self.base_path as ShareGroupSnapshot instances can be acted on, # but the URL used is sans any additional /detail/ part. url = utils.urljoin(self.base_path, self.id, 'action') headers = {'Accept': ''} microversion = microversion or self._get_microversion( session, action=action ) extra_attrs = {'microversion': microversion} session.post(url, json=body, headers=headers, **extra_attrs) def reset_status(self, session, status): body = {"reset_status": {"status": status}} self._action(session, body) def get_members(self, session, microversion=None): url = utils.urljoin(self.base_path, self.id, 'members') microversion = microversion or self._get_microversion( session, action='list' ) headers = {'Accept': ''} response = session.get(url, headers=headers, microversion=microversion) return response.json() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/share_instance.py0000664000175000017500000000675400000000000025772 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack import resource from openstack import utils class ShareInstance(resource.Resource): resource_key = "share_instance" resources_key = "share_instances" base_path = "/share_instances" # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True allow_head = False #: Properties #: The share instance access rules status. A valid value is active, #: error, or syncing. access_rules_status = resource.Body("access_rules_status", type=str) #: The name of the availability zone the share exists within. availability_zone = resource.Body("availability_zone", type=str) #: If the share instance has its cast_rules_to_readonly attribute #: set to True, all existing access rules be cast to read/only. cast_rules_to_readonly = resource.Body("cast_rules_to_readonly", type=bool) #: The date and time stamp when the resource was created within the #: service’s database. created_at = resource.Body("created_at", type=str) #: The host name of the service back end that the resource is #: contained within. host = resource.Body("host", type=str) #: The progress of the share creation. progress = resource.Body("progress", type=str) #: The share replica state. Has set value only when replication is used. #: List of possible values: active, in_sync, out_of_sync, error replica_state = resource.Body("replica_state", type=str) #: The UUID of the share to which the share instance belongs to. share_id = resource.Body("share_id", type=str) #: The share network ID where the resource is exported to. 
share_network_id = resource.Body("share_network_id", type=str) #: The UUID of the share server. share_server_id = resource.Body("share_server_id", type=str) #: The share or share instance status. status = resource.Body("status", type=str) def _action(self, session, body, action='patch', microversion=None): """Perform share instance actions given the message body""" url = utils.urljoin(self.base_path, self.id, 'action') headers = {'Accept': ''} extra_attrs = {} if microversion: # Set microversion override extra_attrs['microversion'] = microversion else: extra_attrs['microversion'] = self._get_microversion( session, action=action ) response = session.post(url, json=body, headers=headers, **extra_attrs) exceptions.raise_from_response(response) return response def reset_status(self, session, reset_status): """Reset share instance to given status""" body = {"reset_status": {"status": reset_status}} self._action(session, body) def force_delete(self, session): """Force delete share instance""" body = {"force_delete": None} self._action(session, body, action='delete') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/share_network.py0000664000175000017500000000447100000000000025651 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource from openstack.shared_file_system.v2 import share_network_subnet class ShareNetwork(resource.Resource): resource_key = "share_network" resources_key = "share_networks" base_path = "/share-networks" # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_head = False _query_mapping = resource.QueryParameters( "project_id", "name", "description", "created_since", "created_before", "security_service_id", "limit", "offset", all_projects="all_tenants", ) #: Properties #: The date and time stamp when the resource was created within the #: service’s database. created_at = resource.Body("created_at") #: The user defined description of the resource. description = resource.Body("description", type=str) #: The ID of the project that owns the resource. project_id = resource.Body("project_id", type=str) #: A list of share network subnets that pertain to the related share #: network. share_network_subnets = resource.Body( "share_network_subnets", type=list, list_type=share_network_subnet.ShareNetworkSubnet, ) #: The UUID of a neutron network when setting up or #: updating a share network subnet with neutron. neutron_net_id = resource.Body("neutron_net_id", type=str) #: The UUID of the neutron subnet when setting up or updating #: a share network subnet with neutron. neutron_subnet_id = resource.Body("neutron_subnet_id", type=str) #: The date and time stamp when the resource was last updated within #: the service’s database. updated_at = resource.Body("updated_at", type=str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/share_network_subnet.py0000664000175000017500000000502600000000000027226 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ShareNetworkSubnet(resource.Resource): resource_key = "share_network_subnet" resources_key = "share_network_subnets" base_path = "/share-networks/%(share_network_id)s/subnets" # capabilities allow_create = True allow_fetch = True allow_commit = False allow_delete = True allow_list = True #: Properties #: The share nerwork ID, part of the URI for share network subnets. share_network_id = resource.URI("share_network_id", type=str) #: The name of the availability zone that the share network #: subnet belongs to. availability_zone = resource.Body("availability_zone", type=str) #: The IP block from which to allocate the network, in CIDR notation. cidr = resource.Body("cidr", type=str) #: Date and time the share network subnet was created at. created_at = resource.Body("created_at") #: The gateway of a share network subnet. gateway = resource.Body("gateway", type=str) #: The IP version of the network. ip_version = resource.Body("ip_version", type=int) #: The MTU of a share network subnet. mtu = resource.Body("mtu", type=str) #: The network type. A valid value is VLAN, VXLAN, GRE, or flat network_type = resource.Body("network_type", type=str) #: The name of the neutron network. neutron_net_id = resource.Body("neutron_net_id", type=str) #: The ID of the neitron subnet. neutron_subnet_id = resource.Body("neutron_subnet_id", type=str) #: The segmentation ID. segmentation_id = resource.Body('segmentation_id', type=int) #: The name of the share network that the share network subnet belongs to. 
share_network_name = resource.Body("share_network_name", type=str) #: Date and time the share network subnet was last updated at. updated_at = resource.Body("updated_at", type=str) def create(self, session, **kwargs): return super().create( session, resource_request_key='share-network-subnet', **kwargs ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/share_snapshot.py0000664000175000017500000000415000000000000026011 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ShareSnapshot(resource.Resource): resource_key = "snapshot" resources_key = "snapshots" base_path = "/snapshots" # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_head = False _query_mapping = resource.QueryParameters("snapshot_id") #: Properties #: The date and time stamp when the resource was #: created within the service’s database. created_at = resource.Body("created_at") #: The user defined description of the resource. description = resource.Body("description", type=str) #: The user defined name of the resource. display_name = resource.Body("display_name", type=str) #: The user defined description of the resource display_description = resource.Body("display_description", type=str) #: ID of the project that the snapshot belongs to. 
project_id = resource.Body("project_id", type=str) #: The UUID of the source share that was used to #: create the snapshot. share_id = resource.Body("share_id", type=str) #: The file system protocol of a share snapshot share_proto = resource.Body("share_proto", type=str) #: The snapshot's source share's size, in GiBs. share_size = resource.Body("share_size", type=int) #: The snapshot size, in GiBs. size = resource.Body("size", type=int) #: The snapshot status status = resource.Body("status", type=str) #: ID of the user that the snapshot was created by. user_id = resource.Body("user_id", type=str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/share_snapshot_instance.py0000664000175000017500000000344100000000000027677 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class ShareSnapshotInstance(resource.Resource): resource_key = "snapshot_instance" resources_key = "snapshot_instances" base_path = "/snapshot-instances" # capabilities allow_create = False allow_fetch = True allow_commit = False allow_delete = False allow_list = True allow_head = False #: Properties #: The date and time stamp when the resource was created within the #: service’s database. created_at = resource.Body("created_at", type=str) #: The progress of the snapshot creation. 
progress = resource.Body("progress", type=str) #: Provider location of the snapshot on the backend. provider_location = resource.Body("provider_location", type=str) #: The UUID of the share. share_id = resource.Body("share_id", type=str) #: The UUID of the share instance. share_instance_id = resource.Body("share_instance_id", type=str) #: The UUID of the snapshot. snapshot_id = resource.Body("snapshot_id", type=str) #: The snapshot instance status. status = resource.Body("status", type=str) #: The date and time stamp when the resource was updated within the #: service’s database. updated_at = resource.Body("updated_at", type=str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/storage_pool.py0000664000175000017500000000250400000000000025466 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class StoragePool(resource.Resource): resources_key = "pools" base_path = "/scheduler-stats/pools" # capabilities allow_create = False allow_fetch = False allow_commit = False allow_delete = False allow_list = True allow_head = False _query_mapping = resource.QueryParameters( 'pool', 'backend', 'host', 'capabilities', 'share_type', ) #: Properties #: The name of the back end. backend = resource.Body("backend", type=str) #: The host of the back end. 
host = resource.Body("host", type=str) #: The pool for the back end pool = resource.Body("pool", type=str) #: The back end capabilities. capabilities = resource.Body("capabilities", type=dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/shared_file_system/v2/user_message.py0000664000175000017500000000364200000000000025457 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class UserMessage(resource.Resource): resource_key = "message" resources_key = "messages" base_path = "/messages" # capabilities allow_fetch = True allow_commit = False allow_delete = True allow_list = True allow_head = False _query_mapping = resource.QueryParameters("message_id") _max_microversion = '2.37' #: Properties #: The action ID of the user message action_id = resource.Body("action_id", type=str) #: Indicate when the user message was created created_at = resource.Body("created_at", type=str) #: The detail ID of the user message detail_id = resource.Body("detail_id", type=str) #: Indicate when the share message expires expires_at = resource.Body("expires_at", type=str) #: The message level of the user message message_level = resource.Body("message_level", type=str) #: The project ID of the user message project_id = resource.Body("project_id", type=str) #: The request ID of the user message request_id = resource.Body("request_id", type=str) #: The resource ID 
of the user message resource_id = resource.Body("resource_id", type=str) #: The resource type of the user message resource_type = resource.Body("resource_type", type=str) #: The message for the user message user_message = resource.Body("user_message", type=str) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.325352 openstacksdk-4.0.0/openstack/test/0000775000175000017500000000000000000000000017175 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/test/__init__.py0000664000175000017500000000000000000000000021274 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/test/fakes.py0000664000175000017500000002354400000000000020650 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The :mod:`~openstack.test.fakes` module exists to help application developers using the OpenStack SDK to unit test their applications. It provides a number of helper utilities to generate fake :class:`~openstack.resource.Resource` and :class:`~openstack.proxy.Proxy` instances. 
These fakes do not require an established connection and allow you to validate that your application using valid attributes and methods for both :class:`~openstack.resource.Resource` and :class:`~openstack.proxy.Proxy` instances. """ import inspect import random from typing import ( Any, Dict, Generator, Optional, Type, TypeVar, ) from unittest import mock import uuid from openstack import format as _format from openstack import proxy from openstack import resource from openstack import service_description Resource = TypeVar('Resource', bound=resource.Resource) def generate_fake_resource( resource_type: Type[Resource], **attrs: Dict[str, Any], ) -> Resource: """Generate a fake resource :param type resource_type: Object class :param dict attrs: Optional attributes to be set on resource Example usage: .. code-block:: python >>> from openstack.compute.v2 import server >>> from openstack.test import fakes >>> fakes.generate_fake_resource(server.Server) openstack.compute.v2.server.Server(...) :param type resource_type: Object class :param dict attrs: Optional attributes to be set on resource :return: Instance of ``resource_type`` class populated with fake values of expected types :raises NotImplementedError: If a resource attribute specifies a ``type`` or ``list_type`` that cannot be automatically generated """ base_attrs: Dict[str, Any] = {} for name, value in inspect.getmembers( resource_type, predicate=lambda x: isinstance(x, (resource.Body, resource.URI)), ): if isinstance(value, resource.Body): target_type = value.type if target_type is None: if ( name == "properties" and hasattr( resource_type, "_store_unknown_attrs_as_properties" ) and resource_type._store_unknown_attrs_as_properties ): # virtual "properties" attr which hosts all unknown attrs # (i.e. 
Image) base_attrs[name] = dict() else: # Type not defined - string base_attrs[name] = uuid.uuid4().hex elif issubclass(target_type, resource.Resource): # Attribute is of another Resource type base_attrs[name] = generate_fake_resource(target_type) elif issubclass(target_type, list) and value.list_type is not None: # List of ... item_type = value.list_type if issubclass(item_type, resource.Resource): # item is of Resource type base_attrs[name] = [generate_fake_resource(item_type)] elif issubclass(item_type, dict): base_attrs[name] = [{}] elif issubclass(item_type, str): base_attrs[name] = [uuid.uuid4().hex] else: # Everything else msg = "Fake value for {}.{} can not be generated".format( resource_type.__name__, name, ) raise NotImplementedError(msg) elif issubclass(target_type, list) and value.list_type is None: # List of str base_attrs[name] = [uuid.uuid4().hex] elif issubclass(target_type, str): # definitely string base_attrs[name] = uuid.uuid4().hex elif issubclass(target_type, int): # int base_attrs[name] = random.randint(1, 100) elif issubclass(target_type, float): # float base_attrs[name] = random.random() elif issubclass(target_type, bool) or issubclass( target_type, _format.BoolStr ): # bool base_attrs[name] = random.choice([True, False]) elif issubclass(target_type, dict): # some dict - without further details leave it empty base_attrs[name] = dict() else: # Everything else msg = "Fake value for {}.{} can not be generated".format( resource_type.__name__, name, ) raise NotImplementedError(msg) if isinstance(value, resource.URI): # For URI we just generate something base_attrs[name] = uuid.uuid4().hex base_attrs.update(**attrs) fake = resource_type(**base_attrs) return fake def generate_fake_resources( resource_type: Type[Resource], count: int = 1, attrs: Optional[Dict[str, Any]] = None, ) -> Generator[Resource, None, None]: """Generate a given number of fake resource entities :param type resource_type: Object class :param int count: Number of objects to 
return :param dict attrs: Attribute values to set into each instance Example usage: .. code-block:: python >>> from openstack.compute.v2 import server >>> from openstack.test import fakes >>> fakes.generate_fake_resources(server.Server, count=3) :param type resource_type: Object class :param int count: Number of objects to return :param dict attrs: Attribute values to set into each instance :return: Generator of ``resource_type`` class instances populated with fake values of expected types. """ if not attrs: attrs = {} for _ in range(count): yield generate_fake_resource(resource_type, **attrs) # TODO(stephenfin): It would be helpful to generate fake resources for the # various proxy methods also, but doing so requires deep code introspection or # (better) type annotations def generate_fake_proxy( service: Type[service_description.ServiceDescription], api_version: Optional[str] = None, ) -> proxy.Proxy: """Generate a fake proxy for the given service type Example usage: .. code-block:: python >>> import functools >>> from openstack.compute import compute_service >>> from openstack.compute.v2 import server >>> from openstack.test import fakes >>> # create the fake proxy >>> fake_compute_proxy = fakes.generate_fake_proxy( ... compute_service.ComputeService, ... ) >>> # configure return values for various proxy APIs >>> # note that this will generate new fake resources on each invocation >>> fake_compute_proxy.get_server.side_effect = functools.partial( ... fakes.generate_fake_resource, ... server.Server, ... ) >>> fake_compute_proxy.servers.side_effect = functools.partial( ... fakes.generate_fake_resources, ... server.Server, ... ) >>> fake_compute_proxy.servers() >>> fake_compute_proxy.serverssss() Traceback (most recent call last): File "", line 1, in File "/usr/lib64/python3.11/unittest/mock.py", line 653, in __getattr__ raise AttributeError("Mock object has no attribute %r" % name) AttributeError: Mock object has no attribute 'serverssss'. 
Did you mean: 'server_ips'? :param service: The service to generate the fake proxy for. :type service: :class:`~openstack.service_description.ServiceDescription` :param api_version: The API version to generate the fake proxy for. This should be a major version must be supported by openstacksdk, as specified in the ``supported_versions`` attribute of the provided ``service``. This is only required if openstacksdk supports multiple API versions for the given service. :type api_version: int or None :raises ValueError: if the ``service`` is not a valid :class:`~openstack.service_description.ServiceDescription` or if ``api_version`` is not supported :returns: An autospecced mock of the :class:`~openstack.proxy.Proxy` implementation for the specified service type and API version """ if not issubclass(service, service_description.ServiceDescription): raise ValueError( f"Service {service.__name__} is not a valid ServiceDescription" ) supported_versions = service.supported_versions if api_version is None: if len(supported_versions) > 1: raise ValueError( f"api_version was not provided but service {service.__name__} " f"provides multiple API versions" ) else: api_version = list(supported_versions)[0] elif api_version not in supported_versions: raise ValueError( f"API version {api_version} is not supported by openstacksdk. 
" f"Supported API versions are: {', '.join(supported_versions)}" ) return mock.create_autospec(supported_versions[api_version]) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.325352 openstacksdk-4.0.0/openstack/tests/0000775000175000017500000000000000000000000017360 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/README.rst0000664000175000017500000000033100000000000021044 0ustar00zuulzuul00000000000000Tests for openstacksdk ====================== For information on how to run and extend these tests, refer to the `contributor guide`__. .. __: https://docs.openstack.org/openstacksdk/latest/contributor/testing.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/__init__.py0000664000175000017500000000000000000000000021457 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/0000775000175000017500000000000000000000000020775 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/README.txt0000664000175000017500000000211100000000000022466 0ustar00zuulzuul00000000000000This directory contains a testing infrastructure for the Ansible OpenStack modules. You will need a clouds.yaml file in order to run the tests. You must provide a value for the `cloud` variable for each run (using the -e option) as a default is not currently provided. If you want to run these tests against devstack, it is easiest to use the tox target. This assumes you have a devstack-admin cloud defined in your clouds.yaml file that points to devstack. 
Some examples of using tox: tox -e ansible tox -e ansible keypair security_group If you want to run these tests directly, or against different clouds, then you'll need to use the ansible-playbook command that comes with the Ansible distribution and feed it the run.yml playbook. Some examples: # Run all module tests against a provider ansible-playbook run.yml -e "cloud=hp" # Run only the keypair and security_group tests ansible-playbook run.yml -e "cloud=hp" --tags "keypair,security_group" # Run all tests except security_group ansible-playbook run.yml -e "cloud=hp" --skip-tags "security_group" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/hooks/0000775000175000017500000000000000000000000022120 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/hooks/post_test_hook.sh0000775000175000017500000000207400000000000025526 0ustar00zuulzuul00000000000000#!/bin/sh # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(shade) Rework for Zuul v3 export OPENSTACKSDK_DIR="$BASE/new/openstacksdk" cd $OPENSTACKSDK_DIR sudo chown -R jenkins:stack $OPENSTACKSDK_DIR echo "Running shade Ansible test suite" if [ ${OPENSTACKSDK_ANSIBLE_DEV:-0} -eq 1 ] then # Use the upstream development version of Ansible set +e sudo -E -H -u jenkins tox -eansible -- -d EXIT_CODE=$? 
set -e else # Use the release version of Ansible set +e sudo -E -H -u jenkins tox -eansible EXIT_CODE=$? set -e fi exit $EXIT_CODE ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.089238 openstacksdk-4.0.0/openstack/tests/ansible/roles/0000775000175000017500000000000000000000000022121 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/auth/0000775000175000017500000000000000000000000023062 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/auth/tasks/0000775000175000017500000000000000000000000024207 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/auth/tasks/main.yml0000664000175000017500000000014500000000000025656 0ustar00zuulzuul00000000000000--- - name: Authenticate to the cloud os_auth: cloud={{ cloud }} - debug: var=service_catalog ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/client_config/0000775000175000017500000000000000000000000024724 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/client_config/tasks/0000775000175000017500000000000000000000000026051 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/client_config/tasks/main.yml0000664000175000017500000000023300000000000027516 
0ustar00zuulzuul00000000000000--- - name: List all profiles os_client_config: register: list # WARNING: This will output sensitive authentication information!!!! - debug: var=list ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/group/0000775000175000017500000000000000000000000023255 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/group/defaults/0000775000175000017500000000000000000000000025064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/group/defaults/main.yml0000664000175000017500000000003200000000000026526 0ustar00zuulzuul00000000000000group_name: ansible_group ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/group/tasks/0000775000175000017500000000000000000000000024402 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/group/tasks/main.yml0000664000175000017500000000056400000000000026056 0ustar00zuulzuul00000000000000--- - name: Create group os_group: cloud: "{{ cloud }}" state: present name: "{{ group_name }}" - name: Update group os_group: cloud: "{{ cloud }}" state: present name: "{{ group_name }}" description: "updated description" - name: Delete group os_group: cloud: "{{ cloud }}" state: absent name: "{{ group_name }}" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 
openstacksdk-4.0.0/openstack/tests/ansible/roles/image/0000775000175000017500000000000000000000000023203 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/image/defaults/0000775000175000017500000000000000000000000025012 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/image/defaults/main.yml0000664000175000017500000000003200000000000026454 0ustar00zuulzuul00000000000000image_name: ansible_image ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/image/tasks/0000775000175000017500000000000000000000000024330 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/image/tasks/main.yml0000664000175000017500000000215100000000000025776 0ustar00zuulzuul00000000000000--- - name: Create a test image file shell: mktemp register: tmp_file - name: Fill test image file to 1MB shell: truncate -s 1048576 {{ tmp_file.stdout }} - name: Create raw image (defaults) os_image: cloud: "{{ cloud }}" state: present name: "{{ image_name }}" filename: "{{ tmp_file.stdout }}" disk_format: raw register: image - debug: var=image - name: Delete raw image (defaults) os_image: cloud: "{{ cloud }}" state: absent name: "{{ image_name }}" - name: Create raw image (complex) os_image: cloud: "{{ cloud }}" state: present name: "{{ image_name }}" filename: "{{ tmp_file.stdout }}" disk_format: raw is_public: True min_disk: 10 min_ram: 1024 kernel: cirros-vmlinuz ramdisk: cirros-initrd properties: cpu_arch: x86_64 distro: ubuntu register: image - debug: var=image - name: Delete raw image 
(complex) os_image: cloud: "{{ cloud }}" state: absent name: "{{ image_name }}" - name: Delete test image file file: name: "{{ tmp_file.stdout }}" state: absent ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/keypair/0000775000175000017500000000000000000000000023565 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/keypair/defaults/0000775000175000017500000000000000000000000025374 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/keypair/defaults/main.yml0000664000175000017500000000003400000000000027040 0ustar00zuulzuul00000000000000keypair_name: shade_keypair ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/keypair/tasks/0000775000175000017500000000000000000000000024712 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/keypair/tasks/main.yml0000664000175000017500000000275700000000000026374 0ustar00zuulzuul00000000000000--- - name: Create keypair (non-existing) os_keypair: cloud: "{{ cloud }}" name: "{{ keypair_name }}" state: present register: keypair # This assert verifies that Ansible is capable serializing data returned by SDK - name: Ensure private key is returned assert: that: - keypair.key.public_key is defined and keypair.key.public_key - name: Delete keypair (non-existing) os_keypair: cloud: "{{ cloud }}" name: "{{ keypair_name }}" state: absent - name: Generate test key file user: name: "{{ ansible_env.USER }}" 
generate_ssh_key: yes ssh_key_file: .ssh/shade_id_rsa - name: Create keypair (file) os_keypair: cloud: "{{ cloud }}" name: "{{ keypair_name }}" state: present public_key_file: "{{ ansible_env.HOME }}/.ssh/shade_id_rsa.pub" - name: Delete keypair (file) os_keypair: cloud: "{{ cloud }}" name: "{{ keypair_name }}" state: absent - name: Create keypair (key) os_keypair: cloud: "{{ cloud }}" name: "{{ keypair_name }}" state: present public_key: "{{ lookup('file', '~/.ssh/shade_id_rsa.pub') }}" - name: Delete keypair (key) os_keypair: cloud: "{{ cloud }}" name: "{{ keypair_name }}" state: absent - name: Delete test key pub file file: name: "{{ ansible_env.HOME }}/.ssh/shade_id_rsa.pub" state: absent - name: Delete test key pvt file file: name: "{{ ansible_env.HOME }}/.ssh/shade_id_rsa" state: absent ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/keystone_domain/0000775000175000017500000000000000000000000025311 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/keystone_domain/defaults/0000775000175000017500000000000000000000000027120 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/keystone_domain/defaults/main.yml0000664000175000017500000000003400000000000030564 0ustar00zuulzuul00000000000000domain_name: ansible_domain ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/keystone_domain/tasks/0000775000175000017500000000000000000000000026436 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/keystone_domain/tasks/main.yml0000664000175000017500000000070400000000000030106 0ustar00zuulzuul00000000000000--- - name: Create keystone domain os_keystone_domain: cloud: "{{ cloud }}" state: present name: "{{ domain_name }}" description: "test description" - name: Update keystone domain os_keystone_domain: cloud: "{{ cloud }}" name: "{{ domain_name }}" description: "updated description" - name: Delete keystone domain os_keystone_domain: cloud: "{{ cloud }}" state: absent name: "{{ domain_name }}" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/keystone_role/0000775000175000017500000000000000000000000025003 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/keystone_role/defaults/0000775000175000017500000000000000000000000026612 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/keystone_role/defaults/main.yml0000664000175000017500000000004100000000000030254 0ustar00zuulzuul00000000000000role_name: ansible_keystone_role ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/keystone_role/tasks/0000775000175000017500000000000000000000000026130 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/keystone_role/tasks/main.yml0000664000175000017500000000037400000000000027603 0ustar00zuulzuul00000000000000--- - name: Create keystone role os_keystone_role: cloud: "{{ cloud }}" 
state: present name: "{{ role_name }}" - name: Delete keystone role os_keystone_role: cloud: "{{ cloud }}" state: absent name: "{{ role_name }}" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/network/0000775000175000017500000000000000000000000023612 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/network/defaults/0000775000175000017500000000000000000000000025421 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/network/defaults/main.yml0000664000175000017500000000011200000000000027062 0ustar00zuulzuul00000000000000network_name: shade_network network_shared: false network_external: false ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/network/tasks/0000775000175000017500000000000000000000000024737 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/network/tasks/main.yml0000664000175000017500000000046600000000000026414 0ustar00zuulzuul00000000000000--- - name: Create network os_network: cloud: "{{ cloud }}" name: "{{ network_name }}" state: present shared: "{{ network_shared }}" external: "{{ network_external }}" - name: Delete network os_network: cloud: "{{ cloud }}" name: "{{ network_name }}" state: absent ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/nova_flavor/0000775000175000017500000000000000000000000024435 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3293538 openstacksdk-4.0.0/openstack/tests/ansible/roles/nova_flavor/tasks/0000775000175000017500000000000000000000000025562 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/nova_flavor/tasks/main.yml0000664000175000017500000000204000000000000027225 0ustar00zuulzuul00000000000000--- - name: Create public flavor os_nova_flavor: cloud: "{{ cloud }}" state: present name: ansible_public_flavor is_public: True ram: 1024 vcpus: 1 disk: 10 ephemeral: 10 swap: 1 flavorid: 12345 - name: Delete public flavor os_nova_flavor: cloud: "{{ cloud }}" state: absent name: ansible_public_flavor - name: Create private flavor os_nova_flavor: cloud: "{{ cloud }}" state: present name: ansible_private_flavor is_public: False ram: 1024 vcpus: 1 disk: 10 ephemeral: 10 swap: 1 flavorid: 12345 - name: Delete private flavor os_nova_flavor: cloud: "{{ cloud }}" state: absent name: ansible_private_flavor - name: Create flavor (defaults) os_nova_flavor: cloud: "{{ cloud }}" state: present name: ansible_defaults_flavor ram: 1024 vcpus: 1 disk: 10 - name: Delete flavor (defaults) os_nova_flavor: cloud: "{{ cloud }}" state: absent name: ansible_defaults_flavor ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/object/0000775000175000017500000000000000000000000023367 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/object/tasks/0000775000175000017500000000000000000000000024514 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/object/tasks/main.yml0000664000175000017500000000136500000000000026170 0ustar00zuulzuul00000000000000--- - name: Create a test object file shell: mktemp register: tmp_file - name: Create container os_object: cloud: "{{ cloud }}" state: present container: ansible_container container_access: private - name: Put object os_object: cloud: "{{ cloud }}" state: present name: ansible_object filename: "{{ tmp_file.stdout }}" container: ansible_container - name: Delete object os_object: cloud: "{{ cloud }}" state: absent name: ansible_object container: ansible_container - name: Delete container os_object: cloud: "{{ cloud }}" state: absent container: ansible_container - name: Delete test object file file: name: "{{ tmp_file.stdout }}" state: absent ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/port/0000775000175000017500000000000000000000000023105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/port/defaults/0000775000175000017500000000000000000000000024714 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/port/defaults/main.yml0000664000175000017500000000026100000000000026362 0ustar00zuulzuul00000000000000network_name: ansible_port_network network_external: true subnet_name: ansible_port_subnet port_name: ansible_port secgroup_name: ansible_port_secgroup no_security_groups: True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 
openstacksdk-4.0.0/openstack/tests/ansible/roles/port/tasks/0000775000175000017500000000000000000000000024232 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/port/tasks/main.yml0000664000175000017500000000444400000000000025707 0ustar00zuulzuul00000000000000--- - name: Create network os_network: cloud: "{{ cloud }}" state: present name: "{{ network_name }}" external: "{{ network_external }}" - name: Create subnet os_subnet: cloud: "{{ cloud }}" state: present name: "{{ subnet_name }}" network_name: "{{ network_name }}" cidr: 10.5.5.0/24 - name: Create port (no security group or default security group) os_port: cloud: "{{ cloud }}" state: present name: "{{ port_name }}" network: "{{ network_name }}" no_security_groups: "{{ no_security_groups }}" fixed_ips: - ip_address: 10.5.5.69 register: port - debug: var=port - name: Delete port (no security group or default security group) os_port: cloud: "{{ cloud }}" state: absent name: "{{ port_name }}" - name: Create security group os_security_group: cloud: "{{ cloud }}" state: present name: "{{ secgroup_name }}" description: Test group - name: Create port (with security group) os_port: cloud: "{{ cloud }}" state: present name: "{{ port_name }}" network: "{{ network_name }}" fixed_ips: - ip_address: 10.5.5.69 security_groups: - "{{ secgroup_name }}" register: port - debug: var=port - name: Delete port (with security group) os_port: cloud: "{{ cloud }}" state: absent name: "{{ port_name }}" - name: Create port (with allowed_address_pairs and extra_dhcp_opts) os_port: cloud: "{{ cloud }}" state: present name: "{{ port_name }}" network: "{{ network_name }}" no_security_groups: "{{ no_security_groups }}" allowed_address_pairs: - ip_address: 10.6.7.0/24 extra_dhcp_opts: - opt_name: "bootfile-name" opt_value: "testfile.1" register: port - debug: var=port - name: Delete port (with 
allowed_address_pairs and extra_dhcp_opts) os_port: cloud: "{{ cloud }}" state: absent name: "{{ port_name }}" - name: Delete security group os_security_group: cloud: "{{ cloud }}" state: absent name: "{{ secgroup_name }}" - name: Delete subnet os_subnet: cloud: "{{ cloud }}" state: absent name: "{{ subnet_name }}" - name: Delete network os_network: cloud: "{{ cloud }}" state: absent name: "{{ network_name }}" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.085236 openstacksdk-4.0.0/openstack/tests/ansible/roles/router/0000775000175000017500000000000000000000000023441 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/router/defaults/0000775000175000017500000000000000000000000025250 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/router/defaults/main.yml0000664000175000017500000000013700000000000026720 0ustar00zuulzuul00000000000000external_network_name: ansible_external_net network_external: true router_name: ansible_router ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/router/tasks/0000775000175000017500000000000000000000000024566 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/router/tasks/main.yml0000664000175000017500000000365100000000000026242 0ustar00zuulzuul00000000000000--- # Regular user operation - name: Create internal network os_network: cloud: "{{ cloud }}" state: present name: "{{ network_name }}" external: false - name: Create subnet1 os_subnet: cloud: "{{ cloud }}" 
state: present network_name: "{{ network_name }}" name: shade_subnet1 cidr: 10.7.7.0/24 - name: Create router os_router: cloud: "{{ cloud }}" state: present name: "{{ router_name }}" - name: Update router (add interface) os_router: cloud: "{{ cloud }}" state: present name: "{{ router_name }}" interfaces: - shade_subnet1 # Admin operation - name: Create external network os_network: cloud: "{{ cloud }}" state: present name: "{{ external_network_name }}" external: "{{ network_external }}" when: - network_external - name: Create subnet2 os_subnet: cloud: "{{ cloud }}" state: present network_name: "{{ external_network_name }}" name: shade_subnet2 cidr: 10.6.6.0/24 when: - network_external - name: Update router (add external gateway) os_router: cloud: "{{ cloud }}" state: present name: "{{ router_name }}" network: "{{ external_network_name }}" interfaces: - shade_subnet1 when: - network_external - name: Delete router os_router: cloud: "{{ cloud }}" state: absent name: "{{ router_name }}" - name: Delete subnet1 os_subnet: cloud: "{{ cloud }}" state: absent name: shade_subnet1 - name: Delete subnet2 os_subnet: cloud: "{{ cloud }}" state: absent name: shade_subnet2 when: - network_external - name: Delete internal network os_network: cloud: "{{ cloud }}" state: absent name: "{{ network_name }}" - name: Delete external network os_network: cloud: "{{ cloud }}" state: absent name: "{{ external_network_name }}" when: - network_external ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.089238 openstacksdk-4.0.0/openstack/tests/ansible/roles/security_group/0000775000175000017500000000000000000000000025204 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/security_group/defaults/0000775000175000017500000000000000000000000027013 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/security_group/defaults/main.yml0000664000175000017500000000003600000000000030461 0ustar00zuulzuul00000000000000secgroup_name: shade_secgroup ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/security_group/tasks/0000775000175000017500000000000000000000000026331 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/security_group/tasks/main.yml0000664000175000017500000000570600000000000030010 0ustar00zuulzuul00000000000000--- - name: Create security group os_security_group: cloud: "{{ cloud }}" name: "{{ secgroup_name }}" state: present description: Created from Ansible playbook - name: Create empty ICMP rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: present protocol: icmp remote_ip_prefix: 0.0.0.0/0 - name: Create -1 ICMP rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: present protocol: icmp port_range_min: -1 port_range_max: -1 remote_ip_prefix: 0.0.0.0/0 - name: Create empty TCP rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: present protocol: tcp remote_ip_prefix: 0.0.0.0/0 - name: Create empty UDP rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: present protocol: udp remote_ip_prefix: 0.0.0.0/0 - name: Create HTTP rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: present protocol: tcp port_range_min: 80 port_range_max: 80 remote_ip_prefix: 0.0.0.0/0 - name: Create egress rule os_security_group_rule: cloud: 
"{{ cloud }}" security_group: "{{ secgroup_name }}" state: present protocol: tcp port_range_min: 30000 port_range_max: 30001 remote_ip_prefix: 0.0.0.0/0 direction: egress - name: Delete empty ICMP rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: absent protocol: icmp remote_ip_prefix: 0.0.0.0/0 - name: Delete -1 ICMP rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: absent protocol: icmp port_range_min: -1 port_range_max: -1 remote_ip_prefix: 0.0.0.0/0 - name: Delete empty TCP rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: absent protocol: tcp remote_ip_prefix: 0.0.0.0/0 - name: Delete empty UDP rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: absent protocol: udp remote_ip_prefix: 0.0.0.0/0 - name: Delete HTTP rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: absent protocol: tcp port_range_min: 80 port_range_max: 80 remote_ip_prefix: 0.0.0.0/0 - name: Delete egress rule os_security_group_rule: cloud: "{{ cloud }}" security_group: "{{ secgroup_name }}" state: absent protocol: tcp port_range_min: 30000 port_range_max: 30001 remote_ip_prefix: 0.0.0.0/0 direction: egress - name: Delete security group os_security_group: cloud: "{{ cloud }}" name: "{{ secgroup_name }}" state: absent ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.089238 openstacksdk-4.0.0/openstack/tests/ansible/roles/server/0000775000175000017500000000000000000000000023427 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/server/defaults/0000775000175000017500000000000000000000000025236 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/server/defaults/main.yaml0000664000175000017500000000016600000000000027051 0ustar00zuulzuul00000000000000server_network: private server_name: ansible_server flavor: m1.tiny floating_ip_pool_name: public boot_volume_size: 5 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/server/tasks/0000775000175000017500000000000000000000000024554 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/server/tasks/main.yml0000664000175000017500000000376200000000000026233 0ustar00zuulzuul00000000000000--- - name: Create server with meta as CSV os_server: cloud: "{{ cloud }}" state: present name: "{{ server_name }}" image: "{{ image }}" flavor: "{{ flavor }}" network: "{{ server_network }}" auto_floating_ip: false meta: "key1=value1,key2=value2" wait: true register: server - debug: var=server - name: Delete server with meta as CSV os_server: cloud: "{{ cloud }}" state: absent name: "{{ server_name }}" wait: true - name: Create server with meta as dict os_server: cloud: "{{ cloud }}" state: present name: "{{ server_name }}" image: "{{ image }}" flavor: "{{ flavor }}" auto_floating_ip: false network: "{{ server_network }}" meta: key1: value1 key2: value2 wait: true register: server - debug: var=server - name: Delete server with meta as dict os_server: cloud: "{{ cloud }}" state: absent name: "{{ server_name }}" wait: true - name: Create server (FIP from pool/network) os_server: cloud: "{{ cloud }}" state: present name: "{{ server_name }}" image: "{{ image }}" flavor: "{{ flavor }}" network: "{{ server_network }}" floating_ip_pools: - "{{ floating_ip_pool_name }}" wait: true register: server - debug: var=server - name: Delete server (FIP from 
pool/network) os_server: cloud: "{{ cloud }}" state: absent name: "{{ server_name }}" wait: true - name: Create server from volume os_server: cloud: "{{ cloud }}" state: present name: "{{ server_name }}" image: "{{ image }}" flavor: "{{ flavor }}" network: "{{ server_network }}" auto_floating_ip: false boot_from_volume: true volume_size: "{{ boot_volume_size }}" terminate_volume: true wait: true register: server - debug: var=server - name: Delete server with volume os_server: cloud: "{{ cloud }}" state: absent name: "{{ server_name }}" wait: true ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.089238 openstacksdk-4.0.0/openstack/tests/ansible/roles/subnet/0000775000175000017500000000000000000000000023421 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/subnet/defaults/0000775000175000017500000000000000000000000025230 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/subnet/defaults/main.yml0000664000175000017500000000006400000000000026677 0ustar00zuulzuul00000000000000subnet_name: shade_subnet enable_subnet_dhcp: false ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/subnet/tasks/0000775000175000017500000000000000000000000024546 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/subnet/tasks/main.yml0000664000175000017500000000200500000000000026212 0ustar00zuulzuul00000000000000--- - name: Create network {{ network_name }} os_network: cloud: "{{ cloud }}" name: "{{ network_name }}" 
state: present - name: Create subnet {{ subnet_name }} on network {{ network_name }} os_subnet: cloud: "{{ cloud }}" network_name: "{{ network_name }}" name: "{{ subnet_name }}" state: present enable_dhcp: "{{ enable_subnet_dhcp }}" dns_nameservers: - 8.8.8.7 - 8.8.8.8 cidr: 192.168.0.0/24 gateway_ip: 192.168.0.1 allocation_pool_start: 192.168.0.2 allocation_pool_end: 192.168.0.254 - name: Update subnet os_subnet: cloud: "{{ cloud }}" network_name: "{{ network_name }}" name: "{{ subnet_name }}" state: present dns_nameservers: - 8.8.8.7 cidr: 192.168.0.0/24 - name: Delete subnet {{ subnet_name }} os_subnet: cloud: "{{ cloud }}" name: "{{ subnet_name }}" state: absent - name: Delete network {{ network_name }} os_network: cloud: "{{ cloud }}" name: "{{ network_name }}" state: absent ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.089238 openstacksdk-4.0.0/openstack/tests/ansible/roles/user/0000775000175000017500000000000000000000000023077 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/user/tasks/0000775000175000017500000000000000000000000024224 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/user/tasks/main.yml0000664000175000017500000000107000000000000025671 0ustar00zuulzuul00000000000000--- - name: Create user os_user: cloud: "{{ cloud }}" state: present name: ansible_user password: secret email: ansible.user@nowhere.net domain: default default_project: demo register: user - debug: var=user - name: Update user os_user: cloud: "{{ cloud }}" state: present name: ansible_user password: secret email: updated.ansible.user@nowhere.net register: updateduser - debug: var=updateduser - name: Delete user os_user: cloud: "{{ cloud }}" 
state: absent name: ansible_user ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.089238 openstacksdk-4.0.0/openstack/tests/ansible/roles/user_group/0000775000175000017500000000000000000000000024313 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/user_group/tasks/0000775000175000017500000000000000000000000025440 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/roles/user_group/tasks/main.yml0000664000175000017500000000116500000000000027112 0ustar00zuulzuul00000000000000--- - name: Create user os_user: cloud: "{{ cloud }}" state: present name: ansible_user password: secret email: ansible.user@nowhere.net domain: default default_project: demo register: user - name: Assign user to nonadmins group os_user_group: cloud: "{{ cloud }}" state: present user: ansible_user group: nonadmins - name: Remove user from nonadmins group os_user_group: cloud: "{{ cloud }}" state: absent user: ansible_user group: nonadmins - name: Delete user os_user: cloud: "{{ cloud }}" state: absent name: ansible_user ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.089238 openstacksdk-4.0.0/openstack/tests/ansible/roles/volume/0000775000175000017500000000000000000000000023430 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3333557 openstacksdk-4.0.0/openstack/tests/ansible/roles/volume/tasks/0000775000175000017500000000000000000000000024555 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/ansible/roles/volume/tasks/main.yml0000664000175000017500000000047700000000000026234 0ustar00zuulzuul00000000000000--- - name: Create volume os_volume: cloud: "{{ cloud }}" state: present size: 1 display_name: ansible_volume display_description: Test volume register: vol - debug: var=vol - name: Delete volume os_volume: cloud: "{{ cloud }}" state: absent display_name: ansible_volume ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/ansible/run.yml0000664000175000017500000000162300000000000022326 0ustar00zuulzuul00000000000000--- - hosts: localhost connection: local gather_facts: true roles: - { role: auth, tags: auth } - { role: client_config, tags: client_config } - { role: group, tags: group } # TODO(mordred) Reenable this once the fixed os_image winds up in an # upstream ansible release. # - { role: image, tags: image } - { role: keypair, tags: keypair } - { role: keystone_domain, tags: keystone_domain } - { role: keystone_role, tags: keystone_role } - { role: network, tags: network } - { role: nova_flavor, tags: nova_flavor } - { role: object, tags: object } - { role: port, tags: port } - { role: router, tags: router } - { role: security_group, tags: security_group } - { role: server, tags: server } - { role: subnet, tags: subnet } - { role: user, tags: user } - { role: user_group, tags: user_group } - { role: volume, tags: volume } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/base.py0000664000175000017500000001163700000000000020654 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import logging import os import pprint import sys import typing as ty import fixtures from oslotest import base import testtools.content from openstack.tests import fixtures as os_fixtures from openstack import utils _TRUE_VALUES = ('true', '1', 'yes') class TestCase(base.BaseTestCase): """Test case base class for all tests.""" # A way to adjust slow test classes TIMEOUT_SCALING_FACTOR = 1.0 def setUp(self): """Run before each test method to initialize test environment.""" # No openstacksdk unit tests should EVER run longer than a second. # Set this to 5 by default just to give us some fudge. # Do this before super setUp so that we intercept the default value # in oslotest. TODO(mordred) Make the default timeout configurable # in oslotest. 
test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', '5')) try: test_timeout = int(test_timeout * self.TIMEOUT_SCALING_FACTOR) self.useFixture( fixtures.EnvironmentVariable( 'OS_TEST_TIMEOUT', str(test_timeout) ) ) except ValueError: # Let oslotest do its thing pass super().setUp() self.warnings = self.useFixture(os_fixtures.WarningsFixture()) self._log_stream: ty.TextIO if os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES: self._log_stream = io.StringIO() if os.environ.get('OS_ALWAYS_LOG') in _TRUE_VALUES: self.addCleanup(self.printLogs) else: self.addOnException(self.attachLogs) else: self._log_stream = sys.stdout handler = logging.StreamHandler(self._log_stream) formatter = logging.Formatter('%(asctime)s %(name)-32s %(message)s') handler.setFormatter(formatter) logger = logging.getLogger('openstack') logger.setLevel(logging.DEBUG) logger.addHandler(handler) # Enable HTTP level tracing # TODO(mordred) This is blowing out our memory we think logger = logging.getLogger('keystoneauth') logger.setLevel(logging.INFO) logger.addHandler(handler) logger.propagate = False def _fake_logs(self): # Override _fake_logs in oslotest until we can get our # attach-on-exception logic added pass def assertEqual(self, first, second, *args, **kwargs): '''Munch aware wrapper''' if isinstance(first, utils.Munch): first = first.toDict() if isinstance(second, utils.Munch): second = second.toDict() return super().assertEqual(first, second, *args, **kwargs) def printLogs(self, *args): self._log_stream.seek(0) print(self._log_stream.read()) def attachLogs(self, *args): def reader(): self._log_stream.seek(0) while True: x = self._log_stream.read(4096) if not x: break yield x.encode('utf8') content = testtools.content.content_from_reader( reader, testtools.content_type.UTF8_TEXT, False ) self.addDetail('logging', content) def add_info_on_exception(self, name, text): def add_content(unused): self.addDetail( name, testtools.content.text_content(pprint.pformat(text)) ) 
self.addOnException(add_content) def assertSubdict(self, part, whole): missing_keys = [] for key in part: # In the resource we have virtual access by not existing keys. To # verify those are there try access it. if not whole[key] and part[key]: missing_keys.append(key) if missing_keys: self.fail( "Keys {} are in {} but not in {}".format( missing_keys, part, whole ) ) wrong_values = [ (key, part[key], whole[key]) for key in part if part[key] != whole[key] ] if wrong_values: self.fail( "Mismatched values: %s" % ", ".join( "for %s got %s and %s" % tpl for tpl in wrong_values ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/fakes.py0000664000175000017500000004006200000000000021025 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" fakes ----- Fakes used for testing """ import datetime import hashlib import json import uuid from openstack.cloud import meta from openstack.orchestration.util import template_format from openstack import utils PROJECT_ID = '1c36b64c840a42cd9e9b931a369337f0' FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddd' CHOCOLATE_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8ddde' STRAWBERRY_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddf' COMPUTE_ENDPOINT = 'https://compute.example.com/v2.1' ORCHESTRATION_ENDPOINT = 'https://orchestration.example.com/v1/{p}'.format( p=PROJECT_ID ) NO_MD5 = '93b885adfe0da089cdf634904fd59f71' NO_SHA256 = '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' FAKE_PUBLIC_KEY = ( "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGj" "lnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/" "sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qg" "fQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3P" "HB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+" "YIsBUHNLLMM/oQp Generated-by-Nova\n" ) def make_fake_flavor(flavor_id, name, ram=100, disk=1600, vcpus=24): return { 'OS-FLV-DISABLED:disabled': False, 'OS-FLV-EXT-DATA:ephemeral': 0, 'disk': disk, 'id': flavor_id, 'links': [ { 'href': '{endpoint}/flavors/{id}'.format( endpoint=COMPUTE_ENDPOINT, id=flavor_id ), 'rel': 'self', }, { 'href': '{endpoint}/flavors/{id}'.format( endpoint=COMPUTE_ENDPOINT, id=flavor_id ), 'rel': 'bookmark', }, ], 'name': name, 'os-flavor-access:is_public': True, 'ram': ram, 'rxtx_factor': 1.0, 'swap': 0, 'vcpus': vcpus, } FAKE_FLAVOR = make_fake_flavor(FLAVOR_ID, 'vanilla') FAKE_CHOCOLATE_FLAVOR = make_fake_flavor( CHOCOLATE_FLAVOR_ID, 'chocolate', ram=200 ) FAKE_STRAWBERRY_FLAVOR = make_fake_flavor( STRAWBERRY_FLAVOR_ID, 'strawberry', ram=300 ) FAKE_FLAVOR_LIST = [FAKE_FLAVOR, FAKE_CHOCOLATE_FLAVOR, FAKE_STRAWBERRY_FLAVOR] FAKE_TEMPLATE = 
'''heat_template_version: 2014-10-16 parameters: length: type: number default: 10 resources: my_rand: type: OS::Heat::RandomString properties: length: {get_param: length} outputs: rand: value: get_attr: [my_rand, value] ''' FAKE_TEMPLATE_CONTENT = template_format.parse(FAKE_TEMPLATE) def make_fake_server( server_id, name, status='ACTIVE', admin_pass=None, addresses=None, image=None, flavor=None, ): if addresses is None: if status == 'ACTIVE': addresses = { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d", "version": 6, "addr": "fddb:b018:307:0:f816:3eff:fedf:b08d", "OS-EXT-IPS:type": "fixed", }, { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d", "version": 4, "addr": "10.1.0.9", "OS-EXT-IPS:type": "fixed", }, { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d", "version": 4, "addr": "172.24.5.5", "OS-EXT-IPS:type": "floating", }, ] } else: addresses = {} if image is None: image = {"id": "217f3ab1-03e0-4450-bf27-63d52b421e9e", "links": []} if flavor is None: flavor = {"id": "64", "links": []} server = { "OS-EXT-STS:task_state": None, "addresses": addresses, "links": [], "image": image, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2017-03-23T23:57:38.000000", "flavor": flavor, "id": server_id, "security_groups": [{"name": "default"}], "user_id": "9c119f4beaaa438792ce89387362b3ad", "OS-DCF:diskConfig": "MANUAL", "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "OS-EXT-AZ:availability_zone": "nova", "metadata": {}, "status": status, "updated": "2017-03-23T23:57:39Z", "hostId": "89d165f04384e3ffa4b6536669eb49104d30d6ca832bba2684605dbc", "OS-SRV-USG:terminated_at": None, "key_name": None, "name": name, "created": "2017-03-23T23:57:12Z", "tenant_id": PROJECT_ID, "os-extended-volumes:volumes_attached": [], "config_drive": "True", } if admin_pass: server['adminPass'] = admin_pass return json.loads(json.dumps(server)) def make_fake_keypair(name): # Note: this is literally taken from: # 
https://docs.openstack.org/api-ref/compute/ return { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": name, "type": "ssh", "public_key": FAKE_PUBLIC_KEY, "created_at": datetime.datetime.now().isoformat(), } def make_fake_stack(id, name, description=None, status='CREATE_COMPLETE'): return { 'creation_time': '2017-03-23T23:57:12Z', 'deletion_time': '2017-03-23T23:57:12Z', 'description': description, 'id': id, 'links': [], 'parent': None, 'stack_name': name, 'stack_owner': None, 'stack_status': status, 'stack_user_project_id': PROJECT_ID, 'tags': None, 'updated_time': '2017-03-23T23:57:12Z', } def make_fake_stack_event( id, name, status='CREATE_COMPLETED', resource_name='id' ): event_id = uuid.uuid4().hex self_url = "{endpoint}/stacks/{name}/{id}/resources/{name}/events/{event}" resource_url = "{endpoint}/stacks/{name}/{id}/resources/{name}" return { "resource_name": id if resource_name == 'id' else name, "event_time": "2017-03-26T19:38:18", "links": [ { "href": self_url.format( endpoint=ORCHESTRATION_ENDPOINT, name=name, id=id, event=event_id, ), "rel": "self", }, { "href": resource_url.format( endpoint=ORCHESTRATION_ENDPOINT, name=name, id=id ), "rel": "resource", }, { "href": "{endpoint}/stacks/{name}/{id}".format( endpoint=ORCHESTRATION_ENDPOINT, name=name, id=id ), "rel": "stack", }, ], "logical_resource_id": name, "resource_status": status, "resource_status_reason": "", "physical_resource_id": id, "id": event_id, } def make_fake_image( image_id=None, md5=NO_MD5, sha256=NO_SHA256, status='active', image_name='fake_image', data=None, checksum='ee36e35a297980dee1b514de9803ec6d', ): if data: md5 = utils.md5(usedforsecurity=False) sha256 = hashlib.sha256() with open(data, 'rb') as file_obj: for chunk in iter(lambda: file_obj.read(8192), b''): md5.update(chunk) sha256.update(chunk) md5 = md5.hexdigest() sha256 = sha256.hexdigest() return { 'image_state': 'available', 'container_format': 'bare', 'min_ram': 0, 'ramdisk_id': 'fake_ramdisk_id', 
'updated_at': '2016-02-10T05:05:02Z', 'file': '/v2/images/' + image_id + '/file', 'size': 3402170368, 'image_type': 'snapshot', 'disk_format': 'qcow2', 'id': image_id, 'schema': '/v2/schemas/image', 'status': status, 'tags': [], 'visibility': 'private', 'locations': [ {'url': 'http://127.0.0.1/images/' + image_id, 'metadata': {}} ], 'min_disk': 40, 'virtual_size': None, 'name': image_name, 'checksum': md5 or checksum, 'created_at': '2016-02-10T05:03:11Z', 'owner_specified.openstack.md5': md5 or NO_MD5, 'owner_specified.openstack.sha256': sha256 or NO_SHA256, 'owner_specified.openstack.object': 'images/{name}'.format( name=image_name ), 'protected': False, } def make_fake_machine(machine_name, machine_id=None): if not machine_id: machine_id = uuid.uuid4().hex return meta.obj_to_munch(FakeMachine(id=machine_id, name=machine_name)) def make_fake_port(address, node_id=None, port_id=None): if not node_id: node_id = uuid.uuid4().hex if not port_id: port_id = uuid.uuid4().hex return meta.obj_to_munch( FakeMachinePort(id=port_id, address=address, node_id=node_id) ) class FakeFloatingIP: def __init__(self, id, pool, ip, fixed_ip, instance_id): self.id = id self.pool = pool self.ip = ip self.fixed_ip = fixed_ip self.instance_id = instance_id def make_fake_server_group(id, name, policies): return json.loads( json.dumps( { 'id': id, 'name': name, 'policies': policies, 'members': [], 'metadata': {}, } ) ) def make_fake_hypervisor(id, name): return json.loads( json.dumps( { 'id': id, 'hypervisor_hostname': name, 'state': 'up', 'status': 'enabled', "cpu_info": { "arch": "x86_64", "model": "Nehalem", "vendor": "Intel", "features": ["pge", "clflush"], "topology": {"cores": 1, "threads": 1, "sockets": 4}, }, "current_workload": 0, "status": "enabled", "state": "up", "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_type": "fake", "hypervisor_version": 1000, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, 
"memory_mb_used": 512, "running_vms": 0, "service": {"host": "host1", "id": 7, "disabled_reason": None}, "vcpus": 1, "vcpus_used": 0, } ) ) class FakeVolume: def __init__(self, id, status, name, attachments=[], size=75): self.id = id self.status = status self.name = name self.attachments = attachments self.size = size self.snapshot_id = 'id:snapshot' self.description = 'description' self.volume_type = 'type:volume' self.availability_zone = 'az1' self.created_at = '1900-01-01 12:34:56' self.updated_at = None self.source_volid = '12345' self.metadata = {} class FakeVolumeSnapshot: def __init__(self, id, status, name, description, size=75): self.id = id self.status = status self.name = name self.description = description self.size = size self.created_at = '1900-01-01 12:34:56' self.updated_at = None self.volume_id = '12345' self.metadata = {} self.is_forced = False class FakeMachine: def __init__( self, id, name=None, driver=None, driver_info=None, chassis_uuid=None, instance_info=None, instance_uuid=None, properties=None, reservation=None, last_error=None, provision_state='available', ): self.uuid = id self.name = name self.driver = driver self.driver_info = driver_info self.chassis_uuid = chassis_uuid self.instance_info = instance_info self.instance_uuid = instance_uuid self.properties = properties self.reservation = reservation self.last_error = last_error self.provision_state = provision_state class FakeMachinePort: def __init__(self, id, address, node_id): self.uuid = id self.address = address self.node_uuid = node_id def make_fake_neutron_security_group( id, name, description, rules, stateful=True, project_id=None ): if not rules: rules = [] if not project_id: project_id = PROJECT_ID return json.loads( json.dumps( { 'id': id, 'name': name, 'description': description, 'stateful': stateful, 'project_id': project_id, 'tenant_id': project_id, 'security_group_rules': rules, } ) ) def make_fake_nova_security_group_rule( id, from_port, to_port, ip_protocol, cidr ): 
return json.loads( json.dumps( { 'id': id, 'from_port': int(from_port), 'to_port': int(to_port), 'ip_protcol': 'tcp', 'ip_range': {'cidr': cidr}, } ) ) def make_fake_nova_security_group(id, name, description, rules): if not rules: rules = [] return json.loads( json.dumps( { 'id': id, 'name': name, 'description': description, 'tenant_id': PROJECT_ID, 'rules': rules, } ) ) class FakeNovaSecgroupRule: def __init__( self, id, from_port=None, to_port=None, ip_protocol=None, cidr=None, parent_group_id=None, ): self.id = id self.from_port = from_port self.to_port = to_port self.ip_protocol = ip_protocol if cidr: self.ip_range = {'cidr': cidr} self.parent_group_id = parent_group_id class FakeHypervisor: def __init__(self, id, hostname): self.id = id self.hypervisor_hostname = hostname class FakeZone: def __init__(self, id, name, type_, email, description, ttl, masters): self.id = id self.name = name self.type_ = type_ self.email = email self.description = description self.ttl = ttl self.masters = masters class FakeRecordset: def __init__(self, zone, id, name, type_, description, ttl, records): self.zone = zone self.id = id self.name = name self.type_ = type_ self.description = description self.ttl = ttl self.records = records def make_fake_aggregate( id, name, availability_zone='nova', metadata=None, hosts=None ): if not metadata: metadata = {} if not hosts: hosts = [] return json.loads( json.dumps( { "availability_zone": availability_zone, "created_at": datetime.datetime.now().isoformat(), "deleted": False, "deleted_at": None, "hosts": hosts, "id": int(id), "metadata": { "availability_zone": availability_zone, }, "name": name, "updated_at": None, } ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/fixtures.py0000664000175000017500000000336500000000000021612 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ fixtures -------- Fixtures used for testing """ import warnings import fixtures from openstack import warnings as os_warnings # TODO(stephenfin): Replace this with WarningsFilter from fixtures when it's # released https://github.com/testing-cabal/fixtures/pull/50 class WarningsFixture(fixtures.Fixture): """Filters out warnings during test runs.""" def setUp(self): super().setUp() self._original_warning_filters = warnings.filters[:] # enable user warnings as many libraries use this (it's the default) warnings.simplefilter("error", UserWarning) # enable deprecation warnings in general... 
warnings.simplefilter("once", DeprecationWarning) # ...but ignore our own deprecation warnings warnings.filterwarnings( "ignore", category=os_warnings.OpenStackDeprecationWarning, ) # also ignore our own general warnings warnings.filterwarnings( "ignore", category=os_warnings.OpenStackWarning, ) self.addCleanup(self._reset_warning_filters) def _reset_warning_filters(self): warnings.filters[:] = self._original_warning_filters # type: ignore[index] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3373578 openstacksdk-4.0.0/openstack/tests/functional/0000775000175000017500000000000000000000000021522 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/README.rst0000664000175000017500000000034300000000000023211 0ustar00zuulzuul00000000000000Unit Tests for openstacksdk =========================== For information on how to run and extend these tests, refer to the `contributor guide`__. .. 
__: https://docs.openstack.org/openstacksdk/latest/contributor/testing.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/__init__.py0000664000175000017500000000000000000000000023621 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3373578 openstacksdk-4.0.0/openstack/tests/functional/baremetal/0000775000175000017500000000000000000000000023456 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/__init__.py0000664000175000017500000000000000000000000025555 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/base.py0000664000175000017500000000733200000000000024747 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty from openstack.tests.functional import base class BaseBaremetalTest(base.BaseFunctionalTest): min_microversion: ty.Optional[str] = None node_id: str def setUp(self): super().setUp() self.require_service( 'baremetal', min_microversion=self.min_microversion ) def create_allocation(self, **kwargs): allocation = self.conn.baremetal.create_allocation(**kwargs) self.addCleanup( lambda: self.conn.baremetal.delete_allocation( allocation.id, ignore_missing=True ) ) return allocation def create_chassis(self, **kwargs): chassis = self.conn.baremetal.create_chassis(**kwargs) self.addCleanup( lambda: self.conn.baremetal.delete_chassis( chassis.id, ignore_missing=True ) ) return chassis def create_node(self, driver='fake-hardware', **kwargs): node = self.conn.baremetal.create_node(driver=driver, **kwargs) self.node_id = node.id self.addCleanup( lambda: self.conn.baremetal.delete_node( self.node_id, ignore_missing=True ) ) self.assertIsNotNone(self.node_id) return node def create_port(self, node_id=None, **kwargs): node_id = node_id or self.node_id port = self.conn.baremetal.create_port(node_uuid=node_id, **kwargs) self.addCleanup( lambda: self.conn.baremetal.delete_port( port.id, ignore_missing=True ) ) return port def create_port_group(self, node_id=None, **kwargs): node_id = node_id or self.node_id port_group = self.conn.baremetal.create_port_group( node_uuid=node_id, **kwargs ) self.addCleanup( lambda: self.conn.baremetal.delete_port_group( port_group.id, ignore_missing=True ) ) return port_group def create_volume_connector(self, node_id=None, **kwargs): node_id = node_id or self.node_id volume_connector = self.conn.baremetal.create_volume_connector( node_uuid=node_id, **kwargs ) self.addCleanup( lambda: self.conn.baremetal.delete_volume_connector( volume_connector.id, ignore_missing=True ) ) return volume_connector def create_volume_target(self, node_id=None, **kwargs): node_id = node_id or self.node_id volume_target = 
self.conn.baremetal.create_volume_target( node_uuid=node_id, **kwargs ) self.addCleanup( lambda: self.conn.baremetal.delete_volume_target( volume_target.id, ignore_missing=True ) ) return volume_target def create_deploy_template(self, **kwargs): """Create a new deploy_template from attributes.""" deploy_template = self.conn.baremetal.create_deploy_template(**kwargs) self.addCleanup( lambda: self.conn.baremetal.delete_deploy_template( deploy_template.id, ignore_missing=True ) ) return deploy_template ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/test_baremetal_allocation.py0000664000175000017500000001733100000000000031235 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random from openstack import exceptions from openstack.tests.functional.baremetal import base class Base(base.BaseBaremetalTest): def setUp(self): super().setUp() # NOTE(dtantsur): generate a unique resource class to prevent parallel # tests from clashing. self.resource_class = 'baremetal-%d' % random.randrange(1024) self.node = self._create_available_node() def _create_available_node(self): node = self.create_node(resource_class=self.resource_class) self.conn.baremetal.set_node_provision_state(node, 'manage', wait=True) self.conn.baremetal.set_node_provision_state( node, 'provide', wait=True ) # Make sure the node has non-empty power state by forcing power off. 
self.conn.baremetal.set_node_power_state(node, 'power off') self.addCleanup( lambda: self.conn.baremetal.update_node(node.id, instance_id=None) ) return node class TestBareMetalAllocation(Base): min_microversion = '1.52' def test_allocation_create_get_delete(self): allocation = self.create_allocation(resource_class=self.resource_class) self.assertEqual('allocating', allocation.state) self.assertIsNone(allocation.node_id) self.assertIsNone(allocation.last_error) loaded = self.conn.baremetal.wait_for_allocation(allocation) self.assertEqual(loaded.id, allocation.id) self.assertEqual('active', allocation.state) self.assertEqual(self.node.id, allocation.node_id) self.assertIsNone(allocation.last_error) with_fields = self.conn.baremetal.get_allocation( allocation.id, fields=['uuid', 'node_uuid'] ) self.assertEqual(allocation.id, with_fields.id) self.assertIsNone(with_fields.state) node = self.conn.baremetal.get_node(self.node.id) self.assertEqual(allocation.id, node.allocation_id) self.conn.baremetal.delete_allocation(allocation, ignore_missing=False) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_allocation, allocation.id, ) def test_allocation_list(self): allocation1 = self.create_allocation( resource_class=self.resource_class ) allocation2 = self.create_allocation( resource_class=self.resource_class + '-fail' ) self.conn.baremetal.wait_for_allocation(allocation1) self.conn.baremetal.wait_for_allocation(allocation2, ignore_error=True) allocations = self.conn.baremetal.allocations() self.assertEqual( {p.id for p in allocations}, {allocation1.id, allocation2.id} ) allocations = self.conn.baremetal.allocations(state='active') self.assertEqual([p.id for p in allocations], [allocation1.id]) allocations = self.conn.baremetal.allocations(node=self.node.id) self.assertEqual([p.id for p in allocations], [allocation1.id]) allocations = self.conn.baremetal.allocations( resource_class=self.resource_class + '-fail' ) self.assertEqual([p.id for p in 
allocations], [allocation2.id]) def test_allocation_negative_failure(self): allocation = self.create_allocation( resource_class=self.resource_class + '-fail' ) self.assertRaises( exceptions.SDKException, self.conn.baremetal.wait_for_allocation, allocation, ) allocation = self.conn.baremetal.get_allocation(allocation.id) self.assertEqual('error', allocation.state) self.assertIn(self.resource_class + '-fail', allocation.last_error) def test_allocation_negative_non_existing(self): uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_allocation, uuid, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.delete_allocation, uuid, ignore_missing=False, ) self.assertIsNone(self.conn.baremetal.delete_allocation(uuid)) def test_allocation_fields(self): self.create_allocation(resource_class=self.resource_class) result = self.conn.baremetal.allocations(fields=['uuid']) for item in result: self.assertIsNotNone(item.id) self.assertIsNone(item.resource_class) class TestBareMetalAllocationUpdate(Base): min_microversion = '1.57' def test_allocation_update(self): name = 'ossdk-name1' allocation = self.create_allocation(resource_class=self.resource_class) allocation = self.conn.baremetal.wait_for_allocation(allocation) self.assertEqual('active', allocation.state) self.assertIsNone(allocation.last_error) self.assertIsNone(allocation.name) self.assertEqual({}, allocation.extra) allocation = self.conn.baremetal.update_allocation( allocation, name=name, extra={'answer': 42} ) self.assertEqual(name, allocation.name) self.assertEqual({'answer': 42}, allocation.extra) allocation = self.conn.baremetal.get_allocation(name) self.assertEqual(name, allocation.name) self.assertEqual({'answer': 42}, allocation.extra) self.conn.baremetal.delete_allocation(allocation, ignore_missing=False) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_allocation, allocation.id, ) def 
test_allocation_patch(self): name = 'ossdk-name2' allocation = self.create_allocation(resource_class=self.resource_class) allocation = self.conn.baremetal.wait_for_allocation(allocation) self.assertEqual('active', allocation.state) self.assertIsNone(allocation.last_error) self.assertIsNone(allocation.name) self.assertEqual({}, allocation.extra) allocation = self.conn.baremetal.patch_allocation( allocation, [ {'op': 'replace', 'path': '/name', 'value': name}, {'op': 'add', 'path': '/extra/answer', 'value': 42}, ], ) self.assertEqual(name, allocation.name) self.assertEqual({'answer': 42}, allocation.extra) allocation = self.conn.baremetal.get_allocation(name) self.assertEqual(name, allocation.name) self.assertEqual({'answer': 42}, allocation.extra) allocation = self.conn.baremetal.patch_allocation( allocation, [ {'op': 'remove', 'path': '/name'}, {'op': 'remove', 'path': '/extra/answer'}, ], ) self.assertIsNone(allocation.name) self.assertEqual({}, allocation.extra) allocation = self.conn.baremetal.get_allocation(allocation.id) self.assertIsNone(allocation.name) self.assertEqual({}, allocation.extra) self.conn.baremetal.delete_allocation(allocation, ignore_missing=False) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_allocation, allocation.id, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/test_baremetal_chassis.py0000664000175000017500000000554300000000000030547 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack.tests.functional.baremetal import base class TestBareMetalChassis(base.BaseBaremetalTest): def test_chassis_create_get_delete(self): chassis = self.create_chassis() loaded = self.conn.baremetal.get_chassis(chassis.id) self.assertEqual(loaded.id, chassis.id) self.conn.baremetal.delete_chassis(chassis, ignore_missing=False) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_chassis, chassis.id, ) def test_chassis_update(self): chassis = self.create_chassis() chassis.extra = {'answer': 42} chassis = self.conn.baremetal.update_chassis(chassis) self.assertEqual({'answer': 42}, chassis.extra) chassis = self.conn.baremetal.get_chassis(chassis.id) self.assertEqual({'answer': 42}, chassis.extra) def test_chassis_patch(self): chassis = self.create_chassis() chassis = self.conn.baremetal.patch_chassis( chassis, dict(path='/extra/answer', op='add', value=42) ) self.assertEqual({'answer': 42}, chassis.extra) chassis = self.conn.baremetal.get_chassis(chassis.id) self.assertEqual({'answer': 42}, chassis.extra) def test_chassis_negative_non_existing(self): uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_chassis, uuid ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.find_chassis, uuid, ignore_missing=False, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.delete_chassis, uuid, ignore_missing=False, ) self.assertIsNone(self.conn.baremetal.find_chassis(uuid)) self.assertIsNone(self.conn.baremetal.delete_chassis(uuid)) class TestBareMetalChassisFields(base.BaseBaremetalTest): min_microversion = '1.8' def test_chassis_fields(self): self.create_chassis(description='something') result = self.conn.baremetal.chassis(fields=['uuid', 'extra']) for ch in result: self.assertIsNotNone(ch.id) 
self.assertIsNone(ch.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/test_baremetal_conductor.py0000664000175000017500000000222600000000000031105 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional.baremetal import base class TestBareMetalConductor(base.BaseBaremetalTest): min_microversion = '1.49' def test_list_get_conductor(self): node = self.create_node(name='node-name') conductors = self.conn.baremetal.conductors() hostname_list = [conductor.hostname for conductor in conductors] self.assertIn(node.conductor, hostname_list) conductor1 = self.conn.baremetal.get_conductor(node.conductor) self.assertIsNotNone(conductor1.conductor_group) self.assertIsNotNone(conductor1.links) self.assertTrue(conductor1.alive) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/test_baremetal_deploy_templates.py0000664000175000017500000001407500000000000032464 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack.tests.functional.baremetal import base class TestBareMetalDeployTemplate(base.BaseBaremetalTest): min_microversion = '1.55' def setUp(self): super().setUp() def test_baremetal_deploy_create_get_delete(self): steps = [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [{"name": "LogicalProc", "value": "Enabled"}] }, "priority": 150, } ] deploy_template = self.create_deploy_template( name='CUSTOM_DEPLOY_TEMPLATE', steps=steps ) loaded = self.conn.baremetal.get_deploy_template(deploy_template.id) self.assertEqual(loaded.id, deploy_template.id) self.conn.baremetal.delete_deploy_template( deploy_template, ignore_missing=False ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_deploy_template, deploy_template.id, ) def test_baremetal_deploy_template_list(self): steps = [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [{"name": "LogicalProc", "value": "Enabled"}] }, "priority": 150, } ] deploy_template1 = self.create_deploy_template( name='CUSTOM_DEPLOY_TEMPLATE1', steps=steps ) deploy_template2 = self.create_deploy_template( name='CUSTOM_DEPLOY_TEMPLATE2', steps=steps ) deploy_templates = self.conn.baremetal.deploy_templates() ids = [template.id for template in deploy_templates] self.assertIn(deploy_template1.id, ids) self.assertIn(deploy_template2.id, ids) deploy_templates_with_details = self.conn.baremetal.deploy_templates( details=True ) for dp in deploy_templates_with_details: self.assertIsNotNone(dp.id) self.assertIsNotNone(dp.name) 
deploy_tempalte_with_fields = self.conn.baremetal.deploy_templates( fields=['uuid'] ) for dp in deploy_tempalte_with_fields: self.assertIsNotNone(dp.id) self.assertIsNone(dp.name) def test_baremetal_deploy_list_update_delete(self): steps = [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [{"name": "LogicalProc", "value": "Enabled"}] }, "priority": 150, } ] deploy_template = self.create_deploy_template( name='CUSTOM_DEPLOY_TEMPLATE4', steps=steps ) self.assertFalse(deploy_template.extra) deploy_template.extra = {'answer': 42} deploy_template = self.conn.baremetal.update_deploy_template( deploy_template ) self.assertEqual({'answer': 42}, deploy_template.extra) deploy_template = self.conn.baremetal.get_deploy_template( deploy_template.id ) self.conn.baremetal.delete_deploy_template( deploy_template.id, ignore_missing=False ) def test_baremetal_deploy_update(self): steps = [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [{"name": "LogicalProc", "value": "Enabled"}] }, "priority": 150, } ] deploy_template = self.create_deploy_template( name='CUSTOM_DEPLOY_TEMPLATE4', steps=steps ) deploy_template.extra = {'answer': 42} deploy_template = self.conn.baremetal.update_deploy_template( deploy_template ) self.assertEqual({'answer': 42}, deploy_template.extra) deploy_template = self.conn.baremetal.get_deploy_template( deploy_template.id ) self.assertEqual({'answer': 42}, deploy_template.extra) def test_deploy_template_patch(self): name = "CUSTOM_HYPERTHREADING_ON" steps = [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [{"name": "LogicalProc", "value": "Enabled"}] }, "priority": 150, } ] deploy_template = self.create_deploy_template(name=name, steps=steps) deploy_template = self.conn.baremetal.patch_deploy_template( deploy_template, dict(path='/extra/answer', op='add', value=42) ) self.assertEqual({'answer': 42}, deploy_template.extra) self.assertEqual(name, deploy_template.name) 
deploy_template = self.conn.baremetal.get_deploy_template( deploy_template.id ) self.assertEqual({'answer': 42}, deploy_template.extra) def test_deploy_template_negative_non_existing(self): uuid = "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_deploy_template, uuid, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.delete_deploy_template, uuid, ignore_missing=False, ) self.assertIsNone(self.conn.baremetal.delete_deploy_template(uuid)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/test_baremetal_driver.py0000664000175000017500000000440300000000000030377 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack.tests.functional.baremetal import base class TestBareMetalDriver(base.BaseBaremetalTest): def test_fake_hardware_get(self): driver = self.conn.baremetal.get_driver('fake-hardware') self.assertEqual('fake-hardware', driver.name) self.assertNotEqual([], driver.hosts) def test_fake_hardware_list(self): drivers = self.conn.baremetal.drivers() self.assertIn('fake-hardware', [d.name for d in drivers]) def test_driver_negative_non_existing(self): self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_driver, 'not-a-driver', ) class TestBareMetalDriverDetails(base.BaseBaremetalTest): min_microversion = '1.30' def test_fake_hardware_get(self): driver = self.conn.baremetal.get_driver('fake-hardware') self.assertEqual('fake-hardware', driver.name) for iface in ('boot', 'deploy', 'management', 'power'): self.assertIn( 'fake', getattr(driver, 'enabled_%s_interfaces' % iface) ) self.assertEqual( 'fake', getattr(driver, 'default_%s_interface' % iface) ) self.assertNotEqual([], driver.hosts) def test_fake_hardware_list_details(self): drivers = self.conn.baremetal.drivers(details=True) driver = [d for d in drivers if d.name == 'fake-hardware'][0] for iface in ('boot', 'deploy', 'management', 'power'): self.assertIn( 'fake', getattr(driver, 'enabled_%s_interfaces' % iface) ) self.assertEqual( 'fake', getattr(driver, 'default_%s_interface' % iface) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/test_baremetal_node.py0000664000175000017500000004416400000000000030041 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import uuid from openstack import exceptions from openstack.tests.functional.baremetal import base class TestBareMetalNode(base.BaseBaremetalTest): def test_node_create_get_delete(self): node = self.create_node(name='node-name') self.assertEqual(node.name, 'node-name') self.assertEqual(node.driver, 'fake-hardware') self.assertEqual(node.provision_state, 'enroll') self.assertFalse(node.is_maintenance) # NOTE(dtantsur): get_node and find_node only differ in handing missing # nodes, otherwise they are identical. for call, ident in [ (self.conn.baremetal.get_node, self.node_id), (self.conn.baremetal.get_node, 'node-name'), (self.conn.baremetal.find_node, self.node_id), (self.conn.baremetal.find_node, 'node-name'), ]: found = call(ident) self.assertEqual(node.id, found.id) self.assertEqual(node.name, found.name) with_fields = self.conn.baremetal.get_node( 'node-name', fields=['uuid', 'driver', 'instance_id'] ) self.assertEqual(node.id, with_fields.id) self.assertEqual(node.driver, with_fields.driver) self.assertIsNone(with_fields.name) self.assertIsNone(with_fields.provision_state) nodes = self.conn.baremetal.nodes() self.assertIn(node.id, [n.id for n in nodes]) self.conn.baremetal.delete_node(node, ignore_missing=False) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_node, self.node_id, ) def test_node_create_in_available(self): node = self.create_node(name='node-name', provision_state='available') self.assertEqual(node.name, 'node-name') self.assertEqual(node.driver, 'fake-hardware') self.assertEqual(node.provision_state, 
'available') self.conn.baremetal.delete_node(node, ignore_missing=False) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_node, self.node_id, ) def test_node_update(self): node = self.create_node(name='node-name', extra={'foo': 'bar'}) node.name = 'new-name' node.extra = {'answer': 42} instance_uuid = str(uuid.uuid4()) node = self.conn.baremetal.update_node(node, instance_id=instance_uuid) self.assertEqual('new-name', node.name) self.assertEqual({'answer': 42}, node.extra) self.assertEqual(instance_uuid, node.instance_id) node = self.conn.baremetal.get_node('new-name') self.assertEqual('new-name', node.name) self.assertEqual({'answer': 42}, node.extra) self.assertEqual(instance_uuid, node.instance_id) node = self.conn.baremetal.update_node(node, instance_id=None) self.assertIsNone(node.instance_id) node = self.conn.baremetal.get_node('new-name') self.assertIsNone(node.instance_id) def test_node_update_by_name(self): self.create_node(name='node-name', extra={'foo': 'bar'}) instance_uuid = str(uuid.uuid4()) node = self.conn.baremetal.update_node( 'node-name', instance_id=instance_uuid, extra={'answer': 42} ) self.assertEqual({'answer': 42}, node.extra) self.assertEqual(instance_uuid, node.instance_id) node = self.conn.baremetal.get_node('node-name') self.assertEqual({'answer': 42}, node.extra) self.assertEqual(instance_uuid, node.instance_id) node = self.conn.baremetal.update_node('node-name', instance_id=None) self.assertIsNone(node.instance_id) node = self.conn.baremetal.get_node('node-name') self.assertIsNone(node.instance_id) def test_node_patch(self): node = self.create_node(name='node-name', extra={'foo': 'bar'}) node.name = 'new-name' instance_uuid = str(uuid.uuid4()) node = self.conn.baremetal.patch_node( node, [ dict(path='/instance_id', op='replace', value=instance_uuid), dict(path='/extra/answer', op='add', value=42), ], ) self.assertEqual('new-name', node.name) self.assertEqual({'foo': 'bar', 'answer': 42}, node.extra) 
self.assertEqual(instance_uuid, node.instance_id) node = self.conn.baremetal.get_node('new-name') self.assertEqual('new-name', node.name) self.assertEqual({'foo': 'bar', 'answer': 42}, node.extra) self.assertEqual(instance_uuid, node.instance_id) node = self.conn.baremetal.patch_node( node, [ dict(path='/instance_id', op='remove'), dict(path='/extra/answer', op='remove'), ], ) self.assertIsNone(node.instance_id) self.assertNotIn('answer', node.extra) node = self.conn.baremetal.get_node('new-name') self.assertIsNone(node.instance_id) self.assertNotIn('answer', node.extra) def test_node_list_update_delete(self): self.create_node(name='node-name', extra={'foo': 'bar'}) node = next( n for n in self.conn.baremetal.nodes( details=True, provision_state='enroll', is_maintenance=False, associated=False, ) if n.name == 'node-name' ) self.assertEqual(node.extra, {'foo': 'bar'}) # This test checks that resources returned from listing are usable self.conn.baremetal.update_node(node, extra={'foo': 42}) self.conn.baremetal.delete_node(node, ignore_missing=False) def test_node_create_in_enroll_provide(self): node = self.create_node() self.node_id = node.id self.assertEqual(node.driver, 'fake-hardware') self.assertEqual(node.provision_state, 'enroll') self.assertIsNone(node.power_state) self.assertFalse(node.is_maintenance) self.conn.baremetal.set_node_provision_state(node, 'manage', wait=True) self.assertEqual(node.provision_state, 'manageable') self.conn.baremetal.set_node_provision_state( node, 'provide', wait=True ) self.assertEqual(node.provision_state, 'available') def test_node_create_in_enroll_provide_by_name(self): name = 'node-%d' % random.randint(0, 1000) node = self.create_node(name=name) self.node_id = node.id self.assertEqual(node.driver, 'fake-hardware') self.assertEqual(node.provision_state, 'enroll') self.assertIsNone(node.power_state) self.assertFalse(node.is_maintenance) node = self.conn.baremetal.set_node_provision_state( name, 'manage', wait=True ) 
self.assertEqual(node.provision_state, 'manageable') node = self.conn.baremetal.set_node_provision_state( name, 'provide', wait=True ) self.assertEqual(node.provision_state, 'available') def test_node_power_state(self): node = self.create_node() self.assertIsNone(node.power_state) self.conn.baremetal.set_node_power_state(node, 'power on', wait=True) node = self.conn.baremetal.get_node(node.id) self.assertEqual('power on', node.power_state) self.conn.baremetal.set_node_power_state(node, 'power off', wait=True) node = self.conn.baremetal.get_node(node.id) self.assertEqual('power off', node.power_state) def test_node_validate(self): node = self.create_node() # Fake hardware passes validation for all interfaces result = self.conn.baremetal.validate_node(node) for iface in ('boot', 'deploy', 'management', 'power'): self.assertTrue(result[iface].result) self.assertFalse(result[iface].reason) def test_node_negative_non_existing(self): uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_node, uuid ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.find_node, uuid, ignore_missing=False, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.delete_node, uuid, ignore_missing=False, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.update_node, uuid, name='new-name', ) self.assertIsNone(self.conn.baremetal.find_node(uuid)) self.assertIsNone(self.conn.baremetal.delete_node(uuid)) def test_maintenance(self): reason = "Prepating for taking over the world" node = self.create_node() self.assertFalse(node.is_maintenance) self.assertIsNone(node.maintenance_reason) # Initial setting without the reason node = self.conn.baremetal.set_node_maintenance(node) self.assertTrue(node.is_maintenance) self.assertIsNone(node.maintenance_reason) # Updating the reason later node = self.conn.baremetal.set_node_maintenance(node, reason) self.assertTrue(node.is_maintenance) 
self.assertEqual(reason, node.maintenance_reason) # Removing the reason later node = self.conn.baremetal.set_node_maintenance(node) self.assertTrue(node.is_maintenance) self.assertIsNone(node.maintenance_reason) # Unsetting maintenance node = self.conn.baremetal.unset_node_maintenance(node) self.assertFalse(node.is_maintenance) self.assertIsNone(node.maintenance_reason) # Initial setting with the reason node = self.conn.baremetal.set_node_maintenance(node, reason) self.assertTrue(node.is_maintenance) self.assertEqual(reason, node.maintenance_reason) def test_maintenance_via_update(self): reason = "Prepating for taking over the world" node = self.create_node() # Initial setting without the reason node = self.conn.baremetal.update_node(node, is_maintenance=True) self.assertTrue(node.is_maintenance) self.assertIsNone(node.maintenance_reason) # Make sure the change has effect on the remote side. node = self.conn.baremetal.get_node(node.id) self.assertTrue(node.is_maintenance) self.assertIsNone(node.maintenance_reason) # Updating the reason later node = self.conn.baremetal.update_node(node, maintenance_reason=reason) self.assertTrue(node.is_maintenance) self.assertEqual(reason, node.maintenance_reason) # Make sure the change has effect on the remote side. node = self.conn.baremetal.get_node(node.id) self.assertTrue(node.is_maintenance) self.assertEqual(reason, node.maintenance_reason) # Unsetting maintenance node = self.conn.baremetal.update_node(node, is_maintenance=False) self.assertFalse(node.is_maintenance) self.assertIsNone(node.maintenance_reason) # Make sure the change has effect on the remote side. 
node = self.conn.baremetal.get_node(node.id) self.assertFalse(node.is_maintenance) self.assertIsNone(node.maintenance_reason) # Initial setting with the reason node = self.conn.baremetal.update_node( node, is_maintenance=True, maintenance_reason=reason ) self.assertTrue(node.is_maintenance) self.assertEqual(reason, node.maintenance_reason) # Make sure the change has effect on the remote side. node = self.conn.baremetal.get_node(node.id) self.assertTrue(node.is_maintenance) self.assertEqual(reason, node.maintenance_reason) class TestNodeRetired(base.BaseBaremetalTest): min_microversion = '1.61' def test_retired(self): reason = "I'm too old for this s...tuff!" node = self.create_node() # Set retired without reason node = self.conn.baremetal.update_node(node, is_retired=True) self.assertTrue(node.is_retired) self.assertIsNone(node.retired_reason) # Verify set retired on server side node = self.conn.baremetal.get_node(node.id) self.assertTrue(node.is_retired) self.assertIsNone(node.retired_reason) # Add the reason node = self.conn.baremetal.update_node(node, retired_reason=reason) self.assertTrue(node.is_retired) self.assertEqual(reason, node.retired_reason) # Verify the reason on server side node = self.conn.baremetal.get_node(node.id) self.assertTrue(node.is_retired) self.assertEqual(reason, node.retired_reason) # Unset retired node = self.conn.baremetal.update_node(node, is_retired=False) self.assertFalse(node.is_retired) self.assertIsNone(node.retired_reason) # Verify on server side node = self.conn.baremetal.get_node(node.id) self.assertFalse(node.is_retired) self.assertIsNone(node.retired_reason) # Set retired with reason node = self.conn.baremetal.update_node( node, is_retired=True, retired_reason=reason ) self.assertTrue(node.is_retired) self.assertEqual(reason, node.retired_reason) # Verify on server side node = self.conn.baremetal.get_node(node.id) self.assertTrue(node.is_retired) self.assertEqual(reason, node.retired_reason) def 
test_retired_in_available(self): node = self.create_node(provision_state='available') # Set retired when node state available should fail! self.assertRaises( exceptions.ConflictException, self.conn.baremetal.update_node, node, is_retired=True, ) class TestBareMetalNodeFields(base.BaseBaremetalTest): min_microversion = '1.8' def test_node_fields(self): self.create_node() result = self.conn.baremetal.nodes( fields=['uuid', 'name', 'instance_id'] ) for item in result: self.assertIsNotNone(item.id) self.assertIsNone(item.driver) class TestBareMetalVif(base.BaseBaremetalTest): min_microversion = '1.28' def setUp(self): super().setUp() self.node = self.create_node(network_interface='noop') self.vif_id = "200712fc-fdfb-47da-89a6-2d19f76c7618" def test_node_vif_attach_detach(self): self.conn.baremetal.attach_vif_to_node(self.node, self.vif_id) # NOTE(dtantsur): The noop networking driver is completely noop - the # VIF list does not return anything of value. self.conn.baremetal.list_node_vifs(self.node) res = self.conn.baremetal.detach_vif_from_node( self.node, self.vif_id, ignore_missing=False ) self.assertTrue(res) def test_node_vif_negative(self): uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.attach_vif_to_node, uuid, self.vif_id, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.list_node_vifs, uuid, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.detach_vif_from_node, uuid, self.vif_id, ignore_missing=False, ) class TestTraits(base.BaseBaremetalTest): min_microversion = '1.37' def setUp(self): super().setUp() self.node = self.create_node() def test_add_remove_node_trait(self): node = self.conn.baremetal.get_node(self.node) self.assertEqual([], node.traits) self.conn.baremetal.add_node_trait(self.node, 'CUSTOM_FAKE') self.assertEqual(['CUSTOM_FAKE'], self.node.traits) node = self.conn.baremetal.get_node(self.node) self.assertEqual(['CUSTOM_FAKE'], 
node.traits) self.conn.baremetal.add_node_trait(self.node, 'CUSTOM_REAL') self.assertEqual( sorted(['CUSTOM_FAKE', 'CUSTOM_REAL']), sorted(self.node.traits) ) node = self.conn.baremetal.get_node(self.node) self.assertEqual( sorted(['CUSTOM_FAKE', 'CUSTOM_REAL']), sorted(node.traits) ) self.conn.baremetal.remove_node_trait( node, 'CUSTOM_FAKE', ignore_missing=False ) self.assertEqual(['CUSTOM_REAL'], self.node.traits) node = self.conn.baremetal.get_node(self.node) self.assertEqual(['CUSTOM_REAL'], node.traits) def test_set_node_traits(self): node = self.conn.baremetal.get_node(self.node) self.assertEqual([], node.traits) traits1 = ['CUSTOM_FAKE', 'CUSTOM_REAL'] traits2 = ['CUSTOM_FOOBAR'] self.conn.baremetal.set_node_traits(self.node, traits1) self.assertEqual(sorted(traits1), sorted(self.node.traits)) node = self.conn.baremetal.get_node(self.node) self.assertEqual(sorted(traits1), sorted(node.traits)) self.conn.baremetal.set_node_traits(self.node, traits2) self.assertEqual(['CUSTOM_FOOBAR'], self.node.traits) node = self.conn.baremetal.get_node(self.node) self.assertEqual(['CUSTOM_FOOBAR'], node.traits) class TestBareMetalNodeListFirmware(base.BaseBaremetalTest): min_microversion = '1.86' def test_list_firmware(self): node = self.create_node(firmware_interface="no-firmware") self.assertEqual("no-firmware", node.firmware_interface) result = self.conn.baremetal.list_node_firmware(node) self.assertEqual({'firmware': []}, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/test_baremetal_port.py0000664000175000017500000001213000000000000030064 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack.tests.functional.baremetal import base class TestBareMetalPort(base.BaseBaremetalTest): def setUp(self): super().setUp() self.node = self.create_node() def test_port_create_get_delete(self): port = self.create_port(address='11:22:33:44:55:66') self.assertEqual(self.node_id, port.node_id) # Can be None if the microversion is too small, so we make sure it is # not False. self.assertNotEqual(port.is_pxe_enabled, False) self.assertIsNone(port.port_group_id) loaded = self.conn.baremetal.get_port(port.id) self.assertEqual(loaded.id, port.id) self.assertIsNotNone(loaded.address) with_fields = self.conn.baremetal.get_port( port.id, fields=['uuid', 'extra', 'node_id'] ) self.assertEqual(port.id, with_fields.id) self.assertIsNone(with_fields.address) self.conn.baremetal.delete_port(port, ignore_missing=False) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_port, port.id ) def test_port_list(self): node2 = self.create_node(name='test-node') port1 = self.create_port(address='11:22:33:44:55:66', node_id=node2.id) port2 = self.create_port( address='11:22:33:44:55:77', node_id=self.node.id ) ports = self.conn.baremetal.ports(address='11:22:33:44:55:77') self.assertEqual([p.id for p in ports], [port2.id]) ports = self.conn.baremetal.ports(node=node2.id) self.assertEqual([p.id for p in ports], [port1.id]) ports = self.conn.baremetal.ports(node='test-node') self.assertEqual([p.id for p in ports], [port1.id]) def test_port_list_update_delete(self): self.create_port( address='11:22:33:44:55:66', 
node_id=self.node.id, extra={'foo': 'bar'}, ) port = next( self.conn.baremetal.ports( details=True, address='11:22:33:44:55:66' ) ) self.assertEqual(port.extra, {'foo': 'bar'}) # This test checks that resources returned from listing are usable self.conn.baremetal.update_port(port, extra={'foo': 42}) self.conn.baremetal.delete_port(port, ignore_missing=False) def test_port_update(self): port = self.create_port(address='11:22:33:44:55:66') port.address = '66:55:44:33:22:11' port.extra = {'answer': 42} port = self.conn.baremetal.update_port(port) self.assertEqual('66:55:44:33:22:11', port.address) self.assertEqual({'answer': 42}, port.extra) port = self.conn.baremetal.get_port(port.id) self.assertEqual('66:55:44:33:22:11', port.address) self.assertEqual({'answer': 42}, port.extra) def test_port_patch(self): port = self.create_port(address='11:22:33:44:55:66') port.address = '66:55:44:33:22:11' port = self.conn.baremetal.patch_port( port, dict(path='/extra/answer', op='add', value=42) ) self.assertEqual('66:55:44:33:22:11', port.address) self.assertEqual({'answer': 42}, port.extra) port = self.conn.baremetal.get_port(port.id) self.assertEqual('66:55:44:33:22:11', port.address) self.assertEqual({'answer': 42}, port.extra) def test_port_negative_non_existing(self): uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_port, uuid ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.find_port, uuid, ignore_missing=False, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.delete_port, uuid, ignore_missing=False, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.update_port, uuid, pxe_enabled=True, ) self.assertIsNone(self.conn.baremetal.find_port(uuid)) self.assertIsNone(self.conn.baremetal.delete_port(uuid)) class TestBareMetalPortFields(base.BaseBaremetalTest): min_microversion = '1.8' def test_port_fields(self): self.create_node() 
self.create_port(address='11:22:33:44:55:66') result = self.conn.baremetal.ports(fields=['uuid', 'node_id']) for item in result: self.assertIsNotNone(item.id) self.assertIsNone(item.address) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/test_baremetal_port_group.py0000664000175000017500000001106600000000000031307 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack.tests.functional.baremetal import base class TestBareMetalPortGroup(base.BaseBaremetalTest): min_microversion = '1.23' def setUp(self): super().setUp() self.node = self.create_node() def test_port_group_create_get_delete(self): port_group = self.create_port_group() loaded = self.conn.baremetal.get_port_group(port_group.id) self.assertEqual(loaded.id, port_group.id) self.assertIsNotNone(loaded.node_id) with_fields = self.conn.baremetal.get_port_group( port_group.id, fields=['uuid', 'extra'] ) self.assertEqual(port_group.id, with_fields.id) self.assertIsNone(with_fields.node_id) self.conn.baremetal.delete_port_group(port_group, ignore_missing=False) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_port_group, port_group.id, ) def test_port_list(self): node2 = self.create_node(name='test-node') pg1 = self.create_port_group( address='11:22:33:44:55:66', node_id=node2.id ) pg2 = self.create_port_group( address='11:22:33:44:55:77', node_id=self.node.id ) pgs = self.conn.baremetal.port_groups(address='11:22:33:44:55:77') self.assertEqual([p.id for p in pgs], [pg2.id]) pgs = self.conn.baremetal.port_groups(node=node2.id) self.assertEqual([p.id for p in pgs], [pg1.id]) pgs = self.conn.baremetal.port_groups(node='test-node') self.assertEqual([p.id for p in pgs], [pg1.id]) def test_port_list_update_delete(self): self.create_port_group( address='11:22:33:44:55:66', extra={'foo': 'bar'} ) port_group = next( self.conn.baremetal.port_groups( details=True, address='11:22:33:44:55:66' ) ) self.assertEqual(port_group.extra, {'foo': 'bar'}) # This test checks that resources returned from listing are usable self.conn.baremetal.update_port_group(port_group, extra={'foo': 42}) self.conn.baremetal.delete_port_group(port_group, ignore_missing=False) def test_port_group_update(self): port_group = self.create_port_group() port_group.extra = {'answer': 42} port_group = self.conn.baremetal.update_port_group(port_group) 
self.assertEqual({'answer': 42}, port_group.extra) port_group = self.conn.baremetal.get_port_group(port_group.id) self.assertEqual({'answer': 42}, port_group.extra) def test_port_group_patch(self): port_group = self.create_port_group() port_group = self.conn.baremetal.patch_port_group( port_group, dict(path='/extra/answer', op='add', value=42) ) self.assertEqual({'answer': 42}, port_group.extra) port_group = self.conn.baremetal.get_port_group(port_group.id) self.assertEqual({'answer': 42}, port_group.extra) def test_port_group_negative_non_existing(self): uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_port_group, uuid, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.find_port_group, uuid, ignore_missing=False, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.delete_port_group, uuid, ignore_missing=False, ) self.assertIsNone(self.conn.baremetal.find_port_group(uuid)) self.assertIsNone(self.conn.baremetal.delete_port_group(uuid)) def test_port_group_fields(self): self.create_node() self.create_port_group(address='11:22:33:44:55:66') result = self.conn.baremetal.port_groups(fields=['uuid', 'name']) for item in result: self.assertIsNotNone(item.id) self.assertIsNone(item.address) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/test_baremetal_volume_connector.py0000664000175000017500000001610100000000000032463 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack.tests.functional.baremetal import base class TestBareMetalVolumeconnector(base.BaseBaremetalTest): min_microversion = '1.32' def setUp(self): super().setUp() self.node = self.create_node(provision_state='enroll') def test_volume_connector_create_get_delete(self): self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') volume_connector = self.create_volume_connector( connector_id='iqn.2017-07.org.openstack:01:d9a51732c3f', type='iqn' ) loaded = self.conn.baremetal.get_volume_connector(volume_connector.id) self.assertEqual(loaded.id, volume_connector.id) self.assertIsNotNone(loaded.node_id) with_fields = self.conn.baremetal.get_volume_connector( volume_connector.id, fields=['uuid', 'extra'] ) self.assertEqual(volume_connector.id, with_fields.id) self.assertIsNone(with_fields.node_id) self.conn.baremetal.delete_volume_connector( volume_connector, ignore_missing=False ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_volume_connector, volume_connector.id, ) def test_volume_connector_list(self): node2 = self.create_node(name='test-node') self.conn.baremetal.set_node_provision_state( node2, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(node2, 'power off') self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') vc1 = self.create_volume_connector( connector_id='iqn.2018-07.org.openstack:01:d9a514g2c32', 
node_id=node2.id, type='iqn', ) vc2 = self.create_volume_connector( connector_id='iqn.2017-07.org.openstack:01:d9a51732c4g', node_id=self.node.id, type='iqn', ) vcs = self.conn.baremetal.volume_connectors(node=self.node.id) self.assertEqual([v.id for v in vcs], [vc2.id]) vcs = self.conn.baremetal.volume_connectors(node=node2.id) self.assertEqual([v.id for v in vcs], [vc1.id]) vcs = self.conn.baremetal.volume_connectors(node='test-node') self.assertEqual([v.id for v in vcs], [vc1.id]) def test_volume_connector_list_update_delete(self): self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') self.create_volume_connector( connector_id='iqn.2020-07.org.openstack:02:d9451472ce2', node_id=self.node.id, type='iqn', extra={'foo': 'bar'}, ) volume_connector = next( self.conn.baremetal.volume_connectors( details=True, node=self.node.id ) ) self.assertEqual(volume_connector.extra, {'foo': 'bar'}) # This test checks that resources returned from listing are usable self.conn.baremetal.update_volume_connector( volume_connector, extra={'foo': 42} ) self.conn.baremetal.delete_volume_connector( volume_connector, ignore_missing=False ) def test_volume_connector_update(self): self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') volume_connector = self.create_volume_connector( connector_id='iqn.2019-07.org.openstack:03:de45b472c40', node_id=self.node.id, type='iqn', ) volume_connector.extra = {'answer': 42} volume_connector = self.conn.baremetal.update_volume_connector( volume_connector ) self.assertEqual({'answer': 42}, volume_connector.extra) volume_connector = self.conn.baremetal.get_volume_connector( volume_connector.id ) self.assertEqual({'answer': 42}, volume_connector.extra) def test_volume_connector_patch(self): vol_conn_id = 'iqn.2020-07.org.openstack:04:de45b472c40' 
self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') volume_connector = self.create_volume_connector( connector_id=vol_conn_id, node_id=self.node.id, type='iqn' ) volume_connector = self.conn.baremetal.patch_volume_connector( volume_connector, dict(path='/extra/answer', op='add', value=42) ) self.assertEqual({'answer': 42}, volume_connector.extra) self.assertEqual(vol_conn_id, volume_connector.connector_id) volume_connector = self.conn.baremetal.get_volume_connector( volume_connector.id ) self.assertEqual({'answer': 42}, volume_connector.extra) def test_volume_connector_negative_non_existing(self): uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_volume_connector, uuid, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.find_volume_connector, uuid, ignore_missing=False, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.delete_volume_connector, uuid, ignore_missing=False, ) self.assertIsNone(self.conn.baremetal.find_volume_connector(uuid)) self.assertIsNone(self.conn.baremetal.delete_volume_connector(uuid)) def test_volume_connector_fields(self): self.create_node() self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') self.create_volume_connector( connector_id='iqn.2018-08.org.openstack:04:de45f37c48', node_id=self.node.id, type='iqn', ) result = self.conn.baremetal.volume_connectors( fields=['uuid', 'node_id'] ) for item in result: self.assertIsNotNone(item.id) self.assertIsNone(item.connector_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/baremetal/test_baremetal_volume_target.py0000664000175000017500000001664100000000000031770 0ustar00zuulzuul00000000000000# Licensed 
under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack.tests.functional.baremetal import base class TestBareMetalVolumetarget(base.BaseBaremetalTest): min_microversion = '1.32' def setUp(self): super().setUp() self.node = self.create_node(provision_state='enroll') def test_volume_target_create_get_delete(self): self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') volume_target = self.create_volume_target( boot_index=0, volume_id='04452bed-5367-4202-8bf5-de4335ac56d2', volume_type='iscsi', ) loaded = self.conn.baremetal.get_volume_target(volume_target.id) self.assertEqual(loaded.id, volume_target.id) self.assertIsNotNone(loaded.node_id) with_fields = self.conn.baremetal.get_volume_target( volume_target.id, fields=['uuid', 'extra'] ) self.assertEqual(volume_target.id, with_fields.id) self.assertIsNone(with_fields.node_id) self.conn.baremetal.delete_volume_target( volume_target, ignore_missing=False ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_volume_target, volume_target.id, ) def test_volume_target_list(self): node2 = self.create_node(name='test-node') self.conn.baremetal.set_node_provision_state( node2, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(node2, 'power off') self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') vt1 = 
self.create_volume_target( boot_index=0, volume_id='bd4d008c-7d31-463d-abf9-6c23d9d55f7f', node_id=node2.id, volume_type='iscsi', ) vt2 = self.create_volume_target( boot_index=0, volume_id='04452bed-5367-4202-8bf5-de4335ac57c2', node_id=self.node.id, volume_type='iscsi', ) vts = self.conn.baremetal.volume_targets(node=self.node.id) self.assertEqual([v.id for v in vts], [vt2.id]) vts = self.conn.baremetal.volume_targets(node=node2.id) self.assertEqual([v.id for v in vts], [vt1.id]) vts = self.conn.baremetal.volume_targets(node='test-node') self.assertEqual([v.id for v in vts], [vt1.id]) vts_with_details = self.conn.baremetal.volume_targets(details=True) for i in vts_with_details: self.assertIsNotNone(i.id) self.assertIsNotNone(i.volume_type) vts_with_fields = self.conn.baremetal.volume_targets( fields=['uuid', 'node_uuid'] ) for i in vts_with_fields: self.assertIsNotNone(i.id) self.assertIsNone(i.volume_type) self.assertIsNotNone(i.node_id) def test_volume_target_list_update_delete(self): self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') self.create_volume_target( boot_index=0, volume_id='04452bed-5367-4202-8bf5-de4335ac57h3', node_id=self.node.id, volume_type='iscsi', extra={'foo': 'bar'}, ) volume_target = next( self.conn.baremetal.volume_targets(details=True, node=self.node.id) ) self.assertEqual(volume_target.extra, {'foo': 'bar'}) # This test checks that resources returned from listing are usable self.conn.baremetal.update_volume_target( volume_target, extra={'foo': 42} ) self.conn.baremetal.delete_volume_target( volume_target, ignore_missing=False ) def test_volume_target_update(self): self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') volume_target = self.create_volume_target( boot_index=0, volume_id='04452bed-5367-4202-8bf5-de4335ac53h7', node_id=self.node.id, 
volume_type='isci', ) volume_target.extra = {'answer': 42} volume_target = self.conn.baremetal.update_volume_target(volume_target) self.assertEqual({'answer': 42}, volume_target.extra) volume_target = self.conn.baremetal.get_volume_target(volume_target.id) self.assertEqual({'answer': 42}, volume_target.extra) def test_volume_target_patch(self): vol_targ_id = '04452bed-5367-4202-9cg6-de4335ac53h7' self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') volume_target = self.create_volume_target( boot_index=0, volume_id=vol_targ_id, node_id=self.node.id, volume_type='isci', ) volume_target = self.conn.baremetal.patch_volume_target( volume_target, dict(path='/extra/answer', op='add', value=42) ) self.assertEqual({'answer': 42}, volume_target.extra) self.assertEqual(vol_targ_id, volume_target.volume_id) volume_target = self.conn.baremetal.get_volume_target(volume_target.id) self.assertEqual({'answer': 42}, volume_target.extra) def test_volume_target_negative_non_existing(self): uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.get_volume_target, uuid, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.find_volume_target, uuid, ignore_missing=False, ) self.assertRaises( exceptions.NotFoundException, self.conn.baremetal.delete_volume_target, uuid, ignore_missing=False, ) self.assertIsNone(self.conn.baremetal.find_volume_target(uuid)) self.assertIsNone(self.conn.baremetal.delete_volume_target(uuid)) def test_volume_target_fields(self): self.create_node() self.conn.baremetal.set_node_provision_state( self.node, 'manage', wait=True ) self.conn.baremetal.set_node_power_state(self.node, 'power off') self.create_volume_target( boot_index=0, volume_id='04452bed-5367-4202-8bf5-99ae634d8971', node_id=self.node.id, volume_type='iscsi', ) result = self.conn.baremetal.volume_targets(fields=['uuid', 'node_id']) for 
item in result: self.assertIsNotNone(item.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/base.py0000664000175000017500000002160200000000000023007 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator import os import time import uuid from keystoneauth1 import discover import openstack.config from openstack import connection from openstack.tests import base #: Defines the OpenStack Client Config (OCC) cloud key in your OCC config #: file, typically in $HOME/.config/openstack/clouds.yaml. That configuration #: will determine where the functional tests will be run and what resource #: defaults will be used to run the functional tests. 
TEST_CONFIG = openstack.config.OpenStackConfig() TEST_CLOUD_NAME = os.getenv('OS_CLOUD', 'devstack-admin') TEST_CLOUD_REGION = openstack.config.get_cloud_region(cloud=TEST_CLOUD_NAME) def _get_resource_value(resource_key): return TEST_CONFIG.get_extra_config('functional').get(resource_key) def _disable_keep_alive(conn): sess = conn.config.get_session() sess.keep_alive = False class BaseFunctionalTest(base.TestCase): user_cloud: connection.Connection user_cloud_alt: connection.Connection operator_cloud: connection.Connection _wait_for_timeout_key = '' def setUp(self): super().setUp() self.conn = connection.Connection(config=TEST_CLOUD_REGION) _disable_keep_alive(self.conn) self._demo_name = os.environ.get('OPENSTACKSDK_DEMO_CLOUD', 'devstack') if not self._demo_name: raise self.failureException( "OPENSTACKSDK_OPERATOR_CLOUD must be set to a non-empty value" ) self._demo_name_alt = os.environ.get( 'OPENSTACKSDK_DEMO_CLOUD_ALT', 'devstack-alt', ) if not self._demo_name_alt: raise self.failureException( "OPENSTACKSDK_OPERATOR_CLOUD must be set to a non-empty value" ) self._op_name = os.environ.get( 'OPENSTACKSDK_OPERATOR_CLOUD', 'devstack-admin', ) if not self._op_name: raise self.failureException( "OPENSTACKSDK_OPERATOR_CLOUD must be set to a non-empty value" ) self.config = openstack.config.OpenStackConfig() self._set_user_cloud() self._set_operator_cloud() self.identity_version = self.user_cloud.config.get_api_version( 'identity' ) self.flavor = self._pick_flavor() self.image = self._pick_image() # Defines default timeout for wait_for methods used # in the functional tests self._wait_for_timeout = int( os.getenv( self._wait_for_timeout_key, os.getenv('OPENSTACKSDK_FUNC_TEST_TIMEOUT', 300), ) ) def _set_user_cloud(self, **kwargs): user_config = self.config.get_one(cloud=self._demo_name, **kwargs) self.user_cloud = connection.Connection(config=user_config) _disable_keep_alive(self.user_cloud) user_config_alt = self.config.get_one( cloud=self._demo_name_alt, **kwargs ) 
self.user_cloud_alt = connection.Connection(config=user_config_alt) _disable_keep_alive(self.user_cloud_alt) def _set_operator_cloud(self, **kwargs): operator_config = self.config.get_one(cloud=self._op_name, **kwargs) self.operator_cloud = connection.Connection(config=operator_config) _disable_keep_alive(self.operator_cloud) def _pick_flavor(self): """Pick a sensible flavor to run tests with. This returns None if the compute service is not present (e.g. ironic-only deployments). """ if not self.user_cloud.has_service('compute'): return None flavors = self.user_cloud.list_flavors(get_extra=False) # self.add_info_on_exception('flavors', flavors) flavor_name = os.environ.get('OPENSTACKSDK_FLAVOR') if not flavor_name: flavor_name = _get_resource_value('flavor_name') if flavor_name: for flavor in flavors: if flavor.name == flavor_name: return flavor raise self.failureException( "Cloud does not have flavor '%s'", flavor_name, ) # Enable running functional tests against RAX, which requires # performance flavors be used for boot from volume for flavor in sorted(flavors, key=operator.attrgetter('ram')): if 'performance' in flavor.name: return flavor # Otherwise, pick the smallest flavor with a ephemeral disk configured for flavor in sorted(flavors, key=operator.attrgetter('ram')): if flavor.disk: return flavor raise self.failureException('No sensible flavor found') def _pick_image(self): """Pick a sensible image to run tests with. This returns None if the image service is not present. 
""" if not self.user_cloud.has_service('image'): return None images = self.user_cloud.list_images() # self.add_info_on_exception('images', images) image_name = os.environ.get('OPENSTACKSDK_IMAGE') if not image_name: image_name = _get_resource_value('image_name') if image_name: for image in images: if image.name == image_name: return image raise self.failureException( "Cloud does not have image '%s'", image_name, ) for image in images: if image.name.startswith('cirros') and image.name.endswith('-uec'): return image for image in images: if ( image.name.startswith('cirros') and image.disk_format == 'qcow2' ): return image for image in images: if image.name.lower().startswith('ubuntu'): return image for image in images: if image.name.lower().startswith('centos'): return image raise self.failureException('No sensible image found') def addEmptyCleanup(self, func, *args, **kwargs): def cleanup(): result = func(*args, **kwargs) self.assertIsNone(result) self.addCleanup(cleanup) def require_service(self, service_type, min_microversion=None, **kwargs): """Method to check whether a service exists Usage:: class TestMeter(base.BaseFunctionalTest): def setUp(self): super(TestMeter, self).setUp() self.require_service('metering') :returns: True if the service exists, otherwise False. 
""" if not self.conn.has_service(service_type): self.skipTest( 'Service {service_type} not found in cloud'.format( service_type=service_type ) ) if not min_microversion: return data = self.conn.session.get_endpoint_data( service_type=service_type, **kwargs ) if not ( data.min_microversion and data.max_microversion and discover.version_between( data.min_microversion, data.max_microversion, min_microversion, ) ): self.skipTest( f'Service {service_type} does not provide microversion ' f'{min_microversion}' ) def getUniqueString(self, prefix=None): """Generate unique resource name""" # Globally unique names can only rely on some form of uuid # unix_t is also used to easier determine orphans when running real # functional tests on a real cloud return (prefix if prefix else '') + "{time}-{uuid}".format( time=int(time.time()), uuid=uuid.uuid4().hex ) class KeystoneBaseFunctionalTest(BaseFunctionalTest): def setUp(self): super().setUp() use_keystone_v2 = os.environ.get('OPENSTACKSDK_USE_KEYSTONE_V2', False) if use_keystone_v2: # keystone v2 has special behavior for the admin # interface and some of the operations, so make a new cloud # object with interface set to admin. 
# We only do it for keystone tests on v2 because otherwise # the admin interface is not a thing that wants to actually # be used self._set_operator_cloud(interface='admin') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3373578 openstacksdk-4.0.0/openstack/tests/functional/block_storage/0000775000175000017500000000000000000000000024340 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/__init__.py0000664000175000017500000000000000000000000026437 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3413596 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v2/0000775000175000017500000000000000000000000024667 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v2/__init__.py0000664000175000017500000000000000000000000026766 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v2/base.py0000664000175000017500000000200700000000000026152 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class BaseBlockStorageTest(base.BaseFunctionalTest): _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_BLOCK_STORAGE' def setUp(self): super().setUp() self._set_user_cloud(block_storage_api_version='2') self._set_operator_cloud(block_storage_api_version='2') if not self.user_cloud.has_service('block-storage', '2'): self.skipTest('block-storage service not supported by cloud') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v2/test_backup.py0000664000175000017500000000465200000000000027554 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.block_storage.v2 import backup as _backup from openstack.block_storage.v2 import volume as _volume from openstack.tests.functional.block_storage.v2 import base class TestBackup(base.BaseBlockStorageTest): def setUp(self): super().setUp() if not self.user_cloud.has_service('object-store'): self.skipTest('Object service is requred, but not available') self.VOLUME_NAME = self.getUniqueString() self.VOLUME_ID = None self.BACKUP_NAME = self.getUniqueString() self.BACKUP_ID = None volume = self.user_cloud.block_storage.create_volume( name=self.VOLUME_NAME, size=1 ) self.user_cloud.block_storage.wait_for_status( volume, status='available', failures=['error'], interval=5, wait=self._wait_for_timeout, ) assert isinstance(volume, _volume.Volume) self.VOLUME_ID = volume.id backup = self.user_cloud.block_storage.create_backup( name=self.BACKUP_NAME, volume_id=volume.id ) self.user_cloud.block_storage.wait_for_status( backup, status='available', failures=['error'], interval=5, wait=self._wait_for_timeout, ) assert isinstance(backup, _backup.Backup) self.assertEqual(self.BACKUP_NAME, backup.name) self.BACKUP_ID = backup.id def tearDown(self): sot = self.user_cloud.block_storage.delete_backup( self.BACKUP_ID, ignore_missing=False ) sot = self.user_cloud.block_storage.delete_volume( self.VOLUME_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = self.user_cloud.block_storage.get_backup(self.BACKUP_ID) self.assertEqual(self.BACKUP_NAME, sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v2/test_snapshot.py0000664000175000017500000000516000000000000030141 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v2 import snapshot as _snapshot from openstack.block_storage.v2 import volume as _volume from openstack.tests.functional.block_storage.v2 import base class TestSnapshot(base.BaseBlockStorageTest): def setUp(self): super().setUp() self.SNAPSHOT_NAME = self.getUniqueString() self.SNAPSHOT_ID = None self.VOLUME_NAME = self.getUniqueString() self.VOLUME_ID = None volume = self.user_cloud.block_storage.create_volume( name=self.VOLUME_NAME, size=1 ) self.user_cloud.block_storage.wait_for_status( volume, status='available', failures=['error'], interval=2, wait=self._wait_for_timeout, ) assert isinstance(volume, _volume.Volume) self.assertEqual(self.VOLUME_NAME, volume.name) self.VOLUME_ID = volume.id snapshot = self.user_cloud.block_storage.create_snapshot( name=self.SNAPSHOT_NAME, volume_id=self.VOLUME_ID ) self.user_cloud.block_storage.wait_for_status( snapshot, status='available', failures=['error'], interval=2, wait=self._wait_for_timeout, ) assert isinstance(snapshot, _snapshot.Snapshot) self.assertEqual(self.SNAPSHOT_NAME, snapshot.name) self.SNAPSHOT_ID = snapshot.id def tearDown(self): snapshot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID) sot = self.user_cloud.block_storage.delete_snapshot( snapshot, ignore_missing=False ) self.user_cloud.block_storage.wait_for_delete( snapshot, interval=2, wait=self._wait_for_timeout ) self.assertIsNone(sot) sot = self.user_cloud.block_storage.delete_volume( self.VOLUME_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = 
self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID) self.assertEqual(self.SNAPSHOT_NAME, sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v2/test_stats.py0000664000175000017500000000367700000000000027453 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v2 import stats as _stats from openstack.tests.functional.block_storage.v2 import base class TestStats(base.BaseBlockStorageTest): def setUp(self): super().setUp() sot = self.operator_cloud.block_storage.backend_pools() for pool in sot: self.assertIsInstance(pool, _stats.Pools) def test_list(self): capList = [ 'volume_backend_name', 'storage_protocol', 'free_capacity_gb', 'driver_version', 'goodness_function', 'QoS_support', 'vendor_name', 'pool_name', 'thin_provisioning_support', 'thick_provisioning_support', 'timestamp', 'max_over_subscription_ratio', 'total_volumes', 'total_capacity_gb', 'filter_function', 'multiattach', 'provisioned_capacity_gb', 'allocated_capacity_gb', 'reserved_percentage', 'location_info', ] capList.sort() pools = self.operator_cloud.block_storage.backend_pools() for pool in pools: caps = pool.capabilities keys = list(caps.keys()) assert isinstance(caps, dict) # Check that we have at minimum listed capabilities for cap in sorted(capList): self.assertIn(cap, keys) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v2/test_type.py0000664000175000017500000000257400000000000027271 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v2 import type as _type from openstack.tests.functional.block_storage.v2 import base class TestType(base.BaseBlockStorageTest): def setUp(self): super().setUp() self.TYPE_NAME = self.getUniqueString() self.TYPE_ID = None sot = self.operator_cloud.block_storage.create_type( name=self.TYPE_NAME ) assert isinstance(sot, _type.Type) self.assertEqual(self.TYPE_NAME, sot.name) self.TYPE_ID = sot.id def tearDown(self): sot = self.operator_cloud.block_storage.delete_type( self.TYPE_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = self.operator_cloud.block_storage.get_type(self.TYPE_ID) self.assertEqual(self.TYPE_NAME, sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v2/test_volume.py0000664000175000017500000000340100000000000027605 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v2 import volume as _volume from openstack.tests.functional.block_storage.v2 import base class TestVolume(base.BaseBlockStorageTest): def setUp(self): super().setUp() if not self.user_cloud.has_service('block-storage'): self.skipTest('block-storage service not supported by cloud') self.VOLUME_NAME = self.getUniqueString() self.VOLUME_ID = None volume = self.user_cloud.block_storage.create_volume( name=self.VOLUME_NAME, size=1 ) self.user_cloud.block_storage.wait_for_status( volume, status='available', failures=['error'], interval=2, wait=self._wait_for_timeout, ) assert isinstance(volume, _volume.Volume) self.assertEqual(self.VOLUME_NAME, volume.name) self.VOLUME_ID = volume.id def tearDown(self): sot = self.user_cloud.block_storage.delete_volume( self.VOLUME_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = self.user_cloud.block_storage.get_volume(self.VOLUME_ID) self.assertEqual(self.VOLUME_NAME, sot.name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3453615 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/0000775000175000017500000000000000000000000024670 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/__init__.py0000664000175000017500000000000000000000000026767 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/base.py0000664000175000017500000000170600000000000026160 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class BaseBlockStorageTest(base.BaseFunctionalTest): _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_BLOCK_STORAGE' def setUp(self): super().setUp() self._set_user_cloud(block_storage_api_version='3') if not self.user_cloud.has_service('block-storage', '3'): self.skipTest('block-storage service not supported by cloud') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_attachment.py0000664000175000017500000000665300000000000030443 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.block_storage.v3 import volume as _volume from openstack.tests.functional.block_storage.v3 import base class TestAttachment(base.BaseBlockStorageTest): """Test class for volume attachment operations. We have implemented a test that performs attachment create and attachment delete operations. Attachment create requires the instance ID and the volume ID for which we have created a volume resource and an instance resource. We haven't implemented attachment update test since it requires the host connector information which is not readily available to us and hard to retrieve. Without passing this information, the attachment update operation will fail. Similarly, we haven't implement attachment complete test since it depends on attachment update and can only be performed when the volume status is 'attaching' which is done by attachment update operation. """ def setUp(self): super().setUp() # Create Volume self.volume_name = self.getUniqueString() volume = self.user_cloud.block_storage.create_volume( name=self.volume_name, size=1 ) self.user_cloud.block_storage.wait_for_status( volume, status='available', failures=['error'], interval=2, wait=self._wait_for_timeout, ) self.assertIsInstance(volume, _volume.Volume) self.VOLUME_ID = volume.id # Create Server self.server_name = self.getUniqueString() self.server = self.operator_cloud.compute.create_server( name=self.server_name, flavor_id=self.flavor.id, image_id=self.image.id, networks='none', ) self.operator_cloud.compute.wait_for_server( self.server, wait=self._wait_for_timeout ) def tearDown(self): # Since delete_on_termination flag is set to True, we # don't need to cleanup the volume manually result = self.conn.compute.delete_server(self.server.id) self.conn.compute.wait_for_delete( self.server, wait=self._wait_for_timeout ) self.assertIsNone(result) super().tearDown() def test_attachment(self): attachment = self.conn.block_storage.create_attachment( self.VOLUME_ID, connector={}, 
instance_id=self.server.id, ) self.assertIn('id', attachment) self.assertIn('status', attachment) self.assertIn('instance', attachment) self.assertIn('volume_id', attachment) self.assertIn('attached_at', attachment) self.assertIn('detached_at', attachment) self.assertIn('attach_mode', attachment) self.assertIn('connection_info', attachment) attachment = self.user_cloud.block_storage.delete_attachment( attachment.id, ignore_missing=False ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_availability_zone.py0000664000175000017500000000165700000000000032017 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestAvailabilityZone(base.BaseFunctionalTest): def test_list(self): availability_zones = list(self.conn.block_storage.availability_zones()) self.assertGreater(len(availability_zones), 0) for az in availability_zones: self.assertIsInstance(az.name, str) self.assertIsInstance(az.state, dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_backup.py0000664000175000017500000000740300000000000027552 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v3 import backup as _backup from openstack.block_storage.v3 import volume as _volume from openstack.tests.functional.block_storage.v3 import base class TestBackup(base.BaseBlockStorageTest): def setUp(self): super().setUp() if not self.user_cloud.has_service('object-store'): self.skipTest('Object service is requred, but not available') self.VOLUME_NAME = self.getUniqueString() self.VOLUME_ID = None self.BACKUP_NAME = self.getUniqueString() self.BACKUP_ID = None volume = self.user_cloud.block_storage.create_volume( name=self.VOLUME_NAME, size=1 ) self.user_cloud.block_storage.wait_for_status( volume, status='available', failures=['error'], interval=5, wait=self._wait_for_timeout, ) assert isinstance(volume, _volume.Volume) self.VOLUME_ID = volume.id backup = self.user_cloud.block_storage.create_backup( name=self.BACKUP_NAME, volume_id=volume.id, is_incremental=False ) self.user_cloud.block_storage.wait_for_status( backup, status='available', failures=['error'], interval=5, wait=self._wait_for_timeout, ) assert isinstance(backup, _backup.Backup) self.assertEqual(self.BACKUP_NAME, backup.name) self.BACKUP_ID = backup.id def tearDown(self): sot = self.user_cloud.block_storage.delete_backup( self.BACKUP_ID, ignore_missing=False ) sot = self.user_cloud.block_storage.delete_volume( self.VOLUME_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = self.user_cloud.block_storage.get_backup(self.BACKUP_ID) self.assertEqual(self.BACKUP_NAME, sot.name) self.assertEqual(False, sot.is_incremental) def 
test_create_metadata(self): metadata_backup = self.user_cloud.block_storage.create_backup( name=self.getUniqueString(), volume_id=self.VOLUME_ID, metadata=dict(foo="bar"), ) self.user_cloud.block_storage.wait_for_status( metadata_backup, status='available', failures=['error'], interval=5, wait=self._wait_for_timeout, ) self.user_cloud.block_storage.delete_backup( metadata_backup.id, ignore_missing=False ) def test_create_incremental(self): incremental_backup = self.user_cloud.block_storage.create_backup( name=self.getUniqueString(), volume_id=self.VOLUME_ID, is_incremental=True, ) self.user_cloud.block_storage.wait_for_status( incremental_backup, status='available', failures=['error'], interval=5, wait=self._wait_for_timeout, ) self.assertEqual(True, incremental_backup.is_incremental) self.user_cloud.block_storage.delete_backup( incremental_backup.id, ignore_missing=False ) self.user_cloud.block_storage.wait_for_delete(incremental_backup) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_block_storage_summary.py0000664000175000017500000000155600000000000032703 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional.block_storage.v3 import base class TestBlockStorageSummary(base.BaseBlockStorageTest): def test_get(self): sot = self.conn.block_storage.summary(all_projects=True) self.assertIn('total_size', sot) self.assertIn('total_count', sot) self.assertIn('metadata', sot) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_capabilities.py0000664000175000017500000000274600000000000030743 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional.block_storage.v3 import base class TestCapabilities(base.BaseBlockStorageTest): # getting capabilities can be slow TIMEOUT_SCALING_FACTOR = 1.5 def test_get(self): services = list(self.operator_cloud.block_storage.services()) host = [ service for service in services if service.binary == 'cinder-volume' ][0].host sot = self.conn.block_storage.get_capabilities(host) self.assertIn('description', sot) self.assertIn('display_name', sot) self.assertIn('driver_version', sot) self.assertIn('namespace', sot) self.assertIn('pool_name', sot) self.assertIn('properties', sot) self.assertIn('replication_targets', sot) self.assertIn('storage_protocol', sot) self.assertIn('vendor_name', sot) self.assertIn('visibility', sot) self.assertIn('volume_backend_name', sot) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_extension.py0000664000175000017500000000167700000000000030330 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional.block_storage.v3 import base class Extensions(base.BaseBlockStorageTest): def test_get(self): extensions = list(self.conn.block_storage.extensions()) for extension in extensions: self.assertIsInstance(extension.alias, str) self.assertIsInstance(extension.description, str) self.assertIsInstance(extension.updated_at, str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_group.py0000664000175000017500000002011600000000000027435 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v3 import group as _group from openstack.block_storage.v3 import group_snapshot as _group_snapshot from openstack.block_storage.v3 import group_type as _group_type from openstack.block_storage.v3 import volume as _volume from openstack.tests.functional.block_storage.v3 import base class TestGroup(base.BaseBlockStorageTest): # TODO(stephenfin): We should use setUpClass here for MOAR SPEED!!! def setUp(self): super().setUp() # there will always be at least one volume type, i.e. 
the default one volume_types = list(self.conn.block_storage.types()) self.volume_type = volume_types[0] group_type_name = self.getUniqueString() self.group_type = self.conn.block_storage.create_group_type( name=group_type_name, ) self.assertIsInstance(self.group_type, _group_type.GroupType) self.assertEqual(group_type_name, self.group_type.name) group_name = self.getUniqueString() self.group = self.conn.block_storage.create_group( name=group_name, group_type=self.group_type.id, volume_types=[self.volume_type.id], ) self.assertIsInstance(self.group, _group.Group) self.assertEqual(group_name, self.group.name) def tearDown(self): # we do this in tearDown rather than via 'addCleanup' since we need to # wait for the deletion of the group before moving onto the deletion of # the group type self.conn.block_storage.delete_group(self.group, delete_volumes=True) self.conn.block_storage.wait_for_delete(self.group) self.conn.block_storage.delete_group_type(self.group_type) self.conn.block_storage.wait_for_delete(self.group_type) super().tearDown() def test_group_type(self): # get group_type = self.conn.block_storage.get_group_type(self.group_type.id) self.assertEqual(self.group_type.name, group_type.name) # find group_type = self.conn.block_storage.find_group_type( self.group_type.name, ) self.assertEqual(self.group_type.id, group_type.id) # list group_types = list(self.conn.block_storage.group_types()) # other tests may have created group types and there can be defaults so # we don't assert that this is the *only* group type present self.assertIn(self.group_type.id, {g.id for g in group_types}) # update group_type_name = self.getUniqueString() group_type_description = self.getUniqueString() group_type = self.conn.block_storage.update_group_type( self.group_type, name=group_type_name, description=group_type_description, ) self.assertIsInstance(group_type, _group_type.GroupType) group_type = self.conn.block_storage.get_group_type(self.group_type.id) 
self.assertEqual(group_type_name, group_type.name) self.assertEqual(group_type_description, group_type.description) def test_group_type_group_specs(self): # create group_type = self.conn.block_storage.create_group_type_group_specs( self.group_type, {'foo': 'bar', 'acme': 'buzz'}, ) self.assertIsInstance(group_type, _group_type.GroupType) group_type = self.conn.block_storage.get_group_type(self.group_type.id) self.assertEqual( {'foo': 'bar', 'acme': 'buzz'}, group_type.group_specs ) # get spec = self.conn.block_storage.get_group_type_group_specs_property( self.group_type, 'foo', ) self.assertEqual('bar', spec) # update spec = self.conn.block_storage.update_group_type_group_specs_property( self.group_type, 'foo', 'baz', ) self.assertEqual('baz', spec) group_type = self.conn.block_storage.get_group_type(self.group_type.id) self.assertEqual( {'foo': 'baz', 'acme': 'buzz'}, group_type.group_specs ) # delete self.conn.block_storage.delete_group_type_group_specs_property( self.group_type, 'foo', ) group_type = self.conn.block_storage.get_group_type(self.group_type.id) self.assertEqual({'acme': 'buzz'}, group_type.group_specs) def test_group(self): # get group = self.conn.block_storage.get_group(self.group.id) self.assertEqual(self.group.name, group.name) # find group = self.conn.block_storage.find_group(self.group.name) self.assertEqual(self.group.id, group.id) # list groups = self.conn.block_storage.groups() # other tests may have created groups and there can be defaults so we # don't assert that this is the *only* group present self.assertIn(self.group.id, {g.id for g in groups}) # update group_name = self.getUniqueString() group_description = self.getUniqueString() group = self.conn.block_storage.update_group( self.group, name=group_name, description=group_description, ) self.assertIsInstance(group, _group.Group) group = self.conn.block_storage.get_group(self.group.id) self.assertEqual(group_name, group.name) self.assertEqual(group_description, group.description) def 
test_group_snapshot(self): # group snapshots require a volume # no need for a teardown as the deletion of the group (with the # 'delete_volumes' flag) will handle this but we do need to wait for # the thing to be created volume_name = self.getUniqueString() self.volume = self.conn.block_storage.create_volume( name=volume_name, volume_type=self.volume_type.id, group_id=self.group.id, size=1, ) self.conn.block_storage.wait_for_status( self.volume, status='available', failures=['error'], interval=2, wait=self._wait_for_timeout, ) self.assertIsInstance(self.volume, _volume.Volume) group_snapshot_name = self.getUniqueString() self.group_snapshot = self.conn.block_storage.create_group_snapshot( name=group_snapshot_name, group_id=self.group.id, ) self.conn.block_storage.wait_for_status( self.group_snapshot, status='available', failures=['error'], interval=2, wait=self._wait_for_timeout, ) self.assertIsInstance( self.group_snapshot, _group_snapshot.GroupSnapshot, ) # get group_snapshot = self.conn.block_storage.get_group_snapshot( self.group_snapshot.id, ) self.assertEqual(self.group_snapshot.name, group_snapshot.name) # find group_snapshot = self.conn.block_storage.find_group_snapshot( self.group_snapshot.name, ) self.assertEqual(self.group_snapshot.id, group_snapshot.id) # list group_snapshots = self.conn.block_storage.group_snapshots() # other tests may have created group snapshot and there can be defaults # so we don't assert that this is the *only* group snapshot present self.assertIn(self.group_snapshot.id, {g.id for g in group_snapshots}) # update (not supported) # delete self.conn.block_storage.delete_group_snapshot(self.group_snapshot) self.conn.block_storage.wait_for_delete(self.group_snapshot) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_limits.py0000664000175000017500000000261100000000000027602 0ustar00zuulzuul00000000000000# 
Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional.block_storage.v3 import base class TestLimits(base.BaseBlockStorageTest): def test_get(self): sot = self.conn.block_storage.get_limits() self.assertIsNotNone(sot.absolute.max_total_backup_gigabytes) self.assertIsNotNone(sot.absolute.max_total_backups) self.assertIsNotNone(sot.absolute.max_total_snapshots) self.assertIsNotNone(sot.absolute.max_total_volume_gigabytes) self.assertIsNotNone(sot.absolute.max_total_volumes) self.assertIsNotNone(sot.absolute.total_backup_gigabytes_used) self.assertIsNotNone(sot.absolute.total_backups_used) self.assertIsNotNone(sot.absolute.total_gigabytes_used) self.assertIsNotNone(sot.absolute.total_snapshots_used) self.assertIsNotNone(sot.absolute.total_volumes_used) self.assertIsNotNone(sot.rate) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_resource_filters.py0000664000175000017500000000160500000000000031662 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional.block_storage.v3 import base class ResourceFilters(base.BaseBlockStorageTest): def test_get(self): resource_filters = list(self.conn.block_storage.resource_filters()) for rf in resource_filters: self.assertIsInstance(rf.filters, list) self.assertIsInstance(rf.resource, str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_service.py0000664000175000017500000000266000000000000027745 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional import base class TestService(base.BaseFunctionalTest): # listing services is slooowwww TIMEOUT_SCALING_FACTOR = 2.0 def test_list(self): sot = list(self.operator_cloud.block_storage.services()) self.assertIsNotNone(sot) def test_disable_enable(self): for srv in self.operator_cloud.block_storage.services(): # only nova-block_storage can be updated if srv.name == 'nova-block_storage': self.operator_cloud.block_storage.disable_service(srv) self.operator_cloud.block_storage.enable_service(srv) break def test_find(self): for srv in self.operator_cloud.block_storage.services(): self.operator_cloud.block_storage.find_service( srv.name, host=srv.host, ignore_missing=False, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_snapshot.py0000664000175000017500000000516000000000000030142 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.block_storage.v3 import snapshot as _snapshot from openstack.block_storage.v3 import volume as _volume from openstack.tests.functional.block_storage.v3 import base class TestSnapshot(base.BaseBlockStorageTest): def setUp(self): super().setUp() self.SNAPSHOT_NAME = self.getUniqueString() self.SNAPSHOT_ID = None self.VOLUME_NAME = self.getUniqueString() self.VOLUME_ID = None volume = self.user_cloud.block_storage.create_volume( name=self.VOLUME_NAME, size=1 ) self.user_cloud.block_storage.wait_for_status( volume, status='available', failures=['error'], interval=2, wait=self._wait_for_timeout, ) assert isinstance(volume, _volume.Volume) self.assertEqual(self.VOLUME_NAME, volume.name) self.VOLUME_ID = volume.id snapshot = self.user_cloud.block_storage.create_snapshot( name=self.SNAPSHOT_NAME, volume_id=self.VOLUME_ID ) self.user_cloud.block_storage.wait_for_status( snapshot, status='available', failures=['error'], interval=2, wait=self._wait_for_timeout, ) assert isinstance(snapshot, _snapshot.Snapshot) self.assertEqual(self.SNAPSHOT_NAME, snapshot.name) self.SNAPSHOT_ID = snapshot.id def tearDown(self): snapshot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID) sot = self.user_cloud.block_storage.delete_snapshot( snapshot, ignore_missing=False ) self.user_cloud.block_storage.wait_for_delete( snapshot, interval=2, wait=self._wait_for_timeout ) self.assertIsNone(sot) sot = self.user_cloud.block_storage.delete_volume( self.VOLUME_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID) self.assertEqual(self.SNAPSHOT_NAME, sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_transfer.py0000664000175000017500000000370600000000000030133 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the 
"License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional.block_storage.v3 import base from openstack import utils class TestTransfer(base.BaseBlockStorageTest): def setUp(self): super().setUp() self.VOLUME_NAME = self.getUniqueString() self.volume = self.user_cloud.block_storage.create_volume( name=self.VOLUME_NAME, size=1, ) self.user_cloud.block_storage.wait_for_status( self.volume, status='available', failures=['error'], interval=2, wait=self._wait_for_timeout, ) self.VOLUME_ID = self.volume.id def tearDown(self): sot = self.user_cloud.block_storage.delete_volume( self.VOLUME_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_transfer(self): if not utils.supports_microversion(self.conn.block_storage, "3.55"): self.skipTest("Cannot test new transfer API if MV < 3.55") sot = self.conn.block_storage.create_transfer( volume_id=self.VOLUME_ID, name=self.VOLUME_NAME, ) self.assertIn('auth_key', sot) self.assertIn('created_at', sot) self.assertIn('id', sot) self.assertIn('name', sot) self.assertIn('volume_id', sot) sot = self.user_cloud.block_storage.delete_transfer( sot.id, ignore_missing=False ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_type.py0000664000175000017500000000303300000000000027261 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v3 import type as _type from openstack.tests.functional.block_storage.v3 import base class TestType(base.BaseBlockStorageTest): def setUp(self): super().setUp() self.TYPE_NAME = self.getUniqueString() self.TYPE_ID = None if not self._op_name: self.skip("Operator cloud must be set for this test") self._set_operator_cloud(block_storage_api_version='3') sot = self.operator_cloud.block_storage.create_type( name=self.TYPE_NAME ) assert isinstance(sot, _type.Type) self.assertEqual(self.TYPE_NAME, sot.name) self.TYPE_ID = sot.id def tearDown(self): sot = self.operator_cloud.block_storage.delete_type( self.TYPE_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = self.operator_cloud.block_storage.get_type(self.TYPE_ID) self.assertEqual(self.TYPE_NAME, sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/block_storage/v3/test_volume.py0000664000175000017500000000507000000000000027612 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v3 import volume as _volume from openstack.tests.functional.block_storage.v3 import base class TestVolume(base.BaseBlockStorageTest): def setUp(self): super().setUp() if not self.user_cloud.has_service('block-storage'): self.skipTest('block-storage service not supported by cloud') volume_name = self.getUniqueString() self.volume = self.user_cloud.block_storage.create_volume( name=volume_name, size=1, ) self.user_cloud.block_storage.wait_for_status( self.volume, status='available', failures=['error'], interval=2, wait=self._wait_for_timeout, ) self.assertIsInstance(self.volume, _volume.Volume) self.assertEqual(volume_name, self.volume.name) def tearDown(self): self.user_cloud.block_storage.delete_volume(self.volume) super().tearDown() def test_volume(self): # get volume = self.user_cloud.block_storage.get_volume(self.volume.id) self.assertEqual(self.volume.name, volume.name) # find volume = self.user_cloud.block_storage.find_volume(self.volume.name) self.assertEqual(self.volume.id, volume.id) # list volumes = self.user_cloud.block_storage.volumes() # other tests may have created volumes so we don't assert that this is # the *only* volume present self.assertIn(self.volume.id, {v.id for v in volumes}) # update volume_name = self.getUniqueString() volume_description = self.getUniqueString() volume = self.user_cloud.block_storage.update_volume( self.volume, name=volume_name, description=volume_description, ) self.assertIsInstance(volume, _volume.Volume) volume = self.user_cloud.block_storage.get_volume(self.volume.id) self.assertEqual(volume_name, volume.name) self.assertEqual(volume_description, volume.description) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3533654 openstacksdk-4.0.0/openstack/tests/functional/cloud/0000775000175000017500000000000000000000000022630 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/__init__.py0000664000175000017500000000000000000000000024727 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_aggregate.py0000664000175000017500000000413000000000000026165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_aggregate ---------------------------------- Functional tests for aggregate resource. 
""" from openstack.tests.functional import base class TestAggregate(base.BaseFunctionalTest): def test_aggregates(self): if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") aggregate_name = self.getUniqueString() availability_zone = self.getUniqueString() self.addCleanup(self.cleanup, aggregate_name) aggregate = self.operator_cloud.create_aggregate(aggregate_name) aggregate_ids = [ v['id'] for v in self.operator_cloud.list_aggregates() ] self.assertIn(aggregate['id'], aggregate_ids) aggregate = self.operator_cloud.update_aggregate( aggregate_name, availability_zone=availability_zone ) self.assertEqual(availability_zone, aggregate['availability_zone']) aggregate = self.operator_cloud.set_aggregate_metadata( aggregate_name, {'key': 'value'} ) self.assertIn('key', aggregate['metadata']) aggregate = self.operator_cloud.set_aggregate_metadata( aggregate_name, {'key': None} ) self.assertNotIn('key', aggregate['metadata']) # Validate that we can delete by name self.assertTrue(self.operator_cloud.delete_aggregate(aggregate_name)) def cleanup(self, aggregate_name): aggregate = self.operator_cloud.get_aggregate(aggregate_name) if aggregate: self.operator_cloud.delete_aggregate(aggregate['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_cluster_templates.py0000664000175000017500000001026300000000000030002 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ test_cluster_templates ---------------------------------- Functional tests for `openstack.cloud` cluster_template methods. """ import subprocess import fixtures from testtools import content from openstack.tests.functional import base class TestClusterTemplate(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.user_cloud.has_service( 'container-infrastructure-management' ): self.skipTest('Container service not supported by cloud') self.ct = None self.ssh_directory = self.useFixture(fixtures.TempDir()).path def test_cluster_templates(self): '''Test cluster_templates functionality''' name = 'fake-cluster_template' server_type = 'vm' public = False image_id = 'fedora-atomic-f23-dib' tls_disabled = False registry_enabled = False coe = 'kubernetes' keypair_id = 'testkey' self.addDetail('cluster_template', content.text_content(name)) self.addCleanup(self.cleanup, name) # generate a keypair to add to nova subprocess.call( [ 'ssh-keygen', '-t', 'rsa', '-N', '', '-f', '%s/id_rsa_sdk' % self.ssh_directory, ] ) # add keypair to nova with open('%s/id_rsa_sdk.pub' % self.ssh_directory) as f: key_content = f.read() self.user_cloud.create_keypair('testkey', key_content) # Test we can create a cluster_template and we get it returned self.ct = self.user_cloud.create_cluster_template( name=name, image_id=image_id, keypair_id=keypair_id, coe=coe ) self.assertEqual(self.ct['name'], name) self.assertEqual(self.ct['image_id'], image_id) self.assertEqual(self.ct['keypair_id'], keypair_id) self.assertEqual(self.ct['coe'], coe) self.assertEqual(self.ct['registry_enabled'], registry_enabled) self.assertEqual(self.ct['tls_disabled'], tls_disabled) self.assertEqual(self.ct['public'], public) self.assertEqual(self.ct['server_type'], server_type) # Test that we can list cluster_templates cluster_templates = self.user_cloud.list_cluster_templates() 
self.assertIsNotNone(cluster_templates) # Test we get the same cluster_template with the # get_cluster_template method cluster_template_get = self.user_cloud.get_cluster_template( self.ct['uuid'] ) self.assertEqual(cluster_template_get['uuid'], self.ct['uuid']) # Test the get method also works by name cluster_template_get = self.user_cloud.get_cluster_template(name) self.assertEqual(cluster_template_get['name'], self.ct['name']) # Test we can update a field on the cluster_template and only that # field is updated cluster_template_update = self.user_cloud.update_cluster_template( self.ct, tls_disabled=True ) self.assertEqual(cluster_template_update['uuid'], self.ct['uuid']) self.assertTrue(cluster_template_update['tls_disabled']) # Test we can delete and get True returned cluster_template_delete = self.user_cloud.delete_cluster_template( self.ct['uuid'] ) self.assertTrue(cluster_template_delete) def cleanup(self, name): if self.ct: try: self.user_cloud.delete_cluster_template(self.ct['name']) except Exception: pass # delete keypair self.user_cloud.delete_keypair('testkey') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_coe_clusters.py0000664000175000017500000000175500000000000026743 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_coe_clusters ---------------------------------- Functional tests for COE clusters methods. 
""" from openstack.tests.functional import base class TestCompute(base.BaseFunctionalTest): # NOTE(flwang): Currently, running Magnum on a cloud which doesn't support # nested virtualization will lead to timeout. So this test file is mostly # like a note to document why we can't have function testing for Magnum # clusters CRUD. pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_compute.py0000664000175000017500000005667700000000000025742 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_compute ---------------------------------- Functional tests for compute methods. """ import datetime from fixtures import TimeoutException from openstack import exceptions from openstack.tests.functional import base from openstack import utils class TestCompute(base.BaseFunctionalTest): def setUp(self): # OS_TEST_TIMEOUT is 90 sec by default # but on a bad day, test_attach_detach_volume can take more time. self.TIMEOUT_SCALING_FACTOR = 1.5 super().setUp() self.server_name = self.getUniqueString() def _cleanup_servers_and_volumes(self, server_name): """Delete the named server and any attached volumes. Adding separate cleanup calls for servers and volumes can be tricky since they need to be done in the proper order. And sometimes deleting a server can start the process of deleting a volume if it is booted from that volume. 
This encapsulates that logic. """ server = self.user_cloud.get_server(server_name) if not server: return volumes = self.user_cloud.get_volumes(server) try: self.user_cloud.delete_server(server.name, wait=True) for volume in volumes: if volume.status != 'deleting': self.user_cloud.delete_volume(volume.id, wait=True) except (exceptions.ResourceTimeout, TimeoutException): # Ups, some timeout occured during process of deletion server # or volumes, so now we will try to call delete each of them # once again and we will try to live with it self.user_cloud.delete_server(server.name) for volume in volumes: self.operator_cloud.delete_volume( volume.id, wait=False, force=True ) def test_create_and_delete_server(self): self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, wait=True, ) self.assertEqual(self.server_name, server['name']) self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertIsNotNone(server['adminPass']) self.assertTrue( self.user_cloud.delete_server(self.server_name, wait=True) ) srv = self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') def test_create_and_delete_server_auto_ip_delete_ips(self): self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, auto_ip=True, wait=True, ) self.assertEqual(self.server_name, server['name']) self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertIsNotNone(server['adminPass']) self.assertTrue( self.user_cloud.delete_server( self.server_name, wait=True, delete_ips=True ) ) srv = self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') 
def test_attach_detach_volume(self): self.skipTest('Volume functional tests temporarily disabled') server_name = self.getUniqueString() self.addCleanup(self._cleanup_servers_and_volumes, server_name) server = self.user_cloud.create_server( name=server_name, image=self.image, flavor=self.flavor, wait=True ) volume = self.user_cloud.create_volume(1) vol_attachment = self.user_cloud.attach_volume(server, volume) for key in ('device', 'serverId', 'volumeId'): self.assertIn(key, vol_attachment) self.assertTrue(vol_attachment[key]) # assert string is not empty self.assertIsNone(self.user_cloud.detach_volume(server, volume)) def test_attach_volume_create_snapshot(self): self.skipTest('Volume functional tests temporarily disabled') server_name = self.getUniqueString() self.addCleanup(self._cleanup_servers_and_volumes, server_name) server = self.user_cloud.create_server( name=server_name, image=self.image, flavor=self.flavor, wait=True ) volume = self.user_cloud.create_volume(1) vol_attachment = self.user_cloud.attach_volume(server, volume) for key in ('device', 'serverId', 'volumeId'): self.assertIn(key, vol_attachment) self.assertTrue(vol_attachment[key]) # assert string is not empty snapshot = self.user_cloud.create_volume_snapshot( volume_id=volume.id, force=True, wait=True ) self.addCleanup(self.user_cloud.delete_volume_snapshot, snapshot['id']) self.assertIsNotNone(snapshot) def test_create_and_delete_server_with_config_drive(self): self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, config_drive=True, wait=True, ) self.assertEqual(self.server_name, server['name']) self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertTrue(server['has_config_drive']) self.assertIsNotNone(server['adminPass']) self.assertTrue( self.user_cloud.delete_server(self.server_name, wait=True) ) srv 
= self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') def test_create_and_delete_server_with_config_drive_none(self): # check that we're not sending invalid values for config_drive # if it's passed in explicitly as None - which nodepool does if it's # not set in the config self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, config_drive=None, wait=True, ) self.assertEqual(self.server_name, server['name']) self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertFalse(server['has_config_drive']) self.assertIsNotNone(server['adminPass']) self.assertTrue( self.user_cloud.delete_server(self.server_name, wait=True) ) srv = self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') def test_list_all_servers(self): if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, wait=True, ) # We're going to get servers from other tests, but that's ok, as long # as we get the server we created with the demo user. 
found_server = False for s in self.operator_cloud.list_servers(all_projects=True): if s.name == server.name: found_server = True self.assertTrue(found_server) def test_list_all_servers_bad_permissions(self): # Normal users are not allowed to pass all_projects=True self.assertRaises( exceptions.SDKException, self.user_cloud.list_servers, all_projects=True, ) def test_create_server_image_flavor_dict(self): self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image={'id': self.image.id}, flavor={'id': self.flavor.id}, wait=True, ) self.assertEqual(self.server_name, server['name']) self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertIsNotNone(server['adminPass']) self.assertTrue( self.user_cloud.delete_server(self.server_name, wait=True) ) srv = self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') def test_get_server_console(self): self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, wait=True, ) # _get_server_console_output does not trap HTTP exceptions, so this # returning a string tests that the call is correct. Testing that # the cloud returns actual data in the output is out of scope. 
log = self.user_cloud._get_server_console_output(server_id=server.id) self.assertIsInstance(log, str) def test_get_server_console_name_or_id(self): self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, wait=True, ) log = self.user_cloud.get_server_console(server=self.server_name) self.assertIsInstance(log, str) def test_list_availability_zone_names(self): self.assertEqual( ['nova'], self.user_cloud.list_availability_zone_names() ) def test_get_server_console_bad_server(self): self.assertRaises( exceptions.SDKException, self.user_cloud.get_server_console, server=self.server_name, ) def test_create_and_delete_server_with_admin_pass(self): self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, admin_pass='sheiqu9loegahSh', wait=True, ) self.assertEqual(self.server_name, server['name']) self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertEqual(server['adminPass'], 'sheiqu9loegahSh') self.assertTrue( self.user_cloud.delete_server(self.server_name, wait=True) ) srv = self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') def test_get_image_id(self): self.assertEqual( self.image.id, self.user_cloud.get_image_id(self.image.id) ) self.assertEqual( self.image.id, self.user_cloud.get_image_id(self.image.name) ) def test_get_image_name(self): self.assertEqual( self.image.name, self.user_cloud.get_image_name(self.image.id) ) self.assertEqual( self.image.name, self.user_cloud.get_image_name(self.image.name) ) def _assert_volume_attach(self, server, volume_id=None, image=''): self.assertEqual(self.server_name, server['name']) self.assertEqual(image, server['image']) self.assertEqual(self.flavor.id, server['flavor']['id']) 
volumes = self.user_cloud.get_volumes(server) self.assertEqual(1, len(volumes)) volume = volumes[0] if volume_id: self.assertEqual(volume_id, volume['id']) else: volume_id = volume['id'] self.assertEqual(1, len(volume['attachments']), 1) self.assertEqual(server['id'], volume['attachments'][0]['server_id']) return volume_id def test_create_boot_from_volume_image(self): self.skipTest('Volume functional tests temporarily disabled') if not self.user_cloud.has_service('volume'): self.skipTest('volume service not supported by cloud') self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, boot_from_volume=True, volume_size=1, wait=True, ) volume_id = self._assert_volume_attach(server) volume = self.user_cloud.get_volume(volume_id) self.assertIsNotNone(volume) self.assertEqual(volume['name'], volume['display_name']) self.assertTrue(volume['bootable']) self.assertEqual(server['id'], volume['attachments'][0]['server_id']) self.assertTrue(self.user_cloud.delete_server(server.id, wait=True)) self._wait_for_detach(volume.id) self.assertTrue(self.user_cloud.delete_volume(volume.id, wait=True)) srv = self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') self.assertIsNone(self.user_cloud.get_volume(volume.id)) def _wait_for_detach(self, volume_id): # Volumes do not show up as unattached for a bit immediately after # deleting a server that had had a volume attached. Yay for eventual # consistency! 
for count in utils.iterate_timeout( 60, 'Timeout waiting for volume {volume_id} to detach'.format( volume_id=volume_id ), ): volume = self.user_cloud.get_volume(volume_id) if volume.status in ( 'available', 'error', 'error_restoring', 'error_extending', ): return def test_create_terminate_volume_image(self): self.skipTest('Volume functional tests temporarily disabled') if not self.user_cloud.has_service('volume'): self.skipTest('volume service not supported by cloud') self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, boot_from_volume=True, terminate_volume=True, volume_size=1, wait=True, ) volume_id = self._assert_volume_attach(server) self.assertTrue( self.user_cloud.delete_server(self.server_name, wait=True) ) volume = self.user_cloud.get_volume(volume_id) # We can either get None (if the volume delete was quick), or a volume # that is in the process of being deleted. 
if volume: self.assertEqual('deleting', volume.status) srv = self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') def test_create_boot_from_volume_preexisting(self): self.skipTest('Volume functional tests temporarily disabled') if not self.user_cloud.has_service('volume'): self.skipTest('volume service not supported by cloud') self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) volume = self.user_cloud.create_volume( size=1, name=self.server_name, image=self.image, wait=True ) self.addCleanup(self.user_cloud.delete_volume, volume.id) server = self.user_cloud.create_server( name=self.server_name, image=None, flavor=self.flavor, boot_volume=volume, volume_size=1, wait=True, ) volume_id = self._assert_volume_attach(server, volume_id=volume['id']) self.assertTrue( self.user_cloud.delete_server(self.server_name, wait=True) ) volume = self.user_cloud.get_volume(volume_id) self.assertIsNotNone(volume) self.assertEqual(volume['name'], volume['display_name']) self.assertTrue(volume['bootable']) self.assertEqual([], volume['attachments']) self._wait_for_detach(volume.id) self.assertTrue(self.user_cloud.delete_volume(volume_id)) srv = self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') self.assertIsNone(self.user_cloud.get_volume(volume_id)) def test_create_boot_attach_volume(self): self.skipTest('Volume functional tests temporarily disabled') if not self.user_cloud.has_service('volume'): self.skipTest('volume service not supported by cloud') self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) volume = self.user_cloud.create_volume( size=1, name=self.server_name, image=self.image, wait=True ) self.addCleanup(self.user_cloud.delete_volume, volume['id']) server = self.user_cloud.create_server( name=self.server_name, flavor=self.flavor, image=self.image, boot_from_volume=False, volumes=[volume], wait=True, ) volume_id = 
self._assert_volume_attach( server, volume_id=volume['id'], image={'id': self.image['id']} ) self.assertTrue( self.user_cloud.delete_server(self.server_name, wait=True) ) volume = self.user_cloud.get_volume(volume_id) self.assertIsNotNone(volume) self.assertEqual(volume['name'], volume['display_name']) self.assertEqual([], volume['attachments']) self._wait_for_detach(volume.id) self.assertTrue(self.user_cloud.delete_volume(volume_id)) srv = self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') self.assertIsNone(self.user_cloud.get_volume(volume_id)) def test_create_boot_from_volume_preexisting_terminate(self): self.skipTest('Volume functional tests temporarily disabled') if not self.user_cloud.has_service('volume'): self.skipTest('volume service not supported by cloud') self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) volume = self.user_cloud.create_volume( size=1, name=self.server_name, image=self.image, wait=True ) server = self.user_cloud.create_server( name=self.server_name, image=None, flavor=self.flavor, boot_volume=volume, terminate_volume=True, volume_size=1, wait=True, ) volume_id = self._assert_volume_attach(server, volume_id=volume['id']) self.assertTrue( self.user_cloud.delete_server(self.server_name, wait=True) ) volume = self.user_cloud.get_volume(volume_id) # We can either get None (if the volume delete was quick), or a volume # that is in the process of being deleted. 
if volume: self.assertEqual('deleting', volume.status) srv = self.user_cloud.get_server(self.server_name) self.assertTrue(srv is None or srv.status.lower() == 'deleted') def test_create_image_snapshot_wait_active(self): self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) server = self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, admin_pass='sheiqu9loegahSh', wait=True, ) image = self.user_cloud.create_image_snapshot( 'test-snapshot', server, wait=True ) self.addCleanup(self.user_cloud.delete_image, image['id']) self.assertEqual('active', image['status']) def test_set_and_delete_metadata(self): self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, wait=True, ) self.user_cloud.set_server_metadata( self.server_name, {'key1': 'value1', 'key2': 'value2'} ) updated_server = self.user_cloud.get_server(self.server_name) self.assertEqual( set(updated_server.metadata.items()), set({'key1': 'value1', 'key2': 'value2'}.items()), ) self.user_cloud.set_server_metadata( self.server_name, {'key2': 'value3'} ) updated_server = self.user_cloud.get_server(self.server_name) self.assertEqual( set(updated_server.metadata.items()), set({'key1': 'value1', 'key2': 'value3'}.items()), ) self.user_cloud.delete_server_metadata(self.server_name, ['key2']) updated_server = self.user_cloud.get_server(self.server_name) self.assertEqual( set(updated_server.metadata.items()), set({'key1': 'value1'}.items()), ) self.user_cloud.delete_server_metadata(self.server_name, ['key1']) updated_server = self.user_cloud.get_server(self.server_name) self.assertEqual(set(updated_server.metadata.items()), set()) self.assertRaises( exceptions.NotFoundException, self.user_cloud.delete_server_metadata, self.server_name, ['key1'], ) def test_update_server(self): self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) 
self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, wait=True, ) server_updated = self.user_cloud.update_server( self.server_name, name='new_name' ) self.assertEqual('new_name', server_updated['name']) def test_get_compute_usage(self): '''Test usage functionality''' # Add a server so that we can know we have usage if not self.operator_cloud: # TODO(gtema) rework method not to require getting project self.skipTest("Operator cloud is required for this test") self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) self.user_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, wait=True, ) start = datetime.datetime.now() - datetime.timedelta(seconds=5) usage = self.operator_cloud.get_compute_usage('demo', start) self.add_info_on_exception('usage', usage) self.assertIsNotNone(usage) self.assertIn('total_hours', usage) self.assertIn('start', usage) self.assertEqual(start.isoformat(), usage['start']) self.assertIn('location', usage) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_devstack.py0000664000175000017500000000273700000000000026056 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_devstack ------------- Throw errors if we do not actually detect the services we're supposed to. 
""" import os from testscenarios import load_tests_apply_scenarios as load_tests # noqa from openstack.tests.functional import base class TestDevstack(base.BaseFunctionalTest): scenarios = [ ('designate', dict(env='DESIGNATE', service='dns')), ('heat', dict(env='HEAT', service='orchestration')), ( 'magnum', dict(env='MAGNUM', service='container-infrastructure-management'), ), ('neutron', dict(env='NEUTRON', service='network')), ('octavia', dict(env='OCTAVIA', service='load-balancer')), ('swift', dict(env='SWIFT', service='object-store')), ] def test_has_service(self): if os.environ.get(f'OPENSTACKSDK_HAS_{self.env}', '0') == '1': self.assertTrue(self.user_cloud.has_service(self.service)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_domain.py0000664000175000017500000001214300000000000025511 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_domain ---------------------------------- Functional tests for keystone domain resource. 
""" from openstack import exceptions from openstack.tests.functional import base class TestDomain(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") i_ver = self.operator_cloud.config.get_api_version('identity') if i_ver in ('2', '2.0'): self.skipTest('Identity service does not support domains') self.domain_prefix = self.getUniqueString('domain') self.addCleanup(self._cleanup_domains) def _cleanup_domains(self): exception_list = list() for domain in self.operator_cloud.list_domains(): if domain['name'].startswith(self.domain_prefix): try: self.operator_cloud.delete_domain(domain['id']) except Exception as e: exception_list.append(str(e)) continue if exception_list: # Raise an error: we must make users aware that something went # wrong raise exceptions.SDKException('\n'.join(exception_list)) def test_search_domains(self): domain_name = self.domain_prefix + '_search' # Shouldn't find any domain with this name yet results = self.operator_cloud.search_domains( filters=dict(name=domain_name) ) self.assertEqual(0, len(results)) # Now create a new domain domain = self.operator_cloud.create_domain(domain_name) self.assertEqual(domain_name, domain['name']) # Now we should find only the new domain results = self.operator_cloud.search_domains( filters=dict(name=domain_name) ) self.assertEqual(1, len(results)) self.assertEqual(domain_name, results[0]['name']) # Now we search by name with name_or_id, should find only new domain results = self.operator_cloud.search_domains(name_or_id=domain_name) self.assertEqual(1, len(results)) self.assertEqual(domain_name, results[0]['name']) def test_update_domain(self): domain = self.operator_cloud.create_domain( self.domain_prefix, 'description' ) self.assertEqual(self.domain_prefix, domain['name']) self.assertEqual('description', domain['description']) self.assertTrue(domain['enabled']) updated = self.operator_cloud.update_domain( domain['id'], 
name='updated name', description='updated description', enabled=False, ) self.assertEqual('updated name', updated['name']) self.assertEqual('updated description', updated['description']) self.assertFalse(updated['enabled']) # Now we update domain by name with name_or_id updated = self.operator_cloud.update_domain( None, name_or_id='updated name', name='updated name 2', description='updated description 2', enabled=True, ) self.assertEqual('updated name 2', updated['name']) self.assertEqual('updated description 2', updated['description']) self.assertTrue(updated['enabled']) def test_delete_domain(self): domain = self.operator_cloud.create_domain( self.domain_prefix, 'description' ) self.assertEqual(self.domain_prefix, domain['name']) self.assertEqual('description', domain['description']) self.assertTrue(domain['enabled']) deleted = self.operator_cloud.delete_domain(domain['id']) self.assertTrue(deleted) # Now we delete domain by name with name_or_id domain = self.operator_cloud.create_domain( self.domain_prefix, 'description' ) self.assertEqual(self.domain_prefix, domain['name']) self.assertEqual('description', domain['description']) self.assertTrue(domain['enabled']) deleted = self.operator_cloud.delete_domain(None, domain['name']) self.assertTrue(deleted) # Finally, we assert we get False from delete_domain if domain does # not exist domain = self.operator_cloud.create_domain( self.domain_prefix, 'description' ) self.assertEqual(self.domain_prefix, domain['name']) self.assertEqual('description', domain['description']) self.assertTrue(domain['enabled']) deleted = self.operator_cloud.delete_domain(None, 'bogus_domain') self.assertFalse(deleted) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_endpoints.py0000664000175000017500000002033700000000000026251 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_endpoint ---------------------------------- Functional tests for endpoint resource. """ import random import string from openstack.cloud.exc import OpenStackCloudUnavailableFeature from openstack import exceptions from openstack.tests.functional import base class TestEndpoints(base.KeystoneBaseFunctionalTest): endpoint_attributes = [ 'id', 'region', 'publicurl', 'internalurl', 'service_id', 'adminurl', ] def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") # Generate a random name for services and regions in this test self.new_item_name = 'test_' + ''.join( random.choice(string.ascii_lowercase) for _ in range(5) ) self.addCleanup(self._cleanup_services) self.addCleanup(self._cleanup_endpoints) def _cleanup_endpoints(self): exception_list = list() for e in self.operator_cloud.list_endpoints(): if e.get('region') is not None and e['region'].startswith( self.new_item_name ): try: self.operator_cloud.delete_endpoint(id=e['id']) except Exception as e: # We were unable to delete a service, let's try with next exception_list.append(str(e)) continue if exception_list: # Raise an error: we must make users aware that something went # wrong raise exceptions.SDKException('\n'.join(exception_list)) def _cleanup_services(self): exception_list = list() for s in self.operator_cloud.list_services(): if s['name'] is not None and s['name'].startswith( self.new_item_name ): try: 
self.operator_cloud.delete_service(name_or_id=s['id']) except Exception as e: # We were unable to delete a service, let's try with next exception_list.append(str(e)) continue if exception_list: # Raise an error: we must make users aware that something went # wrong raise exceptions.SDKException('\n'.join(exception_list)) def test_create_endpoint(self): service_name = self.new_item_name + '_create' region = list(self.operator_cloud.identity.regions())[0].id service = self.operator_cloud.create_service( name=service_name, type='test_type', description='this is a test description', ) endpoints = self.operator_cloud.create_endpoint( service_name_or_id=service['id'], public_url='http://public.test/', internal_url='http://internal.test/', admin_url='http://admin.url/', region=region, ) self.assertNotEqual([], endpoints) self.assertIsNotNone(endpoints[0].get('id')) # Test None parameters endpoints = self.operator_cloud.create_endpoint( service_name_or_id=service['id'], public_url='http://public.test/', region=region, ) self.assertNotEqual([], endpoints) self.assertIsNotNone(endpoints[0].get('id')) def test_update_endpoint(self): ver = self.operator_cloud.config.get_api_version('identity') if ver.startswith('2'): # NOTE(SamYaple): Update endpoint only works with v3 api self.assertRaises( OpenStackCloudUnavailableFeature, self.operator_cloud.update_endpoint, 'endpoint_id1', ) else: # service operations require existing region. 
Do not test updating # region for now region = list(self.operator_cloud.identity.regions())[0].id service = self.operator_cloud.create_service( name='service1', type='test_type' ) endpoint = self.operator_cloud.create_endpoint( service_name_or_id=service['id'], url='http://admin.url/', interface='admin', region=region, enabled=False, )[0] new_service = self.operator_cloud.create_service( name='service2', type='test_type' ) new_endpoint = self.operator_cloud.update_endpoint( endpoint.id, service_name_or_id=new_service.id, url='http://public.url/', interface='public', region=region, enabled=True, ) self.assertEqual(new_endpoint.url, 'http://public.url/') self.assertEqual(new_endpoint.interface, 'public') self.assertEqual(new_endpoint.region_id, region) self.assertEqual(new_endpoint.service_id, new_service.id) self.assertTrue(new_endpoint.is_enabled) def test_list_endpoints(self): service_name = self.new_item_name + '_list' region = list(self.operator_cloud.identity.regions())[0].id service = self.operator_cloud.create_service( name=service_name, type='test_type', description='this is a test description', ) endpoints = self.operator_cloud.create_endpoint( service_name_or_id=service['id'], public_url='http://public.test/', internal_url='http://internal.test/', region=region, ) observed_endpoints = self.operator_cloud.list_endpoints() found = False for e in observed_endpoints: # Test all attributes are returned for endpoint in endpoints: if e['id'] == endpoint['id']: found = True self.assertEqual(service['id'], e['service_id']) if 'interface' in e: if e['interface'] == 'internal': self.assertEqual('http://internal.test/', e['url']) elif e['interface'] == 'public': self.assertEqual('http://public.test/', e['url']) else: self.assertEqual('http://public.test/', e['publicurl']) self.assertEqual( 'http://internal.test/', e['internalurl'] ) self.assertEqual(region, e['region_id']) self.assertTrue(found, msg='new endpoint not found in endpoints list!') def 
test_delete_endpoint(self): service_name = self.new_item_name + '_delete' region = list(self.operator_cloud.identity.regions())[0].id service = self.operator_cloud.create_service( name=service_name, type='test_type', description='this is a test description', ) endpoints = self.operator_cloud.create_endpoint( service_name_or_id=service['id'], public_url='http://public.test/', internal_url='http://internal.test/', region=region, ) self.assertNotEqual([], endpoints) for endpoint in endpoints: self.operator_cloud.delete_endpoint(endpoint['id']) observed_endpoints = self.operator_cloud.list_endpoints() found = False for e in observed_endpoints: for endpoint in endpoints: if e['id'] == endpoint['id']: found = True break self.assertEqual(False, found, message='new endpoint was not deleted!') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_flavor.py0000664000175000017500000001572300000000000025542 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_flavor ---------------------------------- Functional tests for flavor resource. 
""" from openstack import exceptions from openstack.tests.functional import base class TestFlavor(base.BaseFunctionalTest): def setUp(self): super().setUp() # Generate a random name for flavors in this test self.new_item_name = self.getUniqueString('flavor') self.addCleanup(self._cleanup_flavors) def _cleanup_flavors(self): exception_list = list() if self.operator_cloud: for f in self.operator_cloud.list_flavors(get_extra=False): if f['name'].startswith(self.new_item_name): try: self.operator_cloud.delete_flavor(f['id']) except Exception as e: # We were unable to delete a flavor, let's try with # next exception_list.append(str(e)) continue if exception_list: # Raise an error: we must make users aware that something went # wrong raise exceptions.SDKException('\n'.join(exception_list)) def test_create_flavor(self): if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") flavor_name = self.new_item_name + '_create' flavor_kwargs = dict( name=flavor_name, ram=1024, vcpus=2, disk=10, ephemeral=5, swap=100, rxtx_factor=1.5, is_public=True, ) flavor = self.operator_cloud.create_flavor(**flavor_kwargs) self.assertIsNotNone(flavor['id']) # When properly normalized, we should always get an extra_specs # and expect empty dict on create. 
self.assertIn('extra_specs', flavor) self.assertEqual({}, flavor['extra_specs']) # We should also always have ephemeral and public attributes self.assertIn('ephemeral', flavor) self.assertEqual(5, flavor['ephemeral']) self.assertIn('is_public', flavor) self.assertTrue(flavor['is_public']) for key in flavor_kwargs.keys(): self.assertIn(key, flavor) for key, value in flavor_kwargs.items(): self.assertEqual(value, flavor[key]) def test_list_flavors(self): pub_flavor_name = self.new_item_name + '_public' priv_flavor_name = self.new_item_name + '_private' public_kwargs = dict( name=pub_flavor_name, ram=1024, vcpus=2, disk=10, is_public=True ) private_kwargs = dict( name=priv_flavor_name, ram=1024, vcpus=2, disk=10, is_public=False ) if self.operator_cloud: # Create a public and private flavor. We expect both to be listed # for an operator. self.operator_cloud.create_flavor(**public_kwargs) self.operator_cloud.create_flavor(**private_kwargs) flavors = self.operator_cloud.list_flavors(get_extra=False) # Flavor list will include the standard devstack flavors. We just # want to make sure both of the flavors we just created are # present. 
found = [] for f in flavors: # extra_specs should be added within list_flavors() self.assertIn('extra_specs', f) if f['name'] in (pub_flavor_name, priv_flavor_name): found.append(f) self.assertEqual(2, len(found)) else: self.user_cloud.list_flavors() def test_flavor_access(self): if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") priv_flavor_name = self.new_item_name + '_private' private_kwargs = dict( name=priv_flavor_name, ram=1024, vcpus=2, disk=10, is_public=False ) new_flavor = self.operator_cloud.create_flavor(**private_kwargs) # Validate the 'demo' user cannot see the new flavor flavors = self.user_cloud.search_flavors(priv_flavor_name) self.assertEqual(0, len(flavors)) # We need the tenant ID for the 'demo' user project = self.operator_cloud.get_project('demo') self.assertIsNotNone(project) # Now give 'demo' access self.operator_cloud.add_flavor_access(new_flavor['id'], project['id']) # Now see if the 'demo' user has access to it flavors = self.user_cloud.search_flavors(priv_flavor_name) self.assertEqual(1, len(flavors)) self.assertEqual(priv_flavor_name, flavors[0]['name']) # Now see if the 'demo' user has access to it without needing # the demo_cloud access. 
acls = self.operator_cloud.list_flavor_access(new_flavor['id']) self.assertEqual(1, len(acls)) self.assertEqual(project['id'], acls[0]['tenant_id']) # Now revoke the access and make sure we can't find it self.operator_cloud.remove_flavor_access( new_flavor['id'], project['id'] ) flavors = self.user_cloud.search_flavors(priv_flavor_name) self.assertEqual(0, len(flavors)) def test_set_unset_flavor_specs(self): """ Test setting and unsetting flavor extra specs """ if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") flavor_name = self.new_item_name + '_spec_test' kwargs = dict(name=flavor_name, ram=1024, vcpus=2, disk=10) new_flavor = self.operator_cloud.create_flavor(**kwargs) # Expect no extra_specs self.assertEqual({}, new_flavor['extra_specs']) # Now set them extra_specs = {'foo': 'aaa', 'bar': 'bbb'} self.operator_cloud.set_flavor_specs(new_flavor['id'], extra_specs) mod_flavor = self.operator_cloud.get_flavor( new_flavor['id'], get_extra=True ) # Verify extra_specs were set self.assertIn('extra_specs', mod_flavor) self.assertEqual(extra_specs, mod_flavor['extra_specs']) # Unset the 'foo' value self.operator_cloud.unset_flavor_specs(mod_flavor['id'], ['foo']) mod_flavor = self.operator_cloud.get_flavor_by_id( new_flavor['id'], get_extra=True ) # Verify 'foo' is unset and 'bar' is still set self.assertEqual({'bar': 'bbb'}, mod_flavor['extra_specs']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_floating_ip.py0000664000175000017500000003041000000000000026532 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_floating_ip ---------------------------------- Functional tests for floating IP resource. """ import pprint import sys from testtools import content from openstack.cloud import meta from openstack import exceptions from openstack import proxy from openstack.tests.functional import base from openstack import utils class TestFloatingIP(base.BaseFunctionalTest): timeout = 60 def setUp(self): super().setUp() # Generate a random name for these tests self.new_item_name = self.getUniqueString() self.addCleanup(self._cleanup_network) self.addCleanup(self._cleanup_servers) def _cleanup_network(self): exception_list = list() tb_list = list() # Delete stale networks as well as networks created for this test if self.user_cloud.has_service('network'): # Delete routers for r in self.user_cloud.list_routers(): try: if r['name'].startswith(self.new_item_name): self.user_cloud.update_router( r, ext_gateway_net_id=None ) for s in self.user_cloud.list_subnets(): if s['name'].startswith(self.new_item_name): try: self.user_cloud.remove_router_interface( r, subnet_id=s['id'] ) except Exception: pass self.user_cloud.delete_router(r.id) except Exception as e: exception_list.append(e) tb_list.append(sys.exc_info()[2]) continue # Delete subnets for s in self.user_cloud.list_subnets(): if s['name'].startswith(self.new_item_name): try: self.user_cloud.delete_subnet(s.id) except Exception as e: exception_list.append(e) tb_list.append(sys.exc_info()[2]) continue # Delete networks for n in self.user_cloud.list_networks(): if n['name'].startswith(self.new_item_name): try: 
self.user_cloud.delete_network(n.id) except Exception as e: exception_list.append(e) tb_list.append(sys.exc_info()[2]) continue if exception_list: # Raise an error: we must make users aware that something went # wrong if len(exception_list) > 1: self.addDetail( 'exceptions', content.text_content( '\n'.join([str(ex) for ex in exception_list]) ), ) exc = exception_list[0] raise exc def _cleanup_servers(self): exception_list = list() # Delete stale servers as well as server created for this test for i in self.user_cloud.list_servers(bare=True): if i.name.startswith(self.new_item_name): try: self.user_cloud.delete_server(i.id, wait=True) except Exception as e: exception_list.append(str(e)) continue if exception_list: # Raise an error: we must make users aware that something went # wrong raise exceptions.SDKException('\n'.join(exception_list)) def _cleanup_ips(self, server): exception_list = list() fixed_ip = meta.get_server_private_ip(server) for ip in self.user_cloud.list_floating_ips(): if ( ip.get('fixed_ip', None) == fixed_ip or ip.get('fixed_ip_address', None) == fixed_ip ): try: self.user_cloud.delete_floating_ip(ip.id) except Exception as e: exception_list.append(str(e)) continue if exception_list: # Raise an error: we must make users aware that something went # wrong raise exceptions.SDKException('\n'.join(exception_list)) def _setup_networks(self): if self.user_cloud.has_service('network'): # Create a network self.test_net = self.user_cloud.create_network( name=self.new_item_name + '_net' ) # Create a subnet on it self.test_subnet = self.user_cloud.create_subnet( subnet_name=self.new_item_name + '_subnet', network_name_or_id=self.test_net['id'], cidr='10.24.4.0/24', enable_dhcp=True, ) # Create a router self.test_router = self.user_cloud.create_router( name=self.new_item_name + '_router' ) # Attach the router to an external network ext_nets = self.user_cloud.search_networks( filters={'router:external': True} ) self.user_cloud.update_router( 
name_or_id=self.test_router['id'], ext_gateway_net_id=ext_nets[0]['id'], ) # Attach the router to the internal subnet self.user_cloud.add_router_interface( self.test_router, subnet_id=self.test_subnet['id'] ) # Select the network for creating new servers self.nic = {'net-id': self.test_net['id']} self.addDetail( 'networks-neutron', content.text_content( pprint.pformat(self.user_cloud.list_networks()) ), ) else: # Find network names for nova-net data = proxy._json_response( self.user_cloud.compute.get('/os-tenant-networks') ) nets = meta.get_and_munchify('networks', data) self.addDetail( 'networks-nova', content.text_content(pprint.pformat(nets)) ) self.nic = {'net-id': nets[0].id} def test_private_ip(self): self._setup_networks() new_server = self.user_cloud.get_openstack_vars( self.user_cloud.create_server( wait=True, name=self.new_item_name + '_server', image=self.image, flavor=self.flavor, nics=[self.nic], ) ) self.addDetail( 'server', content.text_content(pprint.pformat(new_server)) ) self.assertNotEqual(new_server['private_v4'], '') def test_add_auto_ip(self): self._setup_networks() new_server = self.user_cloud.create_server( wait=True, name=self.new_item_name + '_server', image=self.image, flavor=self.flavor, nics=[self.nic], ) # ToDo: remove the following iteration when create_server waits for # the IP to be attached ip = None for _ in utils.iterate_timeout( self.timeout, "Timeout waiting for IP address to be attached" ): ip = meta.get_server_external_ipv4(self.user_cloud, new_server) if ip is not None: break new_server = self.user_cloud.get_server(new_server.id) self.addCleanup(self._cleanup_ips, new_server) def test_detach_ip_from_server(self): self._setup_networks() new_server = self.user_cloud.create_server( wait=True, name=self.new_item_name + '_server', image=self.image, flavor=self.flavor, nics=[self.nic], ) # ToDo: remove the following iteration when create_server waits for # the IP to be attached ip = None for _ in utils.iterate_timeout( 
self.timeout, "Timeout waiting for IP address to be attached" ): ip = meta.get_server_external_ipv4(self.user_cloud, new_server) if ip is not None: break new_server = self.user_cloud.get_server(new_server.id) self.addCleanup(self._cleanup_ips, new_server) f_ip = self.user_cloud.get_floating_ip( id=None, filters={'floating_ip_address': ip} ) self.user_cloud.detach_ip_from_server( server_id=new_server.id, floating_ip_id=f_ip['id'] ) def test_list_floating_ips(self): if self.operator_cloud: fip_admin = self.operator_cloud.create_floating_ip() self.addCleanup( self.operator_cloud.delete_floating_ip, fip_admin.id ) fip_user = self.user_cloud.create_floating_ip() self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id) # Get all the floating ips. if self.operator_cloud: fip_op_id_list = [ fip.id for fip in self.operator_cloud.list_floating_ips() ] fip_user_id_list = [ fip.id for fip in self.user_cloud.list_floating_ips() ] if self.user_cloud.has_service('network'): self.assertIn(fip_user.id, fip_user_id_list) # Neutron returns all FIP for all projects by default if self.operator_cloud and fip_admin: self.assertIn(fip_user.id, fip_op_id_list) # Ask Neutron for only a subset of all the FIPs. if self.operator_cloud: filtered_fip_id_list = [ fip.id for fip in self.operator_cloud.list_floating_ips( {'tenant_id': self.user_cloud.current_project_id} ) ] self.assertNotIn(fip_admin.id, filtered_fip_id_list) self.assertIn(fip_user.id, filtered_fip_id_list) else: if fip_admin: self.assertIn(fip_admin.id, fip_op_id_list) # By default, Nova returns only the FIPs that belong to the # project which made the listing request. 
if self.operator_cloud: self.assertNotIn(fip_user.id, fip_op_id_list) self.assertRaisesRegex( ValueError, "Nova-network don't support server-side.*", self.operator_cloud.list_floating_ips, filters={'foo': 'bar'}, ) def test_search_floating_ips(self): fip_user = self.user_cloud.create_floating_ip() self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id) self.assertIn( fip_user['id'], [fip.id for fip in self.user_cloud.search_floating_ips()], ) def test_get_floating_ip_by_id(self): fip_user = self.user_cloud.create_floating_ip() self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id) ret_fip = self.user_cloud.get_floating_ip_by_id(fip_user.id) self.assertEqual(fip_user, ret_fip) def test_available_floating_ip(self): fips_user = self.user_cloud.list_floating_ips() self.assertEqual(fips_user, []) new_fip = self.user_cloud.available_floating_ip() self.assertIsNotNone(new_fip) self.assertIn('id', new_fip) self.addCleanup(self.user_cloud.delete_floating_ip, new_fip.id) new_fips_user = self.user_cloud.list_floating_ips() self.assertEqual(new_fips_user, [new_fip]) reuse_fip = self.user_cloud.available_floating_ip() self.assertEqual(reuse_fip.id, new_fip.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_floating_ip_pool.py0000664000175000017500000000264400000000000027573 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ test_floating_ip_pool ---------------------------------- Functional tests for floating IP pool resource (managed by nova) """ from openstack.tests.functional import base # When using nova-network, floating IP pools are created with nova-manage # command. # When using Neutron, floating IP pools in Nova are mapped from external # network names. This only if the floating-ip-pools nova extension is # available. # For instance, for current implementation of hpcloud that's not true: # nova floating-ip-pool-list returns 404. class TestFloatingIPPool(base.BaseFunctionalTest): def test_list_floating_ip_pools(self): pools = self.user_cloud.list_floating_ip_pools() if not pools: self.assertFalse('no floating-ip pool available') for pool in pools: self.assertIn('name', pool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_groups.py0000664000175000017500000000777100000000000025574 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_groups ---------------------------------- Functional tests for keystone group resource. 
""" from openstack import exceptions from openstack.tests.functional import base class TestGroup(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") i_ver = self.operator_cloud.config.get_api_version('identity') if i_ver in ('2', '2.0'): self.skipTest('Identity service does not support groups') self.group_prefix = self.getUniqueString('group') self.addCleanup(self._cleanup_groups) def _cleanup_groups(self): exception_list = list() for group in self.operator_cloud.list_groups(): if group['name'].startswith(self.group_prefix): try: self.operator_cloud.delete_group(group['id']) except Exception as e: exception_list.append(str(e)) continue if exception_list: # Raise an error: we must make users aware that something went # wrong raise exceptions.SDKException('\n'.join(exception_list)) def test_create_group(self): group_name = self.group_prefix + '_create' group = self.operator_cloud.create_group(group_name, 'test group') for key in ('id', 'name', 'description', 'domain_id'): self.assertIn(key, group) self.assertEqual(group_name, group['name']) self.assertEqual('test group', group['description']) def test_delete_group(self): group_name = self.group_prefix + '_delete' group = self.operator_cloud.create_group(group_name, 'test group') self.assertIsNotNone(group) self.assertTrue(self.operator_cloud.delete_group(group_name)) results = self.operator_cloud.search_groups( filters=dict(name=group_name) ) self.assertEqual(0, len(results)) def test_delete_group_not_exists(self): self.assertFalse(self.operator_cloud.delete_group('xInvalidGroupx')) def test_search_groups(self): group_name = self.group_prefix + '_search' # Shouldn't find any group with this name yet results = self.operator_cloud.search_groups( filters=dict(name=group_name) ) self.assertEqual(0, len(results)) # Now create a new group group = self.operator_cloud.create_group(group_name, 'test group') 
self.assertEqual(group_name, group['name']) # Now we should find only the new group results = self.operator_cloud.search_groups( filters=dict(name=group_name) ) self.assertEqual(1, len(results)) self.assertEqual(group_name, results[0]['name']) def test_update_group(self): group_name = self.group_prefix + '_update' group_desc = 'test group' group = self.operator_cloud.create_group(group_name, group_desc) self.assertEqual(group_name, group['name']) self.assertEqual(group_desc, group['description']) updated_group_name = group_name + '_xyz' updated_group_desc = group_desc + ' updated' updated_group = self.operator_cloud.update_group( group_name, name=updated_group_name, description=updated_group_desc ) self.assertEqual(updated_group_name, updated_group['name']) self.assertEqual(updated_group_desc, updated_group['description']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_identity.py0000664000175000017500000003244000000000000026075 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_identity ---------------------------------- Functional tests for identity methods. 
""" import random import string from openstack import exceptions from openstack.tests.functional import base class TestIdentity(base.KeystoneBaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") self.role_prefix = 'test_role' + ''.join( random.choice(string.ascii_lowercase) for _ in range(5) ) self.user_prefix = self.getUniqueString('user') self.group_prefix = self.getUniqueString('group') self.addCleanup(self._cleanup_users) if self.identity_version not in ('2', '2.0'): self.addCleanup(self._cleanup_groups) self.addCleanup(self._cleanup_roles) def _cleanup_groups(self): exception_list = list() for group in self.operator_cloud.list_groups(): if group['name'].startswith(self.group_prefix): try: self.operator_cloud.delete_group(group['id']) except Exception as e: exception_list.append(str(e)) continue if exception_list: # Raise an error: we must make users aware that something went # wrong raise exceptions.SDKException('\n'.join(exception_list)) def _cleanup_users(self): exception_list = list() for user in self.operator_cloud.list_users(): if user['name'].startswith(self.user_prefix): try: self.operator_cloud.delete_user(user['id']) except Exception as e: exception_list.append(str(e)) continue if exception_list: raise exceptions.SDKException('\n'.join(exception_list)) def _cleanup_roles(self): exception_list = list() for role in self.operator_cloud.list_roles(): if role['name'].startswith(self.role_prefix): try: self.operator_cloud.delete_role(role['name']) except Exception as e: exception_list.append(str(e)) continue if exception_list: raise exceptions.SDKException('\n'.join(exception_list)) def _create_user(self, **kwargs): domain_id = None if self.identity_version not in ('2', '2.0'): domain = self.operator_cloud.get_domain('default') domain_id = domain['id'] return self.operator_cloud.create_user(domain_id=domain_id, **kwargs) def test_list_roles(self): roles = 
self.operator_cloud.list_roles() self.assertIsNotNone(roles) self.assertNotEqual([], roles) def test_get_role(self): role = self.operator_cloud.get_role('admin') self.assertIsNotNone(role) self.assertIn('id', role) self.assertIn('name', role) self.assertEqual('admin', role['name']) def test_search_roles(self): roles = self.operator_cloud.search_roles(filters={'name': 'admin'}) self.assertIsNotNone(roles) self.assertEqual(1, len(roles)) self.assertEqual('admin', roles[0]['name']) def test_create_role(self): role_name = self.role_prefix + '_create_role' role = self.operator_cloud.create_role(role_name) self.assertIsNotNone(role) self.assertIn('id', role) self.assertIn('name', role) self.assertEqual(role_name, role['name']) def test_delete_role(self): role_name = self.role_prefix + '_delete_role' role = self.operator_cloud.create_role(role_name) self.assertIsNotNone(role) self.assertTrue(self.operator_cloud.delete_role(role_name)) # TODO(Shrews): Once we can support assigning roles within shade, we # need to make this test a little more specific, and add more for testing # filtering functionality. 
def test_list_role_assignments(self): if self.identity_version in ('2', '2.0'): self.skipTest("Identity service does not support role assignments") assignments = self.operator_cloud.list_role_assignments() self.assertIsInstance(assignments, list) self.assertGreater(len(assignments), 0) def test_list_role_assignments_v2(self): user = self.operator_cloud.get_user('demo') project = self.operator_cloud.get_project('demo') assignments = self.operator_cloud.list_role_assignments( filters={'user': user['id'], 'project': project['id']} ) self.assertIsInstance(assignments, list) self.assertGreater(len(assignments), 0) def test_grant_revoke_role_user_project(self): user_name = self.user_prefix + '_user_project' user_email = 'nobody@nowhere.com' role_name = self.role_prefix + '_grant_user_project' role = self.operator_cloud.create_role(role_name) user = self._create_user( name=user_name, email=user_email, default_project='demo' ) self.assertTrue( self.operator_cloud.grant_role( role_name, user=user['id'], project='demo', wait=True ) ) assignments = self.operator_cloud.list_role_assignments( { 'role': role['id'], 'user': user['id'], 'project': self.operator_cloud.get_project('demo')['id'], } ) self.assertIsInstance(assignments, list) self.assertEqual(1, len(assignments)) self.assertTrue( self.operator_cloud.revoke_role( role_name, user=user['id'], project='demo', wait=True ) ) assignments = self.operator_cloud.list_role_assignments( { 'role': role['id'], 'user': user['id'], 'project': self.operator_cloud.get_project('demo')['id'], } ) self.assertIsInstance(assignments, list) self.assertEqual(0, len(assignments)) def test_grant_revoke_role_group_project(self): if self.identity_version in ('2', '2.0'): self.skipTest("Identity service does not support group") role_name = self.role_prefix + '_grant_group_project' role = self.operator_cloud.create_role(role_name) group_name = self.group_prefix + '_group_project' group = self.operator_cloud.create_group( name=group_name, 
description='test group', domain='default' ) self.assertTrue( self.operator_cloud.grant_role( role_name, group=group['id'], project='demo' ) ) assignments = self.operator_cloud.list_role_assignments( { 'role': role['id'], 'group': group['id'], 'project': self.operator_cloud.get_project('demo')['id'], } ) self.assertIsInstance(assignments, list) self.assertEqual(1, len(assignments)) self.assertTrue( self.operator_cloud.revoke_role( role_name, group=group['id'], project='demo' ) ) assignments = self.operator_cloud.list_role_assignments( { 'role': role['id'], 'group': group['id'], 'project': self.operator_cloud.get_project('demo')['id'], } ) self.assertIsInstance(assignments, list) self.assertEqual(0, len(assignments)) def test_grant_revoke_role_user_domain(self): if self.identity_version in ('2', '2.0'): self.skipTest("Identity service does not support domain") role_name = self.role_prefix + '_grant_user_domain' role = self.operator_cloud.create_role(role_name) user_name = self.user_prefix + '_user_domain' user_email = 'nobody@nowhere.com' user = self._create_user( name=user_name, email=user_email, default_project='demo' ) self.assertTrue( self.operator_cloud.grant_role( role_name, user=user['id'], domain='default' ) ) assignments = self.operator_cloud.list_role_assignments( { 'role': role['id'], 'user': user['id'], 'domain': self.operator_cloud.get_domain('default')['id'], } ) self.assertIsInstance(assignments, list) self.assertEqual(1, len(assignments)) self.assertTrue( self.operator_cloud.revoke_role( role_name, user=user['id'], domain='default' ) ) assignments = self.operator_cloud.list_role_assignments( { 'role': role['id'], 'user': user['id'], 'domain': self.operator_cloud.get_domain('default')['id'], } ) self.assertIsInstance(assignments, list) self.assertEqual(0, len(assignments)) def test_grant_revoke_role_group_domain(self): if self.identity_version in ('2', '2.0'): self.skipTest("Identity service does not support domain or group") role_name = 
self.role_prefix + '_grant_group_domain' role = self.operator_cloud.create_role(role_name) group_name = self.group_prefix + '_group_domain' group = self.operator_cloud.create_group( name=group_name, description='test group', domain='default' ) self.assertTrue( self.operator_cloud.grant_role( role_name, group=group['id'], domain='default' ) ) assignments = self.operator_cloud.list_role_assignments( { 'role': role['id'], 'group': group['id'], 'domain': self.operator_cloud.get_domain('default')['id'], } ) self.assertIsInstance(assignments, list) self.assertEqual(1, len(assignments)) self.assertTrue( self.operator_cloud.revoke_role( role_name, group=group['id'], domain='default' ) ) assignments = self.operator_cloud.list_role_assignments( { 'role': role['id'], 'group': group['id'], 'domain': self.operator_cloud.get_domain('default')['id'], } ) self.assertIsInstance(assignments, list) self.assertEqual(0, len(assignments)) def test_grant_revoke_role_user_system(self): role_name = self.role_prefix + '_grant_user_system' role = self.operator_cloud.create_role(role_name) user_name = self.user_prefix + '_user_system' user_email = 'nobody@nowhere.com' user = self._create_user( name=user_name, email=user_email, default_project='demo' ) self.assertTrue( self.operator_cloud.grant_role( role_name, user=user['id'], system='all' ) ) assignments = self.operator_cloud.list_role_assignments( {'role': role['id'], 'user': user['id'], 'system': 'all'} ) self.assertIsInstance(assignments, list) self.assertEqual(1, len(assignments)) self.assertTrue( self.operator_cloud.revoke_role( role_name, user=user['id'], system='all' ) ) assignments = self.operator_cloud.list_role_assignments( {'role': role['id'], 'user': user['id'], 'system': 'all'} ) self.assertIsInstance(assignments, list) self.assertEqual(0, len(assignments)) def test_grant_revoke_role_group_system(self): if self.identity_version in ('2', '2.0'): self.skipTest("Identity service does not support system or group") role_name = 
self.role_prefix + '_grant_group_system' role = self.operator_cloud.create_role(role_name) group_name = self.group_prefix + '_group_system' group = self.operator_cloud.create_group( name=group_name, description='test group' ) self.assertTrue( self.operator_cloud.grant_role( role_name, group=group['id'], system='all' ) ) assignments = self.operator_cloud.list_role_assignments( {'role': role['id'], 'group': group['id'], 'system': 'all'} ) self.assertIsInstance(assignments, list) self.assertEqual(1, len(assignments)) self.assertTrue( self.operator_cloud.revoke_role( role_name, group=group['id'], system='all' ) ) assignments = self.operator_cloud.list_role_assignments( {'role': role['id'], 'group': group['id'], 'system': 'all'} ) self.assertIsInstance(assignments, list) self.assertEqual(0, len(assignments)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_image.py0000664000175000017500000001531600000000000025331 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_compute ---------------------------------- Functional tests for image methods. 
""" import filecmp import os import tempfile from openstack.tests.functional import base class TestImage(base.BaseFunctionalTest): def test_create_image(self): test_image = tempfile.NamedTemporaryFile(delete=False) test_image.write(b'\0' * 1024 * 1024) test_image.close() image_name = self.getUniqueString('image') try: self.user_cloud.create_image( name=image_name, filename=test_image.name, disk_format='raw', container_format='bare', min_disk=10, min_ram=1024, tags=['custom'], wait=True, ) finally: self.user_cloud.delete_image(image_name, wait=True) def test_download_image(self): test_image = tempfile.NamedTemporaryFile(delete=False) self.addCleanup(os.remove, test_image.name) test_image.write(b'\0' * 1024 * 1024) test_image.close() image_name = self.getUniqueString('image') self.user_cloud.create_image( name=image_name, filename=test_image.name, disk_format='raw', container_format='bare', min_disk=10, min_ram=1024, wait=True, ) self.addCleanup(self.user_cloud.delete_image, image_name, wait=True) output = os.path.join(tempfile.gettempdir(), self.getUniqueString()) self.user_cloud.download_image(image_name, output) self.addCleanup(os.remove, output) self.assertTrue( filecmp.cmp(test_image.name, output), "Downloaded contents don't match created image", ) def test_create_image_skip_duplicate(self): test_image = tempfile.NamedTemporaryFile(delete=False) test_image.write(b'\0' * 1024 * 1024) test_image.close() image_name = self.getUniqueString('image') try: first_image = self.user_cloud.create_image( name=image_name, filename=test_image.name, disk_format='raw', container_format='bare', min_disk=10, min_ram=1024, validate_checksum=True, wait=True, ) second_image = self.user_cloud.create_image( name=image_name, filename=test_image.name, disk_format='raw', container_format='bare', min_disk=10, min_ram=1024, validate_checksum=True, wait=True, ) self.assertEqual(first_image.id, second_image.id) finally: self.user_cloud.delete_image(image_name, wait=True) def 
test_create_image_force_duplicate(self): test_image = tempfile.NamedTemporaryFile(delete=False) test_image.write(b'\0' * 1024 * 1024) test_image.close() image_name = self.getUniqueString('image') first_image = None second_image = None try: first_image = self.user_cloud.create_image( name=image_name, filename=test_image.name, disk_format='raw', container_format='bare', min_disk=10, min_ram=1024, wait=True, ) second_image = self.user_cloud.create_image( name=image_name, filename=test_image.name, disk_format='raw', container_format='bare', min_disk=10, min_ram=1024, allow_duplicates=True, wait=True, ) self.assertNotEqual(first_image.id, second_image.id) finally: if first_image: self.user_cloud.delete_image(first_image.id, wait=True) if second_image: self.user_cloud.delete_image(second_image.id, wait=True) def test_create_image_update_properties(self): test_image = tempfile.NamedTemporaryFile(delete=False) test_image.write(b'\0' * 1024 * 1024) test_image.close() image_name = self.getUniqueString('image') try: image = self.user_cloud.create_image( name=image_name, filename=test_image.name, disk_format='raw', container_format='bare', min_disk=10, min_ram=1024, wait=True, ) self.user_cloud.update_image_properties( image=image, name=image_name, foo='bar' ) image = self.user_cloud.get_image(image_name) self.assertIn('foo', image.properties) self.assertEqual(image.properties['foo'], 'bar') finally: self.user_cloud.delete_image(image_name, wait=True) def test_create_image_without_filename(self): image_name = self.getUniqueString('image') image = self.user_cloud.create_image( name=image_name, disk_format='raw', container_format='bare', min_disk=10, min_ram=1024, allow_duplicates=True, wait=False, ) self.assertEqual(image_name, image.name) self.user_cloud.delete_image(image.id, wait=True) def test_get_image_by_id(self): test_image = tempfile.NamedTemporaryFile(delete=False) test_image.write(b'\0' * 1024 * 1024) test_image.close() image_name = self.getUniqueString('image') try: 
image = self.user_cloud.create_image( name=image_name, filename=test_image.name, disk_format='raw', container_format='bare', min_disk=10, min_ram=1024, wait=True, ) image = self.user_cloud.get_image_by_id(image.id) self.assertEqual(image_name, image.name) self.assertEqual('raw', image.disk_format) finally: self.user_cloud.delete_image(image_name, wait=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_inventory.py0000664000175000017500000000633600000000000026306 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_inventory ---------------------------------- Functional tests for inventory methods. """ from openstack.cloud import inventory from openstack.tests.functional import base class TestInventory(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") # This needs to use an admin account, otherwise a public IP # is not allocated from devstack. 
self.inventory = inventory.OpenStackInventory(cloud='devstack-admin') self.server_name = self.getUniqueString('inventory') self.addCleanup(self._cleanup_server) server = self.operator_cloud.create_server( name=self.server_name, image=self.image, flavor=self.flavor, wait=True, auto_ip=True, network='public', ) self.server_id = server['id'] def _cleanup_server(self): self.user_cloud.delete_server(self.server_id, wait=True) def _test_host_content(self, host): self.assertEqual(host['image']['id'], self.image.id) self.assertIsInstance(host['volumes'], list) self.assertIsInstance(host['metadata'], dict) self.assertIn('interface_ip', host) self.assertIn('ram', host['flavor']) def _test_expanded_host_content(self, host): self.assertEqual(host['image']['name'], self.image.name) self.assertEqual(host['flavor']['name'], self.flavor.name) def test_get_host(self): host = self.inventory.get_host(self.server_id) self.assertIsNotNone(host) self.assertEqual(host['name'], self.server_name) self._test_host_content(host) self._test_expanded_host_content(host) host_found = False for host in self.inventory.list_hosts(): if host['id'] == self.server_id: host_found = True self._test_host_content(host) self.assertTrue(host_found) def test_get_host_no_detail(self): host = self.inventory.get_host(self.server_id, expand=False) self.assertIsNotNone(host) self.assertEqual(host['name'], self.server_name) self.assertEqual(host['image']['id'], self.image.id) self.assertNotIn('links', host['image']) self.assertNotIn('name', host['name']) self.assertIn('ram', host['flavor']) host_found = False for host in self.inventory.list_hosts(expand=False): if host['id'] == self.server_id: host_found = True self._test_host_content(host) self.assertTrue(host_found) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_keypairs.py0000664000175000017500000000457000000000000026076 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_keypairs ---------------------------------- Functional tests for keypairs methods """ from openstack.tests import fakes from openstack.tests.functional import base class TestKeypairs(base.BaseFunctionalTest): def test_create_and_delete(self): '''Test creating and deleting keypairs functionality''' name = self.getUniqueString('keypair') self.addCleanup(self.user_cloud.delete_keypair, name) keypair = self.user_cloud.create_keypair(name=name) self.assertEqual(keypair['name'], name) self.assertIsNotNone(keypair['public_key']) self.assertIsNotNone(keypair['private_key']) self.assertIsNotNone(keypair['fingerprint']) self.assertEqual(keypair['type'], 'ssh') keypairs = self.user_cloud.list_keypairs() self.assertIn(name, [k['name'] for k in keypairs]) self.user_cloud.delete_keypair(name) keypairs = self.user_cloud.list_keypairs() self.assertNotIn(name, [k['name'] for k in keypairs]) def test_create_and_delete_with_key(self): '''Test creating and deleting keypairs functionality''' name = self.getUniqueString('keypair') self.addCleanup(self.user_cloud.delete_keypair, name) keypair = self.user_cloud.create_keypair( name=name, public_key=fakes.FAKE_PUBLIC_KEY ) self.assertEqual(keypair['name'], name) self.assertIsNotNone(keypair['public_key']) self.assertIsNone(keypair['private_key']) self.assertIsNotNone(keypair['fingerprint']) self.assertEqual(keypair['type'], 'ssh') keypairs = 
self.user_cloud.list_keypairs() self.assertIn(name, [k['name'] for k in keypairs]) self.user_cloud.delete_keypair(name) keypairs = self.user_cloud.list_keypairs() self.assertNotIn(name, [k['name'] for k in keypairs]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_limits.py0000664000175000017500000000406500000000000025547 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" test_limits ---------------------------------- Functional tests for limits method """ from openstack.compute.v2 import limits as _limits from openstack.tests.functional import base class TestUsage(base.BaseFunctionalTest): def test_get_our_compute_limits(self): '''Test quotas functionality''' limits = self.user_cloud.get_compute_limits() self.assertIsNotNone(limits) self.assertIsInstance(limits, _limits.AbsoluteLimits) self.assertIsNotNone(limits.server_meta) self.assertIsNotNone(limits.image_meta) def test_get_other_compute_limits(self): '''Test quotas functionality''' if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") limits = self.operator_cloud.get_compute_limits('demo') self.assertIsNotNone(limits) self.assertTrue(hasattr(limits, 'server_meta')) # Test normalize limits self.assertFalse(hasattr(limits, 'maxImageMeta')) def test_get_our_volume_limits(self): '''Test quotas functionality''' limits = self.user_cloud.get_volume_limits() self.assertIsNotNone(limits) self.assertFalse(hasattr(limits, 'maxTotalVolumes')) def test_get_other_volume_limits(self): '''Test quotas functionality''' if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") limits = self.operator_cloud.get_volume_limits('demo') self.assertFalse(hasattr(limits, 'maxTotalVolumes')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_magnum_services.py0000664000175000017500000000260600000000000027434 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_magnum_services -------------------- Functional tests for services method. """ from openstack.tests.functional import base class TestMagnumServices(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.user_cloud.has_service( 'container-infrastructure-management' ): self.skipTest('Container service not supported by cloud') def test_magnum_services(self): '''Test magnum services functionality''' # Test that we can list services services = self.operator_cloud.list_magnum_services() self.assertEqual(1, len(services)) self.assertEqual(services[0]['id'], 1) self.assertEqual('up', services[0]['state']) self.assertEqual('magnum-conductor', services[0]['binary']) self.assertGreater(services[0]['report_count'], 0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_network.py0000664000175000017500000001244600000000000025741 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" test_network ---------------------------------- Functional tests for network methods. """ from openstack import exceptions from openstack.tests.functional import base class TestNetwork(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") if not self.operator_cloud.has_service('network'): self.skipTest('Network service not supported by cloud') self.network_name = self.getUniqueString('network') self.addCleanup(self._cleanup_networks) def _cleanup_networks(self): exception_list = list() for network in self.operator_cloud.list_networks(): if network['name'].startswith(self.network_name): try: self.operator_cloud.delete_network(network['name']) except Exception as e: exception_list.append(str(e)) continue if exception_list: raise exceptions.SDKException('\n'.join(exception_list)) def test_create_network_basic(self): net1 = self.operator_cloud.create_network(name=self.network_name) self.assertIn('id', net1) self.assertEqual(self.network_name, net1['name']) self.assertFalse(net1['shared']) self.assertFalse(net1['router:external']) self.assertTrue(net1['admin_state_up']) self.assertTrue(net1['port_security_enabled']) def test_get_network_by_id(self): net1 = self.operator_cloud.create_network(name=self.network_name) self.assertIn('id', net1) self.assertEqual(self.network_name, net1['name']) self.assertFalse(net1['shared']) self.assertFalse(net1['router:external']) self.assertTrue(net1['admin_state_up']) ret_net1 = self.operator_cloud.get_network_by_id(net1.id) self.assertIn('id', ret_net1) self.assertEqual(self.network_name, ret_net1['name']) self.assertFalse(ret_net1['shared']) self.assertFalse(ret_net1['router:external']) self.assertTrue(ret_net1['admin_state_up']) def test_create_network_advanced(self): net1 = self.operator_cloud.create_network( name=self.network_name, shared=True, external=True, admin_state_up=False, ) self.assertIn('id', net1) 
self.assertEqual(self.network_name, net1['name']) self.assertTrue(net1['router:external']) self.assertTrue(net1['shared']) self.assertFalse(net1['admin_state_up']) def test_create_network_provider_flat(self): existing_public = self.operator_cloud.search_networks( filters={'provider:network_type': 'flat'} ) if existing_public: self.skipTest('Physical network already allocated') net1 = self.operator_cloud.create_network( name=self.network_name, shared=True, provider={ 'physical_network': 'public', 'network_type': 'flat', }, ) self.assertIn('id', net1) self.assertEqual(self.network_name, net1['name']) self.assertEqual('flat', net1['provider:network_type']) self.assertEqual('public', net1['provider:physical_network']) self.assertIsNone(net1['provider:segmentation_id']) def test_create_network_port_security_disabled(self): net1 = self.operator_cloud.create_network( name=self.network_name, port_security_enabled=False, ) self.assertIn('id', net1) self.assertEqual(self.network_name, net1['name']) self.assertTrue(net1['admin_state_up']) self.assertFalse(net1['shared']) self.assertFalse(net1['router:external']) self.assertFalse(net1['port_security_enabled']) def test_list_networks_filtered(self): net1 = self.operator_cloud.create_network(name=self.network_name) self.assertIsNotNone(net1) net2 = self.operator_cloud.create_network( name=self.network_name + 'other' ) self.assertIsNotNone(net2) match = self.operator_cloud.list_networks( filters=dict(name=self.network_name) ) self.assertEqual(1, len(match)) self.assertEqual(net1['name'], match[0]['name']) def test_update_network(self): net = self.operator_cloud.create_network(name=self.network_name) self.assertEqual(net.name, self.network_name) new_name = self.getUniqueString('network') net = self.operator_cloud.update_network(net.id, name=new_name) self.addCleanup(self.operator_cloud.delete_network, new_name) self.assertNotEqual(net.name, self.network_name) self.assertEqual(net.name, new_name) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_object.py0000664000175000017500000001565700000000000025525 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_object ---------------------------------- Functional tests for object methods. """ import random import string import tempfile from testtools import content from openstack.tests.functional import base class TestObject(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.user_cloud.has_service('object-store'): self.skipTest('Object service not supported by cloud') def test_create_object(self): '''Test uploading small and large files.''' container_name = self.getUniqueString('container') self.addDetail('container', content.text_content(container_name)) self.addCleanup(self.user_cloud.delete_container, container_name) self.user_cloud.create_container(container_name) container = self.user_cloud.get_container(container_name) self.assertEqual(container_name, container.name) self.assertEqual( [], self.user_cloud.list_containers(prefix='somethin') ) sizes = ( (64 * 1024, 1), # 64K, one segment (64 * 1024, 5), # 64MB, 5 segments ) for size, nseg in sizes: segment_size = int(round(size / nseg)) with tempfile.NamedTemporaryFile() as fake_file: fake_content = ''.join( random.SystemRandom().choice( string.ascii_uppercase + string.digits ) for _ in range(size) 
).encode('latin-1') fake_file.write(fake_content) fake_file.flush() name = 'test-%d' % size self.addCleanup( self.user_cloud.delete_object, container_name, name ) self.user_cloud.create_object( container_name, name, fake_file.name, segment_size=segment_size, metadata={'foo': 'bar'}, ) self.assertFalse( self.user_cloud.is_object_stale( container_name, name, fake_file.name ) ) self.assertEqual( 'bar', self.user_cloud.get_object_metadata(container_name, name)[ 'foo' ], ) self.user_cloud.update_object( container=container_name, name=name, metadata={'testk': 'testv'}, ) self.assertEqual( 'testv', self.user_cloud.get_object_metadata(container_name, name)[ 'testk' ], ) self.assertIsNotNone( self.user_cloud.get_object(container_name, name) ) self.assertEqual( name, self.user_cloud.list_objects(container_name)[0]['name'] ) self.assertEqual( [], self.user_cloud.list_objects(container_name, prefix='abc') ) self.assertTrue( self.user_cloud.delete_object(container_name, name) ) self.assertEqual([], self.user_cloud.list_objects(container_name)) self.assertEqual( container_name, self.user_cloud.get_container(container_name).name ) self.user_cloud.delete_container(container_name) def test_download_object_to_file(self): '''Test uploading small and large files.''' container_name = self.getUniqueString('container') self.addDetail('container', content.text_content(container_name)) self.addCleanup(self.user_cloud.delete_container, container_name) self.user_cloud.create_container(container_name) self.assertEqual( container_name, self.user_cloud.list_containers()[0]['name'] ) sizes = ( (64 * 1024, 1), # 64K, one segment (64 * 1024, 5), # 64MB, 5 segments ) for size, nseg in sizes: fake_content = b'' segment_size = int(round(size / nseg)) with tempfile.NamedTemporaryFile() as fake_file: fake_content = ''.join( random.SystemRandom().choice( string.ascii_uppercase + string.digits ) for _ in range(size) ).encode('latin-1') fake_file.write(fake_content) fake_file.flush() name = 'test-%d' % 
size self.addCleanup( self.user_cloud.delete_object, container_name, name ) self.user_cloud.create_object( container_name, name, fake_file.name, segment_size=segment_size, metadata={'foo': 'bar'}, ) self.assertFalse( self.user_cloud.is_object_stale( container_name, name, fake_file.name ) ) self.assertEqual( 'bar', self.user_cloud.get_object_metadata(container_name, name)[ 'foo' ], ) self.user_cloud.update_object( container=container_name, name=name, metadata={'testk': 'testv'}, ) self.assertEqual( 'testv', self.user_cloud.get_object_metadata(container_name, name)[ 'testk' ], ) with tempfile.NamedTemporaryFile() as fake_file: self.user_cloud.get_object( container_name, name, outfile=fake_file.name ) downloaded_content = open(fake_file.name, 'rb').read() self.assertEqual(fake_content, downloaded_content) self.assertEqual( name, self.user_cloud.list_objects(container_name)[0]['name'] ) self.assertTrue( self.user_cloud.delete_object(container_name, name) ) self.assertEqual([], self.user_cloud.list_objects(container_name)) self.assertEqual( container_name, self.user_cloud.list_containers()[0]['name'] ) self.user_cloud.delete_container(container_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_port.py0000664000175000017500000001221300000000000025224 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ test_port ---------------------------------- Functional tests for port resource. """ import random import string from openstack import exceptions from openstack.tests.functional import base class TestPort(base.BaseFunctionalTest): def setUp(self): super().setUp() # Skip Neutron tests if neutron is not present if not self.user_cloud.has_service('network'): self.skipTest('Network service not supported by cloud') net_name = self.getUniqueString('CloudPortName') self.net = self.user_cloud.network.create_network(name=net_name) self.addCleanup(self.user_cloud.network.delete_network, self.net.id) # Generate a unique port name to allow concurrent tests self.new_port_name = 'test_' + ''.join( random.choice(string.ascii_lowercase) for _ in range(5) ) self.addCleanup(self._cleanup_ports) def _cleanup_ports(self): exception_list = list() for p in self.user_cloud.list_ports(): if p['name'].startswith(self.new_port_name): try: self.user_cloud.delete_port(name_or_id=p['id']) except Exception as e: # We were unable to delete this port, let's try with next exception_list.append(str(e)) continue if exception_list: # Raise an error: we must make users aware that something went # wrong raise exceptions.SDKException('\n'.join(exception_list)) def test_create_port(self): port_name = self.new_port_name + '_create' port = self.user_cloud.create_port( network_id=self.net.id, name=port_name ) self.assertIsInstance(port, dict) self.assertIn('id', port) self.assertEqual(port.get('name'), port_name) def test_get_port(self): port_name = self.new_port_name + '_get' port = self.user_cloud.create_port( network_id=self.net.id, name=port_name ) self.assertIsInstance(port, dict) self.assertIn('id', port) self.assertEqual(port.get('name'), port_name) updated_port = self.user_cloud.get_port(name_or_id=port['id']) # extra_dhcp_opts is added later by Neutron... 
if 'extra_dhcp_opts' in updated_port and 'extra_dhcp_opts' not in port: del updated_port['extra_dhcp_opts'] self.assertEqual(port, updated_port) def test_get_port_by_id(self): port_name = self.new_port_name + '_get_by_id' port = self.user_cloud.create_port( network_id=self.net.id, name=port_name ) self.assertIsInstance(port, dict) self.assertIn('id', port) self.assertEqual(port.get('name'), port_name) updated_port = self.user_cloud.get_port_by_id(port['id']) # extra_dhcp_opts is added later by Neutron... if 'extra_dhcp_opts' in updated_port and 'extra_dhcp_opts' not in port: del updated_port['extra_dhcp_opts'] self.assertEqual(port, updated_port) def test_update_port(self): port_name = self.new_port_name + '_update' new_port_name = port_name + '_new' self.user_cloud.create_port(network_id=self.net.id, name=port_name) port = self.user_cloud.update_port( name_or_id=port_name, name=new_port_name ) self.assertIsInstance(port, dict) self.assertEqual(port.get('name'), new_port_name) updated_port = self.user_cloud.get_port(name_or_id=port['id']) self.assertEqual(port.get('name'), new_port_name) port.pop('revision_number', None) port.pop('revision_number', None) port.pop('updated_at', None) port.pop('updated_at', None) updated_port.pop('revision_number', None) updated_port.pop('revision_number', None) updated_port.pop('updated_at', None) updated_port.pop('updated_at', None) self.assertEqual(port, updated_port) def test_delete_port(self): port_name = self.new_port_name + '_delete' port = self.user_cloud.create_port( network_id=self.net.id, name=port_name ) self.assertIsInstance(port, dict) self.assertIn('id', port) self.assertEqual(port.get('name'), port_name) updated_port = self.user_cloud.get_port(name_or_id=port['id']) self.assertIsNotNone(updated_port) self.user_cloud.delete_port(name_or_id=port_name) updated_port = self.user_cloud.get_port(name_or_id=port['id']) self.assertIsNone(updated_port) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_project.py0000664000175000017500000001161000000000000025706 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_project ---------------------------------- Functional tests for project resource. """ import pprint from openstack import exceptions from openstack.tests.functional import base class TestProject(base.KeystoneBaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") self.new_project_name = self.getUniqueString('project') self.addCleanup(self._cleanup_projects) def _cleanup_projects(self): exception_list = list() for p in self.operator_cloud.list_projects(): if p['name'].startswith(self.new_project_name): try: self.operator_cloud.delete_project(p['id']) except Exception as e: exception_list.append(str(e)) continue if exception_list: raise exceptions.SDKException('\n'.join(exception_list)) def test_create_project(self): project_name = self.new_project_name + '_create' params = { 'name': project_name, 'description': 'test_create_project', } if self.identity_version == '3': params['domain_id'] = self.operator_cloud.get_domain('default')[ 'id' ] project = self.operator_cloud.create_project(**params) self.assertIsNotNone(project) self.assertEqual(project_name, project['name']) self.assertEqual('test_create_project', project['description']) 
user_id = self.operator_cloud.current_user_id # Grant the current user access to the project self.assertTrue( self.operator_cloud.grant_role( 'member', user=user_id, project=project['id'], wait=True ) ) self.addCleanup( self.operator_cloud.revoke_role, 'member', user=user_id, project=project['id'], wait=True, ) new_cloud = self.operator_cloud.connect_as_project(project) self.add_info_on_exception( 'new_cloud_config', pprint.pformat(new_cloud.config.config) ) location = new_cloud.current_location self.assertEqual(project_name, location['project']['name']) def test_update_project(self): project_name = self.new_project_name + '_update' params = { 'name': project_name, 'description': 'test_update_project', 'enabled': True, } if self.identity_version == '3': params['domain_id'] = self.operator_cloud.get_domain('default')[ 'id' ] project = self.operator_cloud.create_project(**params) updated_project = self.operator_cloud.update_project( project_name, enabled=False, description='new' ) self.assertIsNotNone(updated_project) self.assertEqual(project['id'], updated_project['id']) self.assertEqual(project['name'], updated_project['name']) self.assertEqual(updated_project['description'], 'new') self.assertTrue(project['enabled']) self.assertFalse(updated_project['enabled']) # Revert the description and verify the project is still disabled updated_project = self.operator_cloud.update_project( project_name, description=params['description'] ) self.assertIsNotNone(updated_project) self.assertEqual(project['id'], updated_project['id']) self.assertEqual(project['name'], updated_project['name']) self.assertEqual( project['description'], updated_project['description'] ) self.assertTrue(project['enabled']) self.assertFalse(updated_project['enabled']) def test_delete_project(self): project_name = self.new_project_name + '_delete' params = {'name': project_name} if self.identity_version == '3': params['domain_id'] = self.operator_cloud.get_domain('default')[ 'id' ] project = 
self.operator_cloud.create_project(**params) self.assertIsNotNone(project) self.assertTrue(self.operator_cloud.delete_project(project['id'])) def test_delete_project_not_found(self): self.assertFalse(self.operator_cloud.delete_project('doesNotExist')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_project_cleanup.py0000664000175000017500000003466400000000000027433 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_project_cleanup ---------------------------------- Functional tests for project cleanup methods. 
""" import queue from openstack.network.v2 import network as _network from openstack import resource from openstack.tests.functional import base class TestProjectCleanup(base.BaseFunctionalTest): _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_CLEANUP' def setUp(self): super().setUp() if not self.user_cloud_alt: self.skipTest("Alternate demo cloud is required for this test") self.conn = self.user_cloud_alt self.network_name = self.getUniqueString('network') def _create_network_resources(self): conn = self.conn self.net = conn.network.create_network( name=self.network_name, ) self.subnet = conn.network.create_subnet( name=self.getUniqueString('subnet'), network_id=self.net.id, cidr='192.169.1.0/24', ip_version=4, ) self.router = conn.network.create_router( name=self.getUniqueString('router') ) conn.network.add_interface_to_router( self.router.id, subnet_id=self.subnet.id ) def test_cleanup(self): self._create_network_resources() status_queue: queue.Queue[resource.Resource] = queue.Queue() # First round - check no resources are old enough self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, filters={'created_at': '2000-01-01'}, ) self.assertTrue(status_queue.empty()) # Second round - resource evaluation function return false, ensure # nothing identified self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, filters={'created_at': '2200-01-01'}, resource_evaluation_fn=lambda x, y, z: False, ) self.assertTrue(status_queue.empty()) # Third round - filters set too low self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, filters={'created_at': '2200-01-01'}, ) objects = [] while not status_queue.empty(): objects.append(status_queue.get()) # At least known networks should be identified net_names = list(obj.name for obj in objects) self.assertIn(self.network_name, net_names) # Fourth round - dry run with no filters, ensure everything identified self.conn.project_cleanup( 
dry_run=True, wait_timeout=120, status_queue=status_queue ) objects = [] while not status_queue.empty(): objects.append(status_queue.get()) net_names = list(obj.name for obj in objects) self.assertIn(self.network_name, net_names) # Ensure network still exists net = self.conn.network.get_network(self.net.id) self.assertEqual(net.name, self.net.name) # Last round - do a real cleanup self.conn.project_cleanup( dry_run=False, wait_timeout=600, status_queue=status_queue ) objects = [] while not status_queue.empty(): objects.append(status_queue.get()) nets = self.conn.network.networks() net_names = list(obj.name for obj in nets) # Since we might not have enough privs to drop all nets - ensure # we do not have our known one self.assertNotIn(self.network_name, net_names) def test_block_storage_cleanup(self): if not self.user_cloud.has_service('object-store'): self.skipTest('Object service is requred, but not available') status_queue: queue.Queue[resource.Resource] = queue.Queue() vol = self.conn.block_storage.create_volume(name='vol1', size='1') self.conn.block_storage.wait_for_status(vol) s1 = self.conn.block_storage.create_snapshot(volume_id=vol.id) self.conn.block_storage.wait_for_status(s1) b1 = self.conn.block_storage.create_backup(volume_id=vol.id) self.conn.block_storage.wait_for_status(b1) b2 = self.conn.block_storage.create_backup( volume_id=vol.id, is_incremental=True, snapshot_id=s1.id ) self.conn.block_storage.wait_for_status(b2) b3 = self.conn.block_storage.create_backup( volume_id=vol.id, is_incremental=True, snapshot_id=s1.id ) self.conn.block_storage.wait_for_status(b3) # First round - check no resources are old enough self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, filters={'created_at': '2000-01-01'}, ) self.assertTrue(status_queue.empty()) # Second round - resource evaluation function return false, ensure # nothing identified self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, 
filters={'created_at': '2200-01-01'}, resource_evaluation_fn=lambda x, y, z: False, ) self.assertTrue(status_queue.empty()) # Third round - filters set too low self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, filters={'created_at': '2200-01-01'}, ) objects = [] while not status_queue.empty(): objects.append(status_queue.get()) # At least known networks should be identified volumes = list(obj.id for obj in objects) self.assertIn(vol.id, volumes) # Fourth round - dry run with no filters, ensure everything identified self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue ) objects = [] while not status_queue.empty(): objects.append(status_queue.get()) vol_ids = list(obj.id for obj in objects) self.assertIn(vol.id, vol_ids) # Ensure volume still exists vol_check = self.conn.block_storage.get_volume(vol.id) self.assertEqual(vol.name, vol_check.name) # Last round - do a real cleanup self.conn.project_cleanup( dry_run=False, wait_timeout=600, status_queue=status_queue ) # Ensure no backups remain self.assertEqual(0, len(list(self.conn.block_storage.backups()))) # Ensure no snapshots remain self.assertEqual(0, len(list(self.conn.block_storage.snapshots()))) def test_cleanup_swift(self): if not self.user_cloud.has_service('object-store'): self.skipTest('Object service is requred, but not available') status_queue: queue.Queue[resource.Resource] = queue.Queue() self.conn.object_store.create_container('test_cleanup') for i in range(1, 10): self.conn.object_store.create_object( "test_cleanup", f"test{i}", data="test{i}" ) # First round - check no resources are old enough self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, filters={'updated_at': '2000-01-01'}, ) self.assertTrue(status_queue.empty()) # Second round - filters set too low self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, filters={'updated_at': '2200-01-01'}, ) objects = [] while 
not status_queue.empty(): objects.append(status_queue.get()) # At least known objects should be identified obj_names = list(obj.name for obj in objects) self.assertIn('test1', obj_names) # Ensure object still exists obj = self.conn.object_store.get_object("test1", "test_cleanup") self.assertIsNotNone(obj) # Last round - do a real cleanup self.conn.project_cleanup( dry_run=False, wait_timeout=600, status_queue=status_queue ) objects.clear() while not status_queue.empty(): objects.append(status_queue.get()) self.assertIsNone(self.conn.get_container('test_container')) def test_cleanup_vpnaas(self): if not list(self.conn.network.service_providers(service_type="VPN")): self.skipTest("VPNaaS plugin is requred, but not available") status_queue: queue.Queue[resource.Resource] = queue.Queue() # Find available external networks and use one for network in self.conn.network.networks(): if network.is_router_external: external_network: _network.Network = network break else: self.skipTest("External network is required, but not available") # Create left network resources network_left = self.conn.network.create_network(name="network_left") subnet_left = self.conn.network.create_subnet( name="subnet_left", network_id=network_left.id, cidr="192.168.1.0/24", ip_version=4, ) router_left = self.conn.network.create_router(name="router_left") self.conn.network.add_interface_to_router( router=router_left.id, subnet_id=subnet_left.id ) router_left = self.conn.network.update_router( router_left, external_gateway_info={"network_id": external_network.id}, ) # Create right network resources network_right = self.conn.network.create_network(name="network_right") subnet_right = self.conn.network.create_subnet( name="subnet_right", network_id=network_right.id, cidr="192.168.2.0/24", ip_version=4, ) router_right = self.conn.network.create_router(name="router_right") self.conn.network.add_interface_to_router( router=router_right.id, subnet_id=subnet_right.id ) router_right = 
self.conn.network.update_router( router_right, external_gateway_info={"network_id": external_network.id}, ) # Create VPNaaS resources ike_policy = self.conn.network.create_vpn_ike_policy(name="ike_policy") ipsec_policy = self.conn.network.create_vpn_ipsec_policy( name="ipsec_policy" ) vpn_service = self.conn.network.create_vpn_service( name="vpn_service", router_id=router_left.id ) ep_group_local = self.conn.network.create_vpn_endpoint_group( name="endpoint_group_local", type="subnet", endpoints=[subnet_left.id], ) ep_group_peer = self.conn.network.create_vpn_endpoint_group( name="endpoint_group_peer", type="cidr", endpoints=[subnet_right.cidr], ) router_right_ip = router_right.external_gateway_info[ 'external_fixed_ips' ][0]['ip_address'] ipsec_site_conn = self.conn.network.create_vpn_ipsec_site_connection( name="ipsec_site_connection", vpnservice_id=vpn_service.id, ikepolicy_id=ike_policy.id, ipsecpolicy_id=ipsec_policy.id, local_ep_group_id=ep_group_local.id, peer_ep_group_id=ep_group_peer.id, psk="test", peer_address=router_right_ip, peer_id=router_right_ip, ) # First round - check no resources are old enough self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, filters={'created_at': '2000-01-01'}, ) self.assertTrue(status_queue.empty()) # Second round - resource evaluation function return false, ensure # nothing identified self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, filters={'created_at': '2200-01-01'}, resource_evaluation_fn=lambda x, y, z: False, ) self.assertTrue(status_queue.empty()) # Third round - filters set too low self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue, filters={'created_at': '2200-01-01'}, ) objects = [] while not status_queue.empty(): objects.append(status_queue.get()) # VPN resources do not have a created_at property # Check for the network instead resource_ids = list(obj.id for obj in objects) self.assertIn(network_left.id, 
resource_ids) # Fourth round - dry run with no filters, ensure everything identified self.conn.project_cleanup( dry_run=True, wait_timeout=120, status_queue=status_queue ) objects = [] while not status_queue.empty(): objects.append(status_queue.get()) resource_ids = list(obj.id for obj in objects) self.assertIn(ipsec_site_conn.id, resource_ids) # Ensure vpn resources still exist site_conn_check = self.conn.network.get_vpn_ipsec_site_connection( ipsec_site_conn.id ) self.assertEqual(site_conn_check.name, ipsec_site_conn.name) # Last round - do a real cleanup self.conn.project_cleanup( dry_run=False, wait_timeout=600, status_queue=status_queue ) # Ensure no VPN resources remain self.assertEqual(0, len(list(self.conn.network.vpn_ike_policies()))) self.assertEqual(0, len(list(self.conn.network.vpn_ipsec_policies()))) self.assertEqual(0, len(list(self.conn.network.vpn_services()))) self.assertEqual(0, len(list(self.conn.network.vpn_endpoint_groups()))) self.assertEqual( 0, len(list(self.conn.network.vpn_ipsec_site_connections())) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_qos_bandwidth_limit_rule.py0000664000175000017500000001006300000000000031314 0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_qos_bandwidth_limit_rule ---------------------------------- Functional tests for QoS bandwidth limit methods. 
""" from openstack import exceptions from openstack.tests.functional import base class TestQosBandwidthLimitRule(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") if not self.operator_cloud.has_service('network'): self.skipTest('Network service not supported by cloud') if not self.operator_cloud._has_neutron_extension('qos'): self.skipTest('QoS network extension not supported by cloud') policy_name = self.getUniqueString('qos_policy') self.policy = self.operator_cloud.create_qos_policy(name=policy_name) self.addCleanup(self._cleanup_qos_policy) def _cleanup_qos_policy(self): try: self.operator_cloud.delete_qos_policy(self.policy['id']) except Exception as e: raise exceptions.SDKException(e) def test_qos_bandwidth_limit_rule_lifecycle(self): max_kbps = 1500 max_burst_kbps = 500 updated_max_kbps = 2000 # Create bw limit rule rule = self.operator_cloud.create_qos_bandwidth_limit_rule( self.policy['id'], max_kbps=max_kbps, max_burst_kbps=max_burst_kbps ) self.assertIn('id', rule) self.assertEqual(max_kbps, rule['max_kbps']) self.assertEqual(max_burst_kbps, rule['max_burst_kbps']) # Now try to update rule updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule( self.policy['id'], rule['id'], max_kbps=updated_max_kbps ) self.assertIn('id', updated_rule) self.assertEqual(updated_max_kbps, updated_rule['max_kbps']) self.assertEqual(max_burst_kbps, updated_rule['max_burst_kbps']) # List rules from policy policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules( self.policy['id'] ) self.assertEqual([updated_rule], policy_rules) # Delete rule self.operator_cloud.delete_qos_bandwidth_limit_rule( self.policy['id'], updated_rule['id'] ) # Check if there is no rules in policy policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules( self.policy['id'] ) self.assertEqual([], policy_rules) def test_create_qos_bandwidth_limit_rule_direction(self): if not 
self.operator_cloud._has_neutron_extension( 'qos-bw-limit-direction' ): self.skipTest( "'qos-bw-limit-direction' network extension " "not supported by cloud" ) max_kbps = 1500 direction = "ingress" updated_direction = "egress" # Create bw limit rule rule = self.operator_cloud.create_qos_bandwidth_limit_rule( self.policy['id'], max_kbps=max_kbps, direction=direction ) self.assertIn('id', rule) self.assertEqual(max_kbps, rule['max_kbps']) self.assertEqual(direction, rule['direction']) # Now try to update direction in rule updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule( self.policy['id'], rule['id'], direction=updated_direction ) self.assertIn('id', updated_rule) self.assertEqual(max_kbps, updated_rule['max_kbps']) self.assertEqual(updated_direction, updated_rule['direction']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_qos_dscp_marking_rule.py0000664000175000017500000000542000000000000030614 0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_qos_dscp_marking_rule ---------------------------------- Functional tests for QoS DSCP marking rule methods. 
""" from openstack import exceptions from openstack.tests.functional import base class TestQosDscpMarkingRule(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") if not self.operator_cloud.has_service('network'): self.skipTest('Network service not supported by cloud') if not self.operator_cloud._has_neutron_extension('qos'): self.skipTest('QoS network extension not supported by cloud') policy_name = self.getUniqueString('qos_policy') self.policy = self.operator_cloud.create_qos_policy(name=policy_name) self.addCleanup(self._cleanup_qos_policy) def _cleanup_qos_policy(self): try: self.operator_cloud.delete_qos_policy(self.policy['id']) except Exception as e: raise exceptions.SDKException(e) def test_qos_dscp_marking_rule_lifecycle(self): dscp_mark = 16 updated_dscp_mark = 32 # Create DSCP marking rule rule = self.operator_cloud.create_qos_dscp_marking_rule( self.policy['id'], dscp_mark=dscp_mark ) self.assertIn('id', rule) self.assertEqual(dscp_mark, rule['dscp_mark']) # Now try to update rule updated_rule = self.operator_cloud.update_qos_dscp_marking_rule( self.policy['id'], rule['id'], dscp_mark=updated_dscp_mark ) self.assertIn('id', updated_rule) self.assertEqual(updated_dscp_mark, updated_rule['dscp_mark']) # List rules from policy policy_rules = self.operator_cloud.list_qos_dscp_marking_rules( self.policy['id'] ) self.assertEqual([updated_rule], policy_rules) # Delete rule self.operator_cloud.delete_qos_dscp_marking_rule( self.policy['id'], updated_rule['id'] ) # Check if there is no rules in policy policy_rules = self.operator_cloud.list_qos_dscp_marking_rules( self.policy['id'] ) self.assertEqual([], policy_rules) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_qos_minimum_bandwidth_rule.py0000664000175000017500000000545400000000000031661 
0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_qos_minumum_bandwidth_rule ---------------------------------- Functional tests for QoS minimum bandwidth methods. """ from openstack import exceptions from openstack.tests.functional import base class TestQosMinimumBandwidthRule(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") if not self.operator_cloud.has_service('network'): self.skipTest('Network service not supported by cloud') if not self.operator_cloud._has_neutron_extension('qos'): self.skipTest('QoS network extension not supported by cloud') policy_name = self.getUniqueString('qos_policy') self.policy = self.operator_cloud.create_qos_policy(name=policy_name) self.addCleanup(self._cleanup_qos_policy) def _cleanup_qos_policy(self): try: self.operator_cloud.delete_qos_policy(self.policy['id']) except Exception as e: raise exceptions.SDKException(e) def test_qos_minimum_bandwidth_rule_lifecycle(self): min_kbps = 1500 updated_min_kbps = 2000 # Create min bw rule rule = self.operator_cloud.create_qos_minimum_bandwidth_rule( self.policy['id'], min_kbps=min_kbps ) self.assertIn('id', rule) self.assertEqual(min_kbps, rule['min_kbps']) # Now try to update rule updated_rule = self.operator_cloud.update_qos_minimum_bandwidth_rule( self.policy['id'], rule['id'], min_kbps=updated_min_kbps ) self.assertIn('id', updated_rule) 
self.assertEqual(updated_min_kbps, updated_rule['min_kbps']) # List rules from policy policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules( self.policy['id'] ) self.assertEqual([updated_rule], policy_rules) # Delete rule self.operator_cloud.delete_qos_minimum_bandwidth_rule( self.policy['id'], updated_rule['id'] ) # Check if there is no rules in policy policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules( self.policy['id'] ) self.assertEqual([], policy_rules) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_qos_policy.py0000664000175000017500000001002300000000000026416 0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_qos_policy ---------------------------------- Functional tests for QoS policies methods. 
""" from openstack import exceptions from openstack.tests.functional import base class TestQosPolicy(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") if not self.operator_cloud.has_service('network'): self.skipTest('Network service not supported by cloud') if not self.operator_cloud._has_neutron_extension('qos'): self.skipTest('QoS network extension not supported by cloud') self.policy_name = self.getUniqueString('qos_policy') self.addCleanup(self._cleanup_policies) def _cleanup_policies(self): exception_list = list() for policy in self.operator_cloud.list_qos_policies(): if policy['name'].startswith(self.policy_name): try: self.operator_cloud.delete_qos_policy(policy['id']) except Exception as e: exception_list.append(str(e)) continue if exception_list: raise exceptions.SDKException('\n'.join(exception_list)) def test_create_qos_policy_basic(self): policy = self.operator_cloud.create_qos_policy(name=self.policy_name) self.assertIn('id', policy) self.assertEqual(self.policy_name, policy['name']) self.assertFalse(policy['is_shared']) self.assertFalse(policy['is_default']) def test_create_qos_policy_shared(self): policy = self.operator_cloud.create_qos_policy( name=self.policy_name, shared=True ) self.assertIn('id', policy) self.assertEqual(self.policy_name, policy['name']) self.assertTrue(policy['is_shared']) self.assertFalse(policy['is_default']) def test_create_qos_policy_default(self): if not self.operator_cloud._has_neutron_extension('qos-default'): self.skipTest( "'qos-default' network extension not supported by cloud" ) policy = self.operator_cloud.create_qos_policy( name=self.policy_name, default=True ) self.assertIn('id', policy) self.assertEqual(self.policy_name, policy['name']) self.assertFalse(policy['is_shared']) self.assertTrue(policy['is_default']) def test_update_qos_policy(self): policy = self.operator_cloud.create_qos_policy(name=self.policy_name) 
self.assertEqual(self.policy_name, policy['name']) self.assertFalse(policy['is_shared']) self.assertFalse(policy['is_default']) updated_policy = self.operator_cloud.update_qos_policy( policy['id'], shared=True, default=True ) self.assertEqual(self.policy_name, updated_policy['name']) self.assertTrue(updated_policy['is_shared']) self.assertTrue(updated_policy['is_default']) def test_list_qos_policies_filtered(self): policy1 = self.operator_cloud.create_qos_policy(name=self.policy_name) self.assertIsNotNone(policy1) policy2 = self.operator_cloud.create_qos_policy( name=self.policy_name + 'other' ) self.assertIsNotNone(policy2) match = self.operator_cloud.list_qos_policies( filters=dict(name=self.policy_name) ) self.assertEqual(1, len(match)) self.assertEqual(policy1['name'], match[0]['name']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_quotas.py0000664000175000017500000001063600000000000025563 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_quotas ---------------------------------- Functional tests for quotas methods. 
""" from openstack.tests.functional import base class TestComputeQuotas(base.BaseFunctionalTest): def test_get_quotas(self): '''Test quotas functionality''' self.user_cloud.get_compute_quotas(self.user_cloud.current_project_id) def test_set_quotas(self): '''Test quotas functionality''' if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") quotas = self.operator_cloud.get_compute_quotas('demo') cores = quotas['cores'] self.operator_cloud.set_compute_quotas('demo', cores=cores + 1) self.assertEqual( cores + 1, self.operator_cloud.get_compute_quotas('demo')['cores'] ) self.operator_cloud.delete_compute_quotas('demo') self.assertEqual( cores, self.operator_cloud.get_compute_quotas('demo')['cores'] ) class TestVolumeQuotas(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.user_cloud.has_service('volume'): self.skipTest('volume service not supported by cloud') def test_get_quotas(self): '''Test get quotas functionality''' self.user_cloud.get_volume_quotas(self.user_cloud.current_project_id) def test_set_quotas(self): '''Test set quotas functionality''' if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") quotas = self.operator_cloud.get_volume_quotas('demo') volumes = quotas['volumes'] self.operator_cloud.set_volume_quotas('demo', volumes=volumes + 1) self.assertEqual( volumes + 1, self.operator_cloud.get_volume_quotas('demo')['volumes'], ) self.operator_cloud.delete_volume_quotas('demo') self.assertEqual( volumes, self.operator_cloud.get_volume_quotas('demo')['volumes'] ) class TestNetworkQuotas(base.BaseFunctionalTest): def test_get_quotas(self): '''Test get quotas functionality''' self.user_cloud.get_network_quotas(self.user_cloud.current_project_id) def test_quotas(self): '''Test quotas functionality''' if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") if not self.operator_cloud.has_service('network'): self.skipTest('network service not supported 
by cloud') quotas = self.operator_cloud.get_network_quotas('demo') network = quotas['networks'] self.operator_cloud.set_network_quotas('demo', networks=network + 1) self.assertEqual( network + 1, self.operator_cloud.get_network_quotas('demo')['networks'], ) self.operator_cloud.delete_network_quotas('demo') self.assertEqual( network, self.operator_cloud.get_network_quotas('demo')['networks'] ) def test_get_quotas_details(self): if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") if not self.operator_cloud.has_service('network'): self.skipTest('network service not supported by cloud') quotas = [ 'floating_ips', 'networks', 'ports', 'rbac_policies', 'routers', 'subnets', 'subnet_pools', 'security_group_rules', 'security_groups', ] expected_keys = ['limit', 'used', 'reserved'] '''Test getting details about quota usage''' quota_details = self.operator_cloud.get_network_quotas( 'demo', details=True ) for quota in quotas: quota_val = quota_details[quota] if quota_val: for expected_key in expected_keys: self.assertIn(expected_key, quota_val) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_range_search.py0000664000175000017500000001405200000000000026664 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from openstack import exceptions from openstack.tests.functional import base class TestRangeSearch(base.BaseFunctionalTest): def _filter_m1_flavors(self, results): """The m1 flavors are the original devstack flavors""" new_results = [] for flavor in results: if flavor['name'].startswith("m1."): new_results.append(flavor) return new_results def test_range_search_bad_range(self): flavors = self.user_cloud.list_flavors(get_extra=False) self.assertRaises( exceptions.SDKException, self.user_cloud.range_search, flavors, {"ram": "<1a0"}, ) def test_range_search_exact(self): flavors = self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search(flavors, {"ram": "4096"}) self.assertIsInstance(result, list) # should only be 1 m1 flavor with 4096 ram result = self._filter_m1_flavors(result) self.assertEqual(1, len(result)) self.assertEqual("m1.medium", result[0]['name']) def test_range_search_min(self): flavors = self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search(flavors, {"ram": "MIN"}) self.assertIsInstance(result, list) self.assertEqual(1, len(result)) # older devstack does not have cirros256 self.assertIn(result[0]['name'], ('cirros256', 'm1.tiny')) def test_range_search_max(self): flavors = self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search(flavors, {"ram": "MAX"}) self.assertIsInstance(result, list) self.assertEqual(1, len(result)) self.assertEqual("m1.xlarge", result[0]['name']) def test_range_search_lt(self): flavors = self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search(flavors, {"ram": "<1024"}) self.assertIsInstance(result, list) # should only be 1 m1 flavor with <1024 ram result = self._filter_m1_flavors(result) self.assertEqual(1, len(result)) self.assertEqual("m1.tiny", result[0]['name']) def test_range_search_gt(self): flavors = self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search(flavors, {"ram": ">4096"}) 
self.assertIsInstance(result, list) # should only be 2 m1 flavors with >4096 ram result = self._filter_m1_flavors(result) self.assertEqual(2, len(result)) flavor_names = [r['name'] for r in result] self.assertIn("m1.large", flavor_names) self.assertIn("m1.xlarge", flavor_names) def test_range_search_le(self): flavors = self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search(flavors, {"ram": "<=4096"}) self.assertIsInstance(result, list) # should only be 3 m1 flavors with <=4096 ram result = self._filter_m1_flavors(result) self.assertEqual(3, len(result)) flavor_names = [r['name'] for r in result] self.assertIn("m1.tiny", flavor_names) self.assertIn("m1.small", flavor_names) self.assertIn("m1.medium", flavor_names) def test_range_search_ge(self): flavors = self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search(flavors, {"ram": ">=4096"}) self.assertIsInstance(result, list) # should only be 3 m1 flavors with >=4096 ram result = self._filter_m1_flavors(result) self.assertEqual(3, len(result)) flavor_names = [r['name'] for r in result] self.assertIn("m1.medium", flavor_names) self.assertIn("m1.large", flavor_names) self.assertIn("m1.xlarge", flavor_names) def test_range_search_multi_1(self): flavors = self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search( flavors, {"ram": "MIN", "vcpus": "MIN"} ) self.assertIsInstance(result, list) self.assertEqual(1, len(result)) # older devstack does not have cirros256 self.assertIn(result[0]['name'], ('cirros256', 'm1.tiny')) def test_range_search_multi_2(self): flavors = self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search( flavors, {"ram": "<1024", "vcpus": "MIN"} ) self.assertIsInstance(result, list) result = self._filter_m1_flavors(result) self.assertEqual(1, len(result)) flavor_names = [r['name'] for r in result] self.assertIn("m1.tiny", flavor_names) def test_range_search_multi_3(self): flavors = 
self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search( flavors, {"ram": ">=4096", "vcpus": "<6"} ) self.assertIsInstance(result, list) result = self._filter_m1_flavors(result) self.assertEqual(2, len(result)) flavor_names = [r['name'] for r in result] self.assertIn("m1.medium", flavor_names) self.assertIn("m1.large", flavor_names) def test_range_search_multi_4(self): flavors = self.user_cloud.list_flavors(get_extra=False) result = self.user_cloud.range_search( flavors, {"ram": ">=4096", "vcpus": "MAX"} ) self.assertIsInstance(result, list) self.assertEqual(1, len(result)) # This is the only result that should have max vcpu self.assertEqual("m1.xlarge", result[0]['name']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_recordset.py0000664000175000017500000001410400000000000026233 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_recordset ---------------------------------- Functional tests for recordset methods. 
""" import random import string from testtools import content from openstack.tests.functional import base class TestRecordset(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.user_cloud.has_service('dns'): self.skipTest('dns service not supported by cloud') def test_recordsets_with_zone_id(self): '''Test DNS recordsets functionality''' sub = ''.join(random.choice(string.ascii_lowercase) for _ in range(6)) zone = '%s.example2.net.' % sub email = 'test@example2.net' name = 'www.%s' % zone type_ = 'a' description = 'Test recordset' ttl = 3600 records = ['192.168.1.1'] self.addDetail('zone', content.text_content(zone)) self.addDetail('recordset', content.text_content(name)) # Create a zone to hold the tested recordset zone_obj = self.user_cloud.create_zone(name=zone, email=email) # Test we can create a recordset and we get it returned created_recordset = self.user_cloud.create_recordset( zone_obj['id'], name, type_, records, description, ttl ) self.addCleanup(self.cleanup, zone, created_recordset['id']) self.assertEqual(created_recordset['zone_id'], zone_obj['id']) self.assertEqual(created_recordset['name'], name) self.assertEqual(created_recordset['type'], type_.upper()) self.assertEqual(created_recordset['records'], records) self.assertEqual(created_recordset['description'], description) self.assertEqual(created_recordset['ttl'], ttl) # Test that we can list recordsets recordsets = self.user_cloud.list_recordsets( zone_obj['id'], ) self.assertIsNotNone(recordsets) # Test we get the same recordset with the get_recordset method get_recordset = self.user_cloud.get_recordset( zone_obj['id'], created_recordset['id'] ) self.assertEqual(get_recordset['id'], created_recordset['id']) # Test we can update a field on the recordset and only that field # is updated updated_recordset = self.user_cloud.update_recordset( zone_obj['id'], created_recordset['id'], ttl=7200 ) self.assertEqual(updated_recordset['id'], created_recordset['id']) 
self.assertEqual(updated_recordset['name'], name) self.assertEqual(updated_recordset['type'], type_.upper()) self.assertEqual(updated_recordset['records'], records) self.assertEqual(updated_recordset['description'], description) self.assertEqual(updated_recordset['ttl'], 7200) # Test we can delete and get True returned deleted_recordset = self.user_cloud.delete_recordset( zone, created_recordset['id'] ) self.assertTrue(deleted_recordset) def test_recordsets_with_zone_name(self): '''Test DNS recordsets functionality''' sub = ''.join(random.choice(string.ascii_lowercase) for _ in range(6)) zone = '%s.example2.net.' % sub email = 'test@example2.net' name = 'www.%s' % zone type_ = 'a' description = 'Test recordset' ttl = 3600 records = ['192.168.1.1'] self.addDetail('zone', content.text_content(zone)) self.addDetail('recordset', content.text_content(name)) # Create a zone to hold the tested recordset zone_obj = self.user_cloud.create_zone(name=zone, email=email) # Test we can create a recordset and we get it returned created_recordset = self.user_cloud.create_recordset( zone, name, type_, records, description, ttl ) self.addCleanup(self.cleanup, zone, created_recordset['id']) self.assertEqual(created_recordset['zone_id'], zone_obj['id']) self.assertEqual(created_recordset['name'], name) self.assertEqual(created_recordset['type'], type_.upper()) self.assertEqual(created_recordset['records'], records) self.assertEqual(created_recordset['description'], description) self.assertEqual(created_recordset['ttl'], ttl) # Test that we can list recordsets recordsets = self.user_cloud.list_recordsets(zone) self.assertIsNotNone(recordsets) # Test we get the same recordset with the get_recordset method get_recordset = self.user_cloud.get_recordset( zone, created_recordset['id'] ) self.assertEqual(get_recordset['id'], created_recordset['id']) # Test we can update a field on the recordset and only that field # is updated updated_recordset = self.user_cloud.update_recordset( 
zone_obj['id'], created_recordset['id'], ttl=7200 ) self.assertEqual(updated_recordset['id'], created_recordset['id']) self.assertEqual(updated_recordset['name'], name) self.assertEqual(updated_recordset['type'], type_.upper()) self.assertEqual(updated_recordset['records'], records) self.assertEqual(updated_recordset['description'], description) self.assertEqual(updated_recordset['ttl'], 7200) # Test we can delete and get True returned deleted_recordset = self.user_cloud.delete_recordset( zone, created_recordset['id'] ) self.assertTrue(deleted_recordset) def cleanup(self, zone_name, recordset_id): self.user_cloud.delete_recordset(zone_name, recordset_id) self.user_cloud.delete_zone(zone_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_router.py0000664000175000017500000003340200000000000025563 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_router ---------------------------------- Functional tests for router methods. 
""" import ipaddress from openstack import exceptions from openstack.tests.functional import base EXPECTED_TOPLEVEL_FIELDS = ( 'id', 'name', 'is_admin_state_up', 'external_gateway_info', 'project_id', 'routes', 'status', ) EXPECTED_GW_INFO_FIELDS = ('network_id', 'enable_snat', 'external_fixed_ips') class TestRouter(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud required for this test") if not self.operator_cloud.has_service('network'): self.skipTest('Network service not supported by cloud') self.router_prefix = self.getUniqueString('router') self.network_prefix = self.getUniqueString('network') self.subnet_prefix = self.getUniqueString('subnet') # NOTE(Shrews): Order matters! self.addCleanup(self._cleanup_networks) self.addCleanup(self._cleanup_subnets) self.addCleanup(self._cleanup_routers) def _cleanup_routers(self): exception_list = list() for router in self.operator_cloud.list_routers(): if router['name'].startswith(self.router_prefix): try: self.operator_cloud.delete_router(router['name']) except Exception as e: exception_list.append(str(e)) continue if exception_list: raise exceptions.SDKException('\n'.join(exception_list)) def _cleanup_networks(self): exception_list = list() for network in self.operator_cloud.list_networks(): if network['name'].startswith(self.network_prefix): try: self.operator_cloud.delete_network(network['name']) except Exception as e: exception_list.append(str(e)) continue if exception_list: raise exceptions.SDKException('\n'.join(exception_list)) def _cleanup_subnets(self): exception_list = list() for subnet in self.operator_cloud.list_subnets(): if subnet['name'].startswith(self.subnet_prefix): try: self.operator_cloud.delete_subnet(subnet['id']) except Exception as e: exception_list.append(str(e)) continue if exception_list: raise exceptions.SDKException('\n'.join(exception_list)) def test_create_router_basic(self): net1_name = self.network_prefix + '_net1' net1 = 
self.operator_cloud.create_network( name=net1_name, external=True ) router_name = self.router_prefix + '_create_basic' router = self.operator_cloud.create_router( name=router_name, admin_state_up=True, ext_gateway_net_id=net1['id'], ) for field in EXPECTED_TOPLEVEL_FIELDS: self.assertIn(field, router) ext_gw_info = router['external_gateway_info'] for field in EXPECTED_GW_INFO_FIELDS: self.assertIn(field, ext_gw_info) self.assertEqual(router_name, router['name']) self.assertEqual('ACTIVE', router['status']) self.assertEqual(net1['id'], ext_gw_info['network_id']) self.assertTrue(ext_gw_info['enable_snat']) def test_create_router_project(self): project = self.operator_cloud.get_project('demo') self.assertIsNotNone(project) proj_id = project['id'] net1_name = self.network_prefix + '_net1' net1 = self.operator_cloud.create_network( name=net1_name, external=True, project_id=proj_id ) router_name = self.router_prefix + '_create_project' router = self.operator_cloud.create_router( name=router_name, admin_state_up=True, ext_gateway_net_id=net1['id'], project_id=proj_id, ) for field in EXPECTED_TOPLEVEL_FIELDS: self.assertIn(field, router) ext_gw_info = router['external_gateway_info'] for field in EXPECTED_GW_INFO_FIELDS: self.assertIn(field, ext_gw_info) self.assertEqual(router_name, router['name']) self.assertEqual('ACTIVE', router['status']) self.assertEqual(proj_id, router['tenant_id']) self.assertEqual(net1['id'], ext_gw_info['network_id']) self.assertTrue(ext_gw_info['enable_snat']) def _create_and_verify_advanced_router( self, external_cidr, external_gateway_ip=None ): # external_cidr must be passed in as unicode (u'') # NOTE(Shrews): The arguments are needed because these tests # will run in parallel and we want to make sure that each test # is using different resources to prevent race conditions. 
net1_name = self.network_prefix + '_net1' sub1_name = self.subnet_prefix + '_sub1' net1 = self.operator_cloud.create_network( name=net1_name, external=True ) sub1 = self.operator_cloud.create_subnet( net1['id'], external_cidr, subnet_name=sub1_name, gateway_ip=external_gateway_ip, ) ip_net = ipaddress.IPv4Network(external_cidr) last_ip = str(list(ip_net.hosts())[-1]) router_name = self.router_prefix + '_create_advanced' router = self.operator_cloud.create_router( name=router_name, admin_state_up=False, ext_gateway_net_id=net1['id'], enable_snat=False, ext_fixed_ips=[{'subnet_id': sub1['id'], 'ip_address': last_ip}], ) for field in EXPECTED_TOPLEVEL_FIELDS: self.assertIn(field, router) ext_gw_info = router['external_gateway_info'] for field in EXPECTED_GW_INFO_FIELDS: self.assertIn(field, ext_gw_info) self.assertEqual(router_name, router['name']) self.assertEqual('ACTIVE', router['status']) self.assertFalse(router['admin_state_up']) self.assertEqual(1, len(ext_gw_info['external_fixed_ips'])) self.assertEqual( sub1['id'], ext_gw_info['external_fixed_ips'][0]['subnet_id'] ) self.assertEqual( last_ip, ext_gw_info['external_fixed_ips'][0]['ip_address'] ) return router def test_create_router_advanced(self): self._create_and_verify_advanced_router(external_cidr='10.2.2.0/24') def test_add_remove_router_interface(self): router = self._create_and_verify_advanced_router( external_cidr='10.3.3.0/24' ) net_name = self.network_prefix + '_intnet1' sub_name = self.subnet_prefix + '_intsub1' net = self.operator_cloud.create_network(name=net_name) sub = self.operator_cloud.create_subnet( net['id'], '10.4.4.0/24', subnet_name=sub_name, gateway_ip='10.4.4.1', ) iface = self.operator_cloud.add_router_interface( router, subnet_id=sub['id'] ) self.assertIsNone( self.operator_cloud.remove_router_interface( router, subnet_id=sub['id'] ) ) # Test return values *after* the interface is detached so the # resources we've created can be cleaned up if these asserts fail. 
self.assertIsNotNone(iface) for key in ('id', 'subnet_id', 'port_id', 'tenant_id'): self.assertIn(key, iface) self.assertEqual(router['id'], iface['id']) self.assertEqual(sub['id'], iface['subnet_id']) def test_list_router_interfaces(self): router = self._create_and_verify_advanced_router( external_cidr='10.5.5.0/24' ) net_name = self.network_prefix + '_intnet1' sub_name = self.subnet_prefix + '_intsub1' net = self.operator_cloud.create_network(name=net_name) sub = self.operator_cloud.create_subnet( net['id'], '10.6.6.0/24', subnet_name=sub_name, gateway_ip='10.6.6.1', ) iface = self.operator_cloud.add_router_interface( router, subnet_id=sub['id'] ) all_ifaces = self.operator_cloud.list_router_interfaces(router) int_ifaces = self.operator_cloud.list_router_interfaces( router, interface_type='internal' ) ext_ifaces = self.operator_cloud.list_router_interfaces( router, interface_type='external' ) self.assertIsNone( self.operator_cloud.remove_router_interface( router, subnet_id=sub['id'] ) ) # Test return values *after* the interface is detached so the # resources we've created can be cleaned up if these asserts fail. 
self.assertIsNotNone(iface) self.assertEqual(2, len(all_ifaces)) self.assertEqual(1, len(int_ifaces)) self.assertEqual(1, len(ext_ifaces)) ext_fixed_ips = router['external_gateway_info']['external_fixed_ips'] self.assertEqual( ext_fixed_ips[0]['subnet_id'], ext_ifaces[0]['fixed_ips'][0]['subnet_id'], ) self.assertEqual(sub['id'], int_ifaces[0]['fixed_ips'][0]['subnet_id']) def test_update_router_name(self): router = self._create_and_verify_advanced_router( external_cidr='10.7.7.0/24' ) new_name = self.router_prefix + '_update_name' updated = self.operator_cloud.update_router( router['id'], name=new_name ) self.assertIsNotNone(updated) for field in EXPECTED_TOPLEVEL_FIELDS: self.assertIn(field, updated) # Name is the only change we expect self.assertEqual(new_name, updated['name']) # Validate nothing else changed self.assertEqual(router['status'], updated['status']) self.assertEqual(router['admin_state_up'], updated['admin_state_up']) self.assertEqual( router['external_gateway_info'], updated['external_gateway_info'] ) def test_update_router_routes(self): router = self._create_and_verify_advanced_router( external_cidr='10.7.7.0/24' ) routes = [{"destination": "10.7.7.0/24", "nexthop": "10.7.7.99"}] updated = self.operator_cloud.update_router( router['id'], routes=routes ) self.assertIsNotNone(updated) for field in EXPECTED_TOPLEVEL_FIELDS: self.assertIn(field, updated) # Name is the only change we expect self.assertEqual(routes, updated['routes']) # Validate nothing else changed self.assertEqual(router['status'], updated['status']) self.assertEqual(router['admin_state_up'], updated['admin_state_up']) self.assertEqual( router['external_gateway_info'], updated['external_gateway_info'] ) def test_update_router_admin_state(self): router = self._create_and_verify_advanced_router( external_cidr='10.8.8.0/24' ) updated = self.operator_cloud.update_router( router['id'], admin_state_up=True ) self.assertIsNotNone(updated) for field in EXPECTED_TOPLEVEL_FIELDS: 
self.assertIn(field, updated) # admin_state_up is the only change we expect self.assertTrue(updated['admin_state_up']) self.assertNotEqual( router['admin_state_up'], updated['admin_state_up'] ) # Validate nothing else changed self.assertEqual(router['status'], updated['status']) self.assertEqual(router['name'], updated['name']) self.assertEqual( router['external_gateway_info'], updated['external_gateway_info'] ) def test_update_router_ext_gw_info(self): router = self._create_and_verify_advanced_router( external_cidr='10.9.9.0/24' ) # create a new subnet existing_net_id = router['external_gateway_info']['network_id'] sub_name = self.subnet_prefix + '_update' sub = self.operator_cloud.create_subnet( existing_net_id, '10.10.10.0/24', subnet_name=sub_name, gateway_ip='10.10.10.1', ) updated = self.operator_cloud.update_router( router['id'], ext_gateway_net_id=existing_net_id, ext_fixed_ips=[ {'subnet_id': sub['id'], 'ip_address': '10.10.10.77'} ], ) self.assertIsNotNone(updated) for field in EXPECTED_TOPLEVEL_FIELDS: self.assertIn(field, updated) # external_gateway_info is the only change we expect ext_gw_info = updated['external_gateway_info'] self.assertEqual(1, len(ext_gw_info['external_fixed_ips'])) self.assertEqual( sub['id'], ext_gw_info['external_fixed_ips'][0]['subnet_id'] ) self.assertEqual( '10.10.10.77', ext_gw_info['external_fixed_ips'][0]['ip_address'] ) # Validate nothing else changed self.assertEqual(router['status'], updated['status']) self.assertEqual(router['name'], updated['name']) self.assertEqual(router['admin_state_up'], updated['admin_state_up']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_security_groups.py0000664000175000017500000000633200000000000027513 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_security_groups ---------------------------------- Functional tests for security_groups resource. """ from openstack.tests.functional import base class TestSecurityGroups(base.BaseFunctionalTest): def test_create_list_security_groups(self): sg1 = self.user_cloud.create_security_group( name="sg1", description="sg1" ) self.addCleanup(self.user_cloud.delete_security_group, sg1['id']) if self.user_cloud.has_service('network'): # Neutron defaults to all_tenants=1 when admin sg_list = self.user_cloud.list_security_groups() self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) else: # Nova does not list all tenants by default sg_list = self.operator_cloud.list_security_groups() def test_create_list_security_groups_operator(self): if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") sg1 = self.user_cloud.create_security_group( name="sg1", description="sg1" ) self.addCleanup(self.user_cloud.delete_security_group, sg1['id']) sg2 = self.operator_cloud.create_security_group( name="sg2", description="sg2" ) self.addCleanup(self.operator_cloud.delete_security_group, sg2['id']) if self.user_cloud.has_service('network'): # Neutron defaults to all_tenants=1 when admin sg_list = self.operator_cloud.list_security_groups() self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) # Filter by tenant_id (filtering by project_id won't work with # Keystone V2) sg_list = self.operator_cloud.list_security_groups( filters={'tenant_id': self.user_cloud.current_project_id} ) self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) 
self.assertNotIn(sg2['id'], [sg['id'] for sg in sg_list]) else: # Nova does not list all tenants by default sg_list = self.operator_cloud.list_security_groups() self.assertIn(sg2['id'], [sg['id'] for sg in sg_list]) self.assertNotIn(sg1['id'], [sg['id'] for sg in sg_list]) sg_list = self.operator_cloud.list_security_groups( filters={'all_tenants': 1} ) self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) def test_get_security_group_by_id(self): sg = self.user_cloud.create_security_group(name='sg', description='sg') self.addCleanup(self.user_cloud.delete_security_group, sg['id']) ret_sg = self.user_cloud.get_security_group_by_id(sg['id']) self.assertEqual(sg, ret_sg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_server_group.py0000664000175000017500000000264300000000000026770 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_server_group ---------------------------------- Functional tests for server_group resource. 
""" from openstack.tests.functional import base class TestServerGroup(base.BaseFunctionalTest): def test_server_group(self): server_group_name = self.getUniqueString() self.addCleanup(self.cleanup, server_group_name) server_group = self.user_cloud.create_server_group( server_group_name, ['affinity'] ) server_group_ids = [ v['id'] for v in self.user_cloud.list_server_groups() ] self.assertIn(server_group['id'], server_group_ids) self.user_cloud.delete_server_group(server_group_name) def cleanup(self, server_group_name): server_group = self.user_cloud.get_server_group(server_group_name) if server_group: self.user_cloud.delete_server_group(server_group['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_services.py0000664000175000017500000001253500000000000026072 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_services ---------------------------------- Functional tests for service resource. 
""" import random import string from openstack.cloud import exc from openstack import exceptions from openstack.tests.functional import base class TestServices(base.KeystoneBaseFunctionalTest): service_attributes = ['id', 'name', 'type', 'description'] def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") # Generate a random name for services in this test self.new_service_name = 'test_' + ''.join( random.choice(string.ascii_lowercase) for _ in range(5) ) self.addCleanup(self._cleanup_services) def _cleanup_services(self): exception_list = list() for s in self.operator_cloud.list_services(): if s['name'] is not None and s['name'].startswith( self.new_service_name ): try: self.operator_cloud.delete_service(name_or_id=s['id']) except Exception as e: # We were unable to delete a service, let's try with next exception_list.append(str(e)) continue if exception_list: # Raise an error: we must make users aware that something went # wrong raise exceptions.SDKException('\n'.join(exception_list)) def test_create_service(self): service = self.operator_cloud.create_service( name=self.new_service_name + '_create', type='test_type', description='this is a test description', ) self.assertIsNotNone(service.get('id')) def test_update_service(self): ver = self.operator_cloud.config.get_api_version('identity') if ver.startswith('2'): # NOTE(SamYaple): Update service only works with v3 api self.assertRaises( exc.OpenStackCloudUnavailableFeature, self.operator_cloud.update_service, 'service_id', name='new name', ) else: service = self.operator_cloud.create_service( name=self.new_service_name + '_create', type='test_type', description='this is a test description', enabled=True, ) new_service = self.operator_cloud.update_service( service.id, name=self.new_service_name + '_update', description='this is an updated description', enabled=False, ) self.assertEqual( new_service.name, self.new_service_name + '_update' ) 
self.assertEqual( new_service.description, 'this is an updated description' ) self.assertFalse(new_service.is_enabled) self.assertEqual(service.id, new_service.id) def test_list_services(self): service = self.operator_cloud.create_service( name=self.new_service_name + '_list', type='test_type' ) observed_services = self.operator_cloud.list_services() self.assertIsInstance(observed_services, list) found = False for s in observed_services: # Test all attributes are returned if s['id'] == service['id']: self.assertEqual( self.new_service_name + '_list', s.get('name') ) self.assertEqual('test_type', s.get('type')) found = True self.assertTrue(found, msg='new service not found in service list!') def test_delete_service_by_name(self): # Test delete by name service = self.operator_cloud.create_service( name=self.new_service_name + '_delete_by_name', type='test_type' ) self.operator_cloud.delete_service(name_or_id=service['name']) observed_services = self.operator_cloud.list_services() found = False for s in observed_services: if s['id'] == service['id']: found = True break self.assertEqual(False, found, message='service was not deleted!') def test_delete_service_by_id(self): # Test delete by id service = self.operator_cloud.create_service( name=self.new_service_name + '_delete_by_id', type='test_type' ) self.operator_cloud.delete_service(name_or_id=service['id']) observed_services = self.operator_cloud.list_services() found = False for s in observed_services: if s['id'] == service['id']: found = True self.assertEqual(False, found, message='service was not deleted!') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_stack.py0000664000175000017500000001307100000000000025350 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_stack ---------------------------------- Functional tests for stack methods. """ import tempfile from openstack import exceptions from openstack.tests import fakes from openstack.tests.functional import base simple_template = '''heat_template_version: 2014-10-16 parameters: length: type: number default: 10 resources: my_rand: type: OS::Heat::RandomString properties: length: {get_param: length} outputs: rand: value: get_attr: [my_rand, value] ''' root_template = '''heat_template_version: 2014-10-16 parameters: length: type: number default: 10 count: type: number default: 5 resources: my_rands: type: OS::Heat::ResourceGroup properties: count: {get_param: count} resource_def: type: My::Simple::Template properties: length: {get_param: length} outputs: rands: value: get_attr: [my_rands, attributes, rand] ''' environment = ''' resource_registry: My::Simple::Template: %s ''' validate_template = '''heat_template_version: asdf-no-such-version ''' class TestStack(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.user_cloud.has_service('orchestration'): self.skipTest('Orchestration service not supported by cloud') def _cleanup_stack(self): self.user_cloud.delete_stack(self.stack_name, wait=True) self.assertIsNone(self.user_cloud.get_stack(self.stack_name)) def test_stack_validation(self): test_template = tempfile.NamedTemporaryFile(delete=False) test_template.write(validate_template.encode('utf-8')) test_template.close() stack_name = self.getUniqueString('validate_template') self.assertRaises( exceptions.SDKException, self.user_cloud.create_stack, 
name=stack_name, template_file=test_template.name, ) def test_stack_simple(self): test_template = tempfile.NamedTemporaryFile(delete=False) test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) test_template.close() self.stack_name = self.getUniqueString('simple_stack') self.addCleanup(self._cleanup_stack) stack = self.user_cloud.create_stack( name=self.stack_name, template_file=test_template.name, wait=True ) # assert expected values in stack self.assertEqual('CREATE_COMPLETE', stack['stack_status']) rand = stack['outputs'][0]['output_value'] self.assertEqual(10, len(rand)) # assert get_stack matches returned create_stack stack = self.user_cloud.get_stack(self.stack_name) self.assertEqual('CREATE_COMPLETE', stack['stack_status']) self.assertEqual(rand, stack['outputs'][0]['output_value']) # assert stack is in list_stacks stacks = self.user_cloud.list_stacks() stack_ids = [s['id'] for s in stacks] self.assertIn(stack['id'], stack_ids) # update with no changes stack = self.user_cloud.update_stack( self.stack_name, template_file=test_template.name, wait=True ) # assert no change in updated stack self.assertEqual('UPDATE_COMPLETE', stack['stack_status']) rand = stack['outputs'][0]['output_value'] self.assertEqual(rand, stack['outputs'][0]['output_value']) # update with changes stack = self.user_cloud.update_stack( self.stack_name, template_file=test_template.name, wait=True, length=12, ) # assert changed output in updated stack stack = self.user_cloud.get_stack(self.stack_name) self.assertEqual('UPDATE_COMPLETE', stack['stack_status']) new_rand = stack['outputs'][0]['output_value'] self.assertNotEqual(rand, new_rand) self.assertEqual(12, len(new_rand)) def test_stack_nested(self): test_template = tempfile.NamedTemporaryFile( suffix='.yaml', delete=False ) test_template.write(root_template.encode('utf-8')) test_template.close() simple_tmpl = tempfile.NamedTemporaryFile(suffix='.yaml', delete=False) simple_tmpl.write(fakes.FAKE_TEMPLATE.encode('utf-8')) 
simple_tmpl.close() env = tempfile.NamedTemporaryFile(suffix='.yaml', delete=False) expanded_env = environment % simple_tmpl.name env.write(expanded_env.encode('utf-8')) env.close() self.stack_name = self.getUniqueString('nested_stack') self.addCleanup(self._cleanup_stack) stack = self.user_cloud.create_stack( name=self.stack_name, template_file=test_template.name, environment_files=[env.name], wait=True, ) # assert expected values in stack self.assertEqual('CREATE_COMPLETE', stack['stack_status']) rands = stack['outputs'][0]['output_value'] self.assertEqual(['0', '1', '2', '3', '4'], sorted(rands.keys())) for rand in rands.values(): self.assertEqual(10, len(rand)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_users.py0000664000175000017500000001516200000000000025407 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_users ---------------------------------- Functional tests for user methods. 
""" from openstack import exceptions from openstack.tests.functional import base class TestUsers(base.KeystoneBaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") self.user_prefix = self.getUniqueString('user') self.addCleanup(self._cleanup_users) def _cleanup_users(self): exception_list = list() for user in self.operator_cloud.list_users(): if user['name'].startswith(self.user_prefix): try: self.operator_cloud.delete_user(user['id']) except Exception as e: exception_list.append(str(e)) continue if exception_list: raise exceptions.SDKException('\n'.join(exception_list)) def _create_user(self, **kwargs): domain_id = None i_ver = self.operator_cloud.config.get_api_version('identity') if i_ver not in ('2', '2.0'): domain = self.operator_cloud.get_domain('default') domain_id = domain['id'] return self.operator_cloud.create_user(domain_id=domain_id, **kwargs) def test_list_users(self): users = self.operator_cloud.list_users() self.assertIsNotNone(users) self.assertNotEqual([], users) def test_get_user(self): user = self.operator_cloud.get_user('admin') self.assertIsNotNone(user) self.assertIn('id', user) self.assertIn('name', user) self.assertEqual('admin', user['name']) def test_search_users(self): users = self.operator_cloud.search_users(filters={'is_enabled': True}) self.assertIsNotNone(users) def test_search_users_jmespath(self): users = self.operator_cloud.search_users(filters="[?enabled]") self.assertIsNotNone(users) def test_create_user(self): user_name = self.user_prefix + '_create' user_email = 'nobody@nowhere.com' user = self._create_user(name=user_name, email=user_email) self.assertIsNotNone(user) self.assertEqual(user_name, user['name']) self.assertEqual(user_email, user['email']) self.assertTrue(user['is_enabled']) def test_delete_user(self): user_name = self.user_prefix + '_delete' user_email = 'nobody@nowhere.com' user = self._create_user(name=user_name, 
email=user_email) self.assertIsNotNone(user) self.assertTrue(self.operator_cloud.delete_user(user['id'])) def test_delete_user_not_found(self): self.assertFalse(self.operator_cloud.delete_user('does_not_exist')) def test_update_user(self): user_name = self.user_prefix + '_updatev3' user_email = 'nobody@nowhere.com' user = self._create_user(name=user_name, email=user_email) self.assertIsNotNone(user) self.assertTrue(user['is_enabled']) # Pass some keystone v3 params. This should work no matter which # version of keystone we are testing against. new_user = self.operator_cloud.update_user( user['id'], name=user_name + '2', email='somebody@nowhere.com', enabled=False, password='secret', description='', ) self.assertIsNotNone(new_user) self.assertEqual(user['id'], new_user['id']) self.assertEqual(user_name + '2', new_user['name']) self.assertEqual('somebody@nowhere.com', new_user['email']) self.assertFalse(new_user['is_enabled']) def test_update_user_password(self): user_name = self.user_prefix + '_password' user_email = 'nobody@nowhere.com' user = self._create_user( name=user_name, email=user_email, password='old_secret' ) self.assertIsNotNone(user) self.assertTrue(user['enabled']) # This should work for both v2 and v3 new_user = self.operator_cloud.update_user( user['id'], password='new_secret' ) self.assertIsNotNone(new_user) self.assertEqual(user['id'], new_user['id']) self.assertEqual(user_name, new_user['name']) self.assertEqual(user_email, new_user['email']) self.assertTrue(new_user['enabled']) self.assertTrue( self.operator_cloud.grant_role( 'member', user=user['id'], project='demo', wait=True ) ) self.addCleanup( self.operator_cloud.revoke_role, 'member', user=user['id'], project='demo', wait=True, ) new_cloud = self.operator_cloud.connect_as( user_id=user['id'], password='new_secret', project_name='demo' ) self.assertIsNotNone(new_cloud) location = new_cloud.current_location self.assertEqual(location['project']['name'], 'demo') 
self.assertIsNotNone(new_cloud.service_catalog) def test_users_and_groups(self): i_ver = self.operator_cloud.config.get_api_version('identity') if i_ver in ('2', '2.0'): self.skipTest('Identity service does not support groups') group_name = self.getUniqueString('group') self.addCleanup(self.operator_cloud.delete_group, group_name) # Create a group group = self.operator_cloud.create_group(group_name, 'test group') self.assertIsNotNone(group) # Create a user user_name = self.user_prefix + '_ug' user_email = 'nobody@nowhere.com' user = self._create_user(name=user_name, email=user_email) self.assertIsNotNone(user) # Add the user to the group self.operator_cloud.add_user_to_group(user_name, group_name) self.assertTrue( self.operator_cloud.is_user_in_group(user_name, group_name) ) # Remove them from the group self.operator_cloud.remove_user_from_group(user_name, group_name) self.assertFalse( self.operator_cloud.is_user_in_group(user_name, group_name) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_volume.py0000664000175000017500000001527000000000000025555 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_volume ---------------------------------- Functional tests for block storage methods. 
""" from fixtures import TimeoutException from testtools import content from openstack import exceptions from openstack.tests.functional import base from openstack import utils class TestVolume(base.BaseFunctionalTest): # Creating and deleting volumes is slow TIMEOUT_SCALING_FACTOR = 1.5 def setUp(self): super().setUp() self.skipTest('Volume functional tests temporarily disabled') if not self.user_cloud.has_service('volume'): self.skipTest('volume service not supported by cloud') def test_volumes(self): '''Test volume and snapshot functionality''' volume_name = self.getUniqueString() snapshot_name = self.getUniqueString() self.addDetail('volume', content.text_content(volume_name)) self.addCleanup(self.cleanup, volume_name, snapshot_name=snapshot_name) volume = self.user_cloud.create_volume( display_name=volume_name, size=1 ) snapshot = self.user_cloud.create_volume_snapshot( volume['id'], display_name=snapshot_name ) ret_volume = self.user_cloud.get_volume_by_id(volume['id']) self.assertEqual(volume['id'], ret_volume['id']) volume_ids = [v['id'] for v in self.user_cloud.list_volumes()] self.assertIn(volume['id'], volume_ids) snapshot_list = self.user_cloud.list_volume_snapshots() snapshot_ids = [s['id'] for s in snapshot_list] self.assertIn(snapshot['id'], snapshot_ids) ret_snapshot = self.user_cloud.get_volume_snapshot_by_id( snapshot['id'] ) self.assertEqual(snapshot['id'], ret_snapshot['id']) self.user_cloud.delete_volume_snapshot(snapshot_name, wait=True) self.user_cloud.delete_volume(volume_name, wait=True) def test_volume_to_image(self): '''Test volume export to image functionality''' volume_name = self.getUniqueString() image_name = self.getUniqueString() self.addDetail('volume', content.text_content(volume_name)) self.addCleanup(self.cleanup, volume_name, image_name=image_name) volume = self.user_cloud.create_volume( display_name=volume_name, size=1 ) image = self.user_cloud.create_image( image_name, volume=volume, wait=True ) volume_ids = [v['id'] for v in 
self.user_cloud.list_volumes()] self.assertIn(volume['id'], volume_ids) image_list = self.user_cloud.list_images() image_ids = [s['id'] for s in image_list] self.assertIn(image['id'], image_ids) self.user_cloud.delete_image(image_name, wait=True) self.user_cloud.delete_volume(volume_name, wait=True) def cleanup(self, volume, snapshot_name=None, image_name=None): # Need to delete snapshots before volumes if snapshot_name: snapshot = self.user_cloud.get_volume_snapshot(snapshot_name) if snapshot: self.user_cloud.delete_volume_snapshot( snapshot_name, wait=True ) if image_name: image = self.user_cloud.get_image(image_name) if image: self.user_cloud.delete_image(image_name, wait=True) if not isinstance(volume, list): self.user_cloud.delete_volume(volume, wait=True) else: # We have more than one volume to clean up - submit all of the # deletes without wait, then poll until none of them are found # in the volume list anymore for v in volume: self.user_cloud.delete_volume(v, wait=False) try: for count in utils.iterate_timeout( 180, "Timeout waiting for volume cleanup" ): found = False for existing in self.user_cloud.list_volumes(): for v in volume: if v['id'] == existing['id']: found = True break if found: break if not found: break except (exceptions.ResourceTimeout, TimeoutException): # NOTE(slaweq): ups, some volumes are still not removed # so we should try to force delete it once again and move # forward for existing in self.user_cloud.list_volumes(): for v in volume: if v['id'] == existing['id']: self.operator_cloud.delete_volume( v, wait=False, force=True ) def test_list_volumes_pagination(self): '''Test pagination for list volumes functionality''' volumes = [] # the number of created volumes needs to be higher than # CONF.osapi_max_limit but not higher than volume quotas for # the test user in the tenant(default quotas is set to 10) num_volumes = 8 for i in range(num_volumes): name = self.getUniqueString() v = self.user_cloud.create_volume(display_name=name, size=1) 
volumes.append(v) self.addCleanup(self.cleanup, volumes) result = [] for v in self.user_cloud.list_volumes(): if v['name'] and v['name'].startswith(self.id()): result.append(v['id']) self.assertEqual(sorted([v['id'] for v in volumes]), sorted(result)) def test_update_volume(self): name, desc = self.getUniqueString('name'), self.getUniqueString('desc') self.addCleanup(self.cleanup, name) volume = self.user_cloud.create_volume(1, name=name, description=desc) self.assertEqual(volume.name, name) self.assertEqual(volume.description, desc) new_name = self.getUniqueString('name') volume = self.user_cloud.update_volume(volume.id, name=new_name) self.assertNotEqual(volume.name, name) self.assertEqual(volume.name, new_name) self.assertEqual(volume.description, desc) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_volume_backup.py0000664000175000017500000001205500000000000027100 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestVolume(base.BaseFunctionalTest): # Creating a volume backup is incredibly slow. 
TIMEOUT_SCALING_FACTOR = 1.5 def setUp(self): super().setUp() self.skipTest('Volume functional tests temporarily disabled') if not self.user_cloud.has_service('volume'): self.skipTest('volume service not supported by cloud') if not self.user_cloud.has_service('object-store'): self.skipTest('volume backups require swift') def test_create_get_delete_volume_backup(self): volume = self.user_cloud.create_volume( display_name=self.getUniqueString(), size=1 ) self.addCleanup(self.user_cloud.delete_volume, volume['id']) backup_name_1 = self.getUniqueString() backup_desc_1 = self.getUniqueString() backup = self.user_cloud.create_volume_backup( volume_id=volume['id'], name=backup_name_1, description=backup_desc_1, wait=True, ) self.assertEqual(backup_name_1, backup['name']) backup = self.user_cloud.get_volume_backup(backup['id']) self.assertEqual("available", backup['status']) self.assertEqual(backup_desc_1, backup['description']) self.user_cloud.delete_volume_backup(backup['id'], wait=True) self.assertIsNone(self.user_cloud.get_volume_backup(backup['id'])) def test_create_get_delete_volume_backup_from_snapshot(self): volume = self.user_cloud.create_volume(size=1) snapshot = self.user_cloud.create_volume_snapshot(volume['id']) self.addCleanup(self.user_cloud.delete_volume, volume['id']) self.addCleanup( self.user_cloud.delete_volume_snapshot, snapshot['id'], wait=True ) backup = self.user_cloud.create_volume_backup( volume_id=volume['id'], snapshot_id=snapshot['id'], wait=True ) backup = self.user_cloud.get_volume_backup(backup['id']) self.assertEqual(backup['snapshot_id'], snapshot['id']) self.user_cloud.delete_volume_backup(backup['id'], wait=True) self.assertIsNone(self.user_cloud.get_volume_backup(backup['id'])) def test_create_get_delete_incremental_volume_backup(self): volume = self.user_cloud.create_volume(size=1) self.addCleanup(self.user_cloud.delete_volume, volume['id']) full_backup = self.user_cloud.create_volume_backup( volume_id=volume['id'], wait=True ) 
incr_backup = self.user_cloud.create_volume_backup( volume_id=volume['id'], incremental=True, wait=True ) full_backup = self.user_cloud.get_volume_backup(full_backup['id']) incr_backup = self.user_cloud.get_volume_backup(incr_backup['id']) self.assertEqual(full_backup['has_dependent_backups'], True) self.assertEqual(incr_backup['is_incremental'], True) self.user_cloud.delete_volume_backup(incr_backup['id'], wait=True) self.user_cloud.delete_volume_backup(full_backup['id'], wait=True) self.assertIsNone(self.user_cloud.get_volume_backup(full_backup['id'])) self.assertIsNone(self.user_cloud.get_volume_backup(incr_backup['id'])) def test_list_volume_backups(self): vol1 = self.user_cloud.create_volume( display_name=self.getUniqueString(), size=1 ) self.addCleanup(self.user_cloud.delete_volume, vol1['id']) # We create 2 volumes to create 2 backups. We could have created 2 # backups from the same volume but taking 2 successive backups seems # to be race-condition prone. And I didn't want to use an ugly sleep() # here. 
vol2 = self.user_cloud.create_volume( display_name=self.getUniqueString(), size=1 ) self.addCleanup(self.user_cloud.delete_volume, vol2['id']) backup_name_1 = self.getUniqueString() backup = self.user_cloud.create_volume_backup( volume_id=vol1['id'], name=backup_name_1 ) self.addCleanup(self.user_cloud.delete_volume_backup, backup['id']) backup = self.user_cloud.create_volume_backup(volume_id=vol2['id']) self.addCleanup(self.user_cloud.delete_volume_backup, backup['id']) backups = self.user_cloud.list_volume_backups() self.assertEqual(2, len(backups)) backups = self.user_cloud.list_volume_backups( filters={"name": backup_name_1} ) self.assertEqual(1, len(backups)) self.assertEqual(backup_name_1, backups[0]['name']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_volume_type.py0000664000175000017500000001063400000000000026615 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_volume ---------------------------------- Functional tests for block storage methods. 
""" import testtools from openstack import exceptions from openstack.tests.functional import base class TestVolumeType(base.BaseFunctionalTest): def _assert_project(self, volume_name_or_id, project_id, allowed=True): acls = self.operator_cloud.get_volume_type_access(volume_name_or_id) allowed_projects = [x.get('project_id') for x in acls] self.assertEqual(allowed, project_id in allowed_projects) def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") if not self.user_cloud.has_service('volume'): self.skipTest('volume service not supported by cloud') volume_type = { "name": 'test-volume-type', "description": None, "os-volume-type-access:is_public": False, } self.operator_cloud.block_storage.post( '/types', json={'volume_type': volume_type} ) def tearDown(self): ret = self.operator_cloud.get_volume_type('test-volume-type') if ret.get('id'): self.operator_cloud.block_storage.delete(f'/types/{ret.id}') super().tearDown() def test_list_volume_types(self): volume_types = self.operator_cloud.list_volume_types() self.assertTrue(volume_types) self.assertTrue( any(x for x in volume_types if x.name == 'test-volume-type') ) def test_add_remove_volume_type_access(self): volume_type = self.operator_cloud.get_volume_type('test-volume-type') self.assertEqual('test-volume-type', volume_type.name) self.operator_cloud.add_volume_type_access( 'test-volume-type', self.operator_cloud.current_project_id ) self._assert_project( 'test-volume-type', self.operator_cloud.current_project_id, allowed=True, ) self.operator_cloud.remove_volume_type_access( 'test-volume-type', self.operator_cloud.current_project_id ) self._assert_project( 'test-volume-type', self.operator_cloud.current_project_id, allowed=False, ) def test_add_volume_type_access_missing_project(self): # Project id is not valitaded and it may not exist. 
self.operator_cloud.add_volume_type_access( 'test-volume-type', '00000000000000000000000000000000' ) self.operator_cloud.remove_volume_type_access( 'test-volume-type', '00000000000000000000000000000000' ) def test_add_volume_type_access_missing_volume(self): with testtools.ExpectedException( exceptions.SDKException, "VolumeType not found.*" ): self.operator_cloud.add_volume_type_access( 'MISSING_VOLUME_TYPE', self.operator_cloud.current_project_id ) def test_remove_volume_type_access_missing_volume(self): with testtools.ExpectedException( exceptions.SDKException, "VolumeType not found.*" ): self.operator_cloud.remove_volume_type_access( 'MISSING_VOLUME_TYPE', self.operator_cloud.current_project_id ) def test_add_volume_type_access_bad_project(self): with testtools.ExpectedException( exceptions.BadRequestException, "Unable to authorize.*" ): self.operator_cloud.add_volume_type_access( 'test-volume-type', 'BAD_PROJECT_ID' ) def test_remove_volume_type_access_missing_project(self): with testtools.ExpectedException( exceptions.NotFoundException, "Unable to revoke.*" ): self.operator_cloud.remove_volume_type_access( 'test-volume-type', '00000000000000000000000000000000' ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/cloud/test_zone.py0000664000175000017500000000603600000000000025221 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" test_zone ---------------------------------- Functional tests for zone methods. """ from testtools import content from openstack.tests.functional import base class TestZone(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.user_cloud.has_service('dns'): self.skipTest('dns service not supported by cloud') def test_zones(self): '''Test DNS zones functionality''' name = 'example.net.' zone_type = 'primary' email = 'test@example.net' description = 'Test zone' ttl = 3600 masters = None self.addDetail('zone', content.text_content(name)) self.addCleanup(self.cleanup, name) # Test we can create a zone and we get it returned zone = self.user_cloud.create_zone( name=name, zone_type=zone_type, email=email, description=description, ttl=ttl, masters=masters, ) self.assertEqual(zone['name'], name) self.assertEqual(zone['type'], zone_type.upper()) self.assertEqual(zone['email'], email) self.assertEqual(zone['description'], description) self.assertEqual(zone['ttl'], ttl) self.assertEqual(zone['masters'], []) # Test that we can list zones zones = self.user_cloud.list_zones() self.assertIsNotNone(zones) # Test we get the same zone with the get_zone method zone_get = self.user_cloud.get_zone(zone['id']) self.assertEqual(zone_get['id'], zone['id']) # Test the get method also works by name zone_get = self.user_cloud.get_zone(name) self.assertEqual(zone_get['name'], zone['name']) # Test we can update a field on the zone and only that field # is updated zone_update = self.user_cloud.update_zone(zone['id'], ttl=7200) self.assertEqual(zone_update['id'], zone['id']) self.assertEqual(zone_update['name'], zone['name']) self.assertEqual(zone_update['type'], zone['type']) self.assertEqual(zone_update['email'], zone['email']) self.assertEqual(zone_update['description'], zone['description']) self.assertEqual(zone_update['ttl'], 7200) self.assertEqual(zone_update['masters'], zone['masters']) # Test we can delete and get True returned zone_delete = 
self.user_cloud.delete_zone(zone['id']) self.assertTrue(zone_delete) def cleanup(self, name): self.user_cloud.delete_zone(name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3533654 openstacksdk-4.0.0/openstack/tests/functional/clustering/0000775000175000017500000000000000000000000023701 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/clustering/__init__.py0000664000175000017500000000000000000000000026000 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/clustering/test_cluster.py0000664000175000017500000001052500000000000026776 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time from openstack.clustering.v1 import cluster from openstack.tests.functional import base from openstack.tests.functional.network.v2 import test_network class TestCluster(base.BaseFunctionalTest): _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_CLUSTER' def setUp(self): super().setUp() self.require_service('clustering') self.cidr = '10.99.99.0/16' self.network, self.subnet = test_network.create_network( self.conn, self.getUniqueString(), self.cidr ) self.assertIsNotNone(self.network) profile_attrs = { 'name': self.getUniqueString(), 'spec': { 'type': 'os.nova.server', 'version': 1.0, 'properties': { 'name': self.getUniqueString(), 'flavor': self.flavor.name, 'image': self.image.name, 'networks': [{'network': self.network.id}], }, }, } self.profile = self.conn.clustering.create_profile(**profile_attrs) self.assertIsNotNone(self.profile) self.cluster_name = self.getUniqueString() cluster_spec = { "name": self.cluster_name, "profile_id": self.profile.name, "min_size": 0, "max_size": -1, "desired_capacity": 0, } self.cluster = self.conn.clustering.create_cluster(**cluster_spec) self.conn.clustering.wait_for_status( self.cluster, 'ACTIVE', wait=self._wait_for_timeout ) assert isinstance(self.cluster, cluster.Cluster) def tearDown(self): if self.cluster: self.conn.clustering.delete_cluster(self.cluster.id) self.conn.clustering.wait_for_delete( self.cluster, wait=self._wait_for_timeout ) test_network.delete_network(self.conn, self.network, self.subnet) self.conn.clustering.delete_profile(self.profile) super().tearDown() def test_find(self): sot = self.conn.clustering.find_cluster(self.cluster.id) self.assertEqual(self.cluster.id, sot.id) def test_get(self): sot = self.conn.clustering.get_cluster(self.cluster) self.assertEqual(self.cluster.id, sot.id) def test_list(self): names = [o.name for o in self.conn.clustering.clusters()] self.assertIn(self.cluster_name, names) def test_update(self): new_cluster_name = self.getUniqueString() sot = 
self.conn.clustering.update_cluster( self.cluster, name=new_cluster_name, profile_only=False ) time.sleep(2) sot = self.conn.clustering.get_cluster(self.cluster) self.assertEqual(new_cluster_name, sot.name) def test_delete(self): cluster_delete_action = self.conn.clustering.delete_cluster( self.cluster.id ) self.conn.clustering.wait_for_delete( self.cluster, wait=self._wait_for_timeout ) action = self.conn.clustering.get_action(cluster_delete_action.id) self.assertEqual(action.target_id, self.cluster.id) self.assertEqual(action.action, 'CLUSTER_DELETE') self.assertEqual(action.status, 'SUCCEEDED') self.cluster = None def test_force_delete(self): cluster_delete_action = self.conn.clustering.delete_cluster( self.cluster.id, False, True ) self.conn.clustering.wait_for_delete( self.cluster, wait=self._wait_for_timeout ) action = self.conn.clustering.get_action(cluster_delete_action.id) self.assertEqual(action.target_id, self.cluster.id) self.assertEqual(action.action, 'CLUSTER_DELETE') self.assertEqual(action.status, 'SUCCEEDED') self.cluster = None ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3533654 openstacksdk-4.0.0/openstack/tests/functional/compute/0000775000175000017500000000000000000000000023176 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/__init__.py0000664000175000017500000000000000000000000025275 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/base.py0000664000175000017500000000130500000000000024461 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class BaseComputeTest(base.BaseFunctionalTest): _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_COMPUTE' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3573673 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/0000775000175000017500000000000000000000000023525 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/__init__.py0000664000175000017500000000000000000000000025624 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/test_extension.py0000664000175000017500000000167200000000000027160 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional import base class TestExtension(base.BaseFunctionalTest): def test_list(self): extensions = list(self.conn.compute.extensions()) self.assertGreater(len(extensions), 0) for ext in extensions: self.assertIsInstance(ext.name, str) self.assertIsInstance(ext.namespace, str) self.assertIsInstance(ext.alias, str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/test_flavor.py0000664000175000017500000001262000000000000026430 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack import exceptions from openstack.tests.functional import base class TestFlavor(base.BaseFunctionalTest): def setUp(self): super().setUp() self.new_item_name = self.getUniqueString('flavor') self.one_flavor = list(self.conn.compute.flavors())[0] def test_flavors(self): flavors = list(self.conn.compute.flavors()) self.assertGreater(len(flavors), 0) for flavor in flavors: self.assertIsInstance(flavor.id, str) self.assertIsInstance(flavor.name, str) self.assertIsInstance(flavor.disk, int) self.assertIsInstance(flavor.ram, int) self.assertIsInstance(flavor.vcpus, int) def test_find_flavors_by_id(self): rslt = self.conn.compute.find_flavor(self.one_flavor.id) self.assertEqual(rslt.id, self.one_flavor.id) def test_find_flavors_by_name(self): rslt = self.conn.compute.find_flavor(self.one_flavor.name) self.assertEqual(rslt.name, self.one_flavor.name) def test_find_flavors_no_match_ignore_true(self): rslt = self.conn.compute.find_flavor( "not a flavor", ignore_missing=True ) self.assertIsNone(rslt) def test_find_flavors_no_match_ignore_false(self): self.assertRaises( exceptions.NotFoundException, self.conn.compute.find_flavor, "not a flavor", ignore_missing=False, ) def test_list_flavors(self): pub_flavor_name = self.new_item_name + '_public' priv_flavor_name = self.new_item_name + '_private' public_kwargs = dict( name=pub_flavor_name, ram=1024, vcpus=2, disk=10, is_public=True ) private_kwargs = dict( name=priv_flavor_name, ram=1024, vcpus=2, disk=10, is_public=False ) # Create a public and private flavor. We expect both to be listed # for an operator. self.operator_cloud.compute.create_flavor(**public_kwargs) self.operator_cloud.compute.create_flavor(**private_kwargs) flavors = self.operator_cloud.compute.flavors() # Flavor list will include the standard devstack flavors. We just want # to make sure both of the flavors we just created are present. 
found = [] for f in flavors: # extra_specs should be added within list_flavors() self.assertIn('extra_specs', f) if f['name'] in (pub_flavor_name, priv_flavor_name): found.append(f) self.assertEqual(2, len(found)) def test_flavor_access(self): flavor_name = uuid.uuid4().hex flv = self.operator_cloud.compute.create_flavor( is_public=False, name=flavor_name, ram=128, vcpus=1, disk=0 ) self.addCleanup(self.conn.compute.delete_flavor, flv.id) # Validate the 'demo' user cannot see the new flavor flv_cmp = self.user_cloud.compute.find_flavor(flavor_name) self.assertIsNone(flv_cmp) # Validate we can see the new flavor ourselves flv_cmp = self.operator_cloud.compute.find_flavor(flavor_name) self.assertIsNotNone(flv_cmp) self.assertEqual(flavor_name, flv_cmp.name) project = self.operator_cloud.get_project('demo') self.assertIsNotNone(project) # Now give 'demo' access self.operator_cloud.compute.flavor_add_tenant_access( flv.id, project['id'] ) # Now see if the 'demo' user has access to it flv_cmp = self.user_cloud.compute.find_flavor(flavor_name) self.assertIsNotNone(flv_cmp) # Now remove 'demo' access and check we can't find it self.operator_cloud.compute.flavor_remove_tenant_access( flv.id, project['id'] ) flv_cmp = self.user_cloud.compute.find_flavor(flavor_name) self.assertIsNone(flv_cmp) def test_extra_props_calls(self): flavor_name = uuid.uuid4().hex flv = self.conn.compute.create_flavor( is_public=False, name=flavor_name, ram=128, vcpus=1, disk=0 ) self.addCleanup(self.conn.compute.delete_flavor, flv.id) # Create extra_specs specs = {'a': 'b'} self.conn.compute.create_flavor_extra_specs(flv, extra_specs=specs) # verify specs flv_cmp = self.conn.compute.fetch_flavor_extra_specs(flv) self.assertDictEqual(specs, flv_cmp.extra_specs) # update self.conn.compute.update_flavor_extra_specs_property(flv, 'c', 'd') val_cmp = self.conn.compute.get_flavor_extra_specs_property(flv, 'c') # fetch single prop self.assertEqual('d', val_cmp) # drop new prop 
self.conn.compute.delete_flavor_extra_specs_property(flv, 'c') # re-fetch and ensure prev state flv_cmp = self.conn.compute.fetch_flavor_extra_specs(flv) self.assertDictEqual(specs, flv_cmp.extra_specs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/test_hypervisor.py0000664000175000017500000000215500000000000027353 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestHypervisor(base.BaseFunctionalTest): def setUp(self): super().setUp() def test_list_hypervisors(self): rslt = list(self.conn.compute.hypervisors()) self.assertIsNotNone(rslt) rslt = list(self.conn.compute.hypervisors(details=True)) self.assertIsNotNone(rslt) def test_get_find_hypervisors(self): for hypervisor in self.conn.compute.hypervisors(): self.conn.compute.get_hypervisor(hypervisor.id) self.conn.compute.find_hypervisor(hypervisor.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/test_image.py0000664000175000017500000001005100000000000026215 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base from openstack.tests.functional.image.v2.test_image import TEST_IMAGE_NAME class TestImage(base.BaseFunctionalTest): def test_images(self): images = list(self.conn.compute.images()) self.assertGreater(len(images), 0) for image in images: self.assertIsInstance(image.id, str) def _get_non_test_image(self): images = self.conn.compute.images() image = next(images) if image.name == TEST_IMAGE_NAME: image = next(images) return image def test_find_image(self): image = self._get_non_test_image() self.assertIsNotNone(image) sot = self.conn.compute.find_image(image.id) self.assertEqual(image.id, sot.id) self.assertEqual(image.name, sot.name) def test_get_image(self): image = self._get_non_test_image() self.assertIsNotNone(image) sot = self.conn.compute.get_image(image.id) self.assertEqual(image.id, sot.id) self.assertEqual(image.name, sot.name) self.assertIsNotNone(image.links) self.assertIsNotNone(image.min_disk) self.assertIsNotNone(image.min_ram) self.assertIsNotNone(image.metadata) self.assertIsNotNone(image.progress) self.assertIsNotNone(image.status) def test_image_metadata(self): image = self._get_non_test_image() # delete pre-existing metadata self.conn.compute.delete_image_metadata(image, image.metadata.keys()) image = self.conn.compute.get_image_metadata(image) self.assertFalse(image.metadata) # get metadata image = self.conn.compute.get_image_metadata(image) self.assertFalse(image.metadata) # set no metadata self.conn.compute.set_image_metadata(image) image = self.conn.compute.get_image_metadata(image) 
self.assertFalse(image.metadata) # set empty metadata self.conn.compute.set_image_metadata(image, k0='') image = self.conn.compute.get_image_metadata(image) self.assertIn('k0', image.metadata) self.assertEqual('', image.metadata['k0']) # set metadata self.conn.compute.set_image_metadata(image, k1='v1') image = self.conn.compute.get_image_metadata(image) self.assertTrue(image.metadata) self.assertEqual(2, len(image.metadata)) self.assertIn('k1', image.metadata) self.assertEqual('v1', image.metadata['k1']) # set more metadata self.conn.compute.set_image_metadata(image, k2='v2') image = self.conn.compute.get_image_metadata(image) self.assertTrue(image.metadata) self.assertEqual(3, len(image.metadata)) self.assertIn('k1', image.metadata) self.assertEqual('v1', image.metadata['k1']) self.assertIn('k2', image.metadata) self.assertEqual('v2', image.metadata['k2']) # update metadata self.conn.compute.set_image_metadata(image, k1='v1.1') image = self.conn.compute.get_image_metadata(image) self.assertTrue(image.metadata) self.assertEqual(3, len(image.metadata)) self.assertIn('k1', image.metadata) self.assertEqual('v1.1', image.metadata['k1']) self.assertIn('k2', image.metadata) self.assertEqual('v2', image.metadata['k2']) # delete metadata self.conn.compute.delete_image_metadata(image, image.metadata.keys()) image = self.conn.compute.get_image_metadata(image) self.assertFalse(image.metadata) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/test_keypair.py0000664000175000017500000000506200000000000026605 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.compute.v2 import keypair from openstack.tests.functional import base class TestKeypair(base.BaseFunctionalTest): def setUp(self): super().setUp() # Keypairs can't have .'s in the name. Because why? self.NAME = self.getUniqueString().split('.')[-1] sot = self.conn.compute.create_keypair(name=self.NAME, type='ssh') assert isinstance(sot, keypair.Keypair) self.assertEqual(self.NAME, sot.name) self._keypair = sot def tearDown(self): sot = self.conn.compute.delete_keypair(self._keypair) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.conn.compute.find_keypair(self.NAME) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.NAME, sot.id) def test_get(self): sot = self.conn.compute.get_keypair(self.NAME) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.NAME, sot.id) self.assertEqual('ssh', sot.type) def test_list(self): names = [o.name for o in self.conn.compute.keypairs()] self.assertIn(self.NAME, names) class TestKeypairAdmin(base.BaseFunctionalTest): def setUp(self): super().setUp() self._set_operator_cloud(interface='admin') self.NAME = self.getUniqueString().split('.')[-1] self.USER = self.operator_cloud.list_users()[0] sot = self.conn.compute.create_keypair( name=self.NAME, user_id=self.USER.id ) assert isinstance(sot, keypair.Keypair) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.USER.id, sot.user_id) self._keypair = sot def tearDown(self): sot = self.conn.compute.delete_keypair(self._keypair) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = 
self.conn.compute.get_keypair(self.NAME) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.NAME, sot.id) self.assertEqual(self.USER.id, sot.user_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/test_limits.py0000664000175000017500000000175300000000000026445 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestLimits(base.BaseFunctionalTest): def test_limits(self): sot = self.conn.compute.get_limits() self.assertIsNotNone(sot.absolute['instances']) self.assertIsNotNone(sot.absolute['total_ram']) self.assertIsNotNone(sot.absolute['keypairs']) self.assertIsNotNone(sot.absolute['security_groups']) self.assertIsNotNone(sot.absolute['security_group_rules']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/test_quota_set.py0000664000175000017500000000305000000000000027140 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestQS(base.BaseFunctionalTest): def test_qs(self): sot = self.conn.compute.get_quota_set(self.conn.current_project_id) self.assertIsNotNone(sot.key_pairs) def test_qs_user(self): sot = self.conn.compute.get_quota_set( self.conn.current_project_id, user_id=self.conn.session.auth.get_user_id(self.conn.compute), ) self.assertIsNotNone(sot.key_pairs) def test_update(self): sot = self.conn.compute.get_quota_set(self.conn.current_project_id) self.conn.compute.update_quota_set( sot, query={ 'user_id': self.conn.session.auth.get_user_id( self.conn.compute ) }, key_pairs=100, ) def test_revert(self): self.conn.compute.revert_quota_set( self.conn.current_project_id, user_id=self.conn.session.auth.get_user_id(self.conn.compute), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/test_server.py0000664000175000017500000001565200000000000026455 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.compute.v2 import server from openstack.tests.functional.compute import base as ft_base from openstack.tests.functional.network.v2 import test_network class TestServerAdmin(ft_base.BaseComputeTest): def setUp(self): super().setUp() self._set_operator_cloud(interface='admin') self.NAME = 'needstobeshortandlowercase' self.USERDATA = 'SSdtIGFjdHVhbGx5IGEgZ29hdC4=' volume = self.conn.create_volume(1) sot = self.conn.compute.create_server( name=self.NAME, flavor_id=self.flavor.id, image_id=self.image.id, networks='none', user_data=self.USERDATA, block_device_mapping=[ { 'uuid': volume.id, 'source_type': 'volume', 'boot_index': 0, 'destination_type': 'volume', 'delete_on_termination': True, 'volume_size': 1, }, ], ) self.conn.compute.wait_for_server(sot, wait=self._wait_for_timeout) assert isinstance(sot, server.Server) self.assertEqual(self.NAME, sot.name) self.server = sot def tearDown(self): sot = self.conn.compute.delete_server(self.server.id) self.conn.compute.wait_for_delete( self.server, wait=self._wait_for_timeout ) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = self.conn.compute.get_server(self.server.id) self.assertIsNotNone(sot.reservation_id) self.assertIsNotNone(sot.launch_index) self.assertIsNotNone(sot.ramdisk_id) self.assertIsNotNone(sot.kernel_id) self.assertEqual(self.NAME, sot.hostname) self.assertTrue(sot.root_device_name.startswith('/dev')) self.assertEqual(self.USERDATA, sot.user_data) self.assertTrue(sot.attached_volumes[0]['delete_on_termination']) class TestServer(ft_base.BaseComputeTest): def setUp(self): super().setUp() self.NAME = self.getUniqueString() self.network = None self.subnet = None self.cidr = '10.99.99.0/16' self.network, self.subnet = test_network.create_network( self.conn, self.NAME, self.cidr ) self.assertIsNotNone(self.network) sot = self.conn.compute.create_server( name=self.NAME, flavor_id=self.flavor.id, image_id=self.image.id, networks=[{"uuid": self.network.id}], ) 
self.conn.compute.wait_for_server(sot, wait=self._wait_for_timeout) assert isinstance(sot, server.Server) self.assertEqual(self.NAME, sot.name) self.server = sot def tearDown(self): sot = self.conn.compute.delete_server(self.server.id) self.assertIsNone(sot) # Need to wait for the stack to go away before network delete self.conn.compute.wait_for_delete( self.server, wait=self._wait_for_timeout ) test_network.delete_network(self.conn, self.network, self.subnet) super().tearDown() def test_find(self): sot = self.conn.compute.find_server(self.NAME) self.assertEqual(self.server.id, sot.id) def test_get(self): sot = self.conn.compute.get_server(self.server.id) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.server.id, sot.id) def test_list(self): names = [o.name for o in self.conn.compute.servers()] self.assertIn(self.NAME, names) def test_server_metadata(self): test_server = self.conn.compute.get_server(self.server.id) # get metadata test_server = self.conn.compute.get_server_metadata(test_server) self.assertFalse(test_server.metadata) # set no metadata self.conn.compute.set_server_metadata(test_server) test_server = self.conn.compute.get_server_metadata(test_server) self.assertFalse(test_server.metadata) # set empty metadata self.conn.compute.set_server_metadata(test_server, k0='') server = self.conn.compute.get_server_metadata(test_server) self.assertTrue(server.metadata) # set metadata self.conn.compute.set_server_metadata(test_server, k1='v1') test_server = self.conn.compute.get_server_metadata(test_server) self.assertTrue(test_server.metadata) self.assertEqual(2, len(test_server.metadata)) self.assertIn('k0', test_server.metadata) self.assertEqual('', test_server.metadata['k0']) self.assertIn('k1', test_server.metadata) self.assertEqual('v1', test_server.metadata['k1']) # set more metadata self.conn.compute.set_server_metadata(test_server, k2='v2') test_server = self.conn.compute.get_server_metadata(test_server) self.assertTrue(test_server.metadata) 
self.assertEqual(3, len(test_server.metadata)) self.assertIn('k0', test_server.metadata) self.assertEqual('', test_server.metadata['k0']) self.assertIn('k1', test_server.metadata) self.assertEqual('v1', test_server.metadata['k1']) self.assertIn('k2', test_server.metadata) self.assertEqual('v2', test_server.metadata['k2']) # update metadata self.conn.compute.set_server_metadata(test_server, k1='v1.1') test_server = self.conn.compute.get_server_metadata(test_server) self.assertTrue(test_server.metadata) self.assertEqual(3, len(test_server.metadata)) self.assertIn('k0', test_server.metadata) self.assertEqual('', test_server.metadata['k0']) self.assertIn('k1', test_server.metadata) self.assertEqual('v1.1', test_server.metadata['k1']) self.assertIn('k2', test_server.metadata) self.assertEqual('v2', test_server.metadata['k2']) # delete metadata self.conn.compute.delete_server_metadata( test_server, test_server.metadata.keys() ) test_server = self.conn.compute.get_server_metadata(test_server) self.assertFalse(test_server.metadata) def test_server_remote_console(self): console = self.conn.compute.create_server_remote_console( self.server, protocol='vnc', type='novnc' ) self.assertEqual('vnc', console.protocol) self.assertEqual('novnc', console.type) self.assertTrue(console.url.startswith('http')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/test_service.py0000664000175000017500000000356100000000000026603 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestService(base.BaseFunctionalTest): def setUp(self): super().setUp() self._set_operator_cloud(interface='admin') def test_list(self): sot = list(self.conn.compute.services()) self.assertIsNotNone(sot) def test_disable_enable(self): for srv in self.conn.compute.services(): # only nova-compute can be updated if srv.name == 'nova-compute': self.conn.compute.disable_service(srv) self.conn.compute.enable_service(srv) def test_update(self): for srv in self.conn.compute.services(): if srv.name == 'nova-compute': self.conn.compute.update_service_forced_down( srv, None, None, True ) self.conn.compute.update_service_forced_down( srv, srv.host, srv.binary, False ) self.conn.compute.update_service(srv, status='enabled') def test_find(self): for srv in self.conn.compute.services(): if srv.name != 'nova-conductor': # In devstack there are 2 nova-conductor instances on same host self.conn.compute.find_service( srv.name, host=srv.host, ignore_missing=False ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/compute/v2/test_volume_attachment.py0000664000175000017500000001164400000000000030663 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v3 import volume as volume_ from openstack.compute.v2 import server as server_ from openstack.compute.v2 import volume_attachment as volume_attachment_ from openstack.tests.functional.compute import base as ft_base class TestServerVolumeAttachment(ft_base.BaseComputeTest): def setUp(self): super().setUp() if not self.user_cloud.has_service('block-storage'): self.skipTest('block-storage service not supported by cloud') self.server_name = self.getUniqueString() self.volume_name = self.getUniqueString() # create the server and volume server = self.user_cloud.compute.create_server( name=self.server_name, flavor_id=self.flavor.id, image_id=self.image.id, networks='none', ) self.user_cloud.compute.wait_for_server( server, wait=self._wait_for_timeout, ) self.assertIsInstance(server, server_.Server) self.assertEqual(self.server_name, server.name) volume = self.user_cloud.block_storage.create_volume( name=self.volume_name, size=1, ) self.user_cloud.block_storage.wait_for_status( volume, status='available', wait=self._wait_for_timeout, ) self.assertIsInstance(volume, volume_.Volume) self.assertEqual(self.volume_name, volume.name) self.server = server self.volume = volume def tearDown(self): self.user_cloud.compute.delete_server(self.server.id) self.user_cloud.compute.wait_for_delete( self.server, wait=self._wait_for_timeout, ) self.user_cloud.block_storage.delete_volume(self.volume.id) self.user_cloud.block_storage.wait_for_delete( self.volume, wait=self._wait_for_timeout, ) super().tearDown() def test_volume_attachment(self): # create the volume 
attachment volume_attachment = self.user_cloud.compute.create_volume_attachment( self.server, self.volume, ) self.assertIsInstance( volume_attachment, volume_attachment_.VolumeAttachment, ) self.user_cloud.block_storage.wait_for_status( self.volume, status='in-use', wait=self._wait_for_timeout, ) # list all attached volume attachments (there should only be one) volume_attachments = list( self.user_cloud.compute.volume_attachments(self.server) ) self.assertEqual(1, len(volume_attachments)) self.assertIsInstance( volume_attachments[0], volume_attachment_.VolumeAttachment, ) # update the volume attachment volume_attachment = self.user_cloud.compute.update_volume_attachment( self.server, self.volume, delete_on_termination=True, ) self.assertIsInstance( volume_attachment, volume_attachment_.VolumeAttachment, ) # retrieve details of the (updated) volume attachment volume_attachment = self.user_cloud.compute.get_volume_attachment( self.server, self.volume, ) self.assertIsInstance( volume_attachment, volume_attachment_.VolumeAttachment, ) self.assertTrue(volume_attachment.delete_on_termination) # delete the volume attachment result = self.user_cloud.compute.delete_volume_attachment( self.server, self.volume, ignore_missing=False, ) self.assertIsNone(result) self.user_cloud.block_storage.wait_for_status( self.volume, status='available', wait=self._wait_for_timeout, ) # Wait for the attachment to be deleted. # This is done to prevent a race between the BDM # record being deleted and we trying to delete the server. 
self.user_cloud.compute.wait_for_delete( volume_attachment, wait=self._wait_for_timeout, ) # Verify the server doesn't have any volume attachment volume_attachments = list( self.user_cloud.compute.volume_attachments(self.server) ) self.assertEqual(0, len(volume_attachments)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3573673 openstacksdk-4.0.0/openstack/tests/functional/dns/0000775000175000017500000000000000000000000022306 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/dns/__init__.py0000664000175000017500000000000000000000000024405 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3573673 openstacksdk-4.0.0/openstack/tests/functional/dns/v2/0000775000175000017500000000000000000000000022635 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/dns/v2/__init__.py0000664000175000017500000000000000000000000024734 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/dns/v2/test_zone.py0000664000175000017500000000716300000000000025230 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import random from openstack import connection from openstack import exceptions from openstack.tests.functional import base from openstack import utils class TestZone(base.BaseFunctionalTest): def setUp(self): super().setUp() self.require_service('dns') self.conn = connection.from_config(cloud_name=base.TEST_CLOUD_NAME) # Note: zone deletion is not an immediate operation, so each time # chose a new zone name for a test # getUniqueString is not guaranteed to return unique string between # different tests of the same class. self.ZONE_NAME = f'example-{random.randint(1, 10000)}.org.' self.zone = self.conn.dns.create_zone( name=self.ZONE_NAME, email='joe@example.org', type='PRIMARY', ttl=7200, description='example zone', ) self.addCleanup(self.conn.dns.delete_zone, self.zone) def test_get_zone(self): zone = self.conn.dns.get_zone(self.zone) self.assertEqual(self.zone, zone) def test_list_zones(self): names = [f.name for f in self.conn.dns.zones()] self.assertIn(self.ZONE_NAME, names) def test_update_zone(self): current_ttl = self.conn.dns.get_zone(self.zone)['ttl'] self.conn.dns.update_zone(self.zone, ttl=current_ttl + 1) updated_zone_ttl = self.conn.dns.get_zone(self.zone)['ttl'] self.assertEqual( current_ttl + 1, updated_zone_ttl, 'Failed, updated TTL value is:{} instead of expected:{}'.format( updated_zone_ttl, current_ttl + 1 ), ) def test_create_rs(self): zone = self.conn.dns.get_zone(self.zone) self.assertIsNotNone( self.conn.dns.create_recordset( zone=zone, name=f'www.{zone.name}', type='A', description='Example zone rec', ttl=3600, records=['192.168.1.1'], ) ) def test_delete_zone_with_shares(self): # Make sure the API under test has shared zones support if not utils.supports_version(self.conn.dns, '2.1'): self.skipTest( 'Designate API version does not support shared zones.' ) zone_name = f'example-{random.randint(1, 10000)}.org.' 
zone = self.conn.dns.create_zone( name=zone_name, email='joe@example.org', type='PRIMARY', ttl=7200, description='example zone', ) self.addCleanup(self.conn.dns.delete_zone, zone) demo_project_id = self.operator_cloud.get_project('demo')['id'] zone_share = self.conn.dns.create_zone_share( zone, target_project_id=demo_project_id ) self.addCleanup(self.conn.dns.delete_zone_share, zone, zone_share) # Test that we cannot delete a zone with shares self.assertRaises( exceptions.BadRequestException, self.conn.dns.delete_zone, zone ) self.conn.dns.delete_zone(zone, delete_shares=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/dns/v2/test_zone_share.py0000664000175000017500000001350700000000000026411 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack import exceptions from openstack.tests.functional import base from openstack import utils class TestZoneShare(base.BaseFunctionalTest): def setUp(self): super().setUp() self.require_service('dns') if not self.user_cloud: self.skipTest("The demo cloud is required for this test") # Note: zone deletion is not an immediate operation, so each time # chose a new zone name for a test # getUniqueString is not guaranteed to return unique string between # different tests of the same class. self.ZONE_NAME = f'example-{uuid.uuid4().hex}.org.' 
# Make sure the API under test has shared zones support if not utils.supports_version(self.conn.dns, '2.1'): self.skipTest( 'Designate API version does not support shared zones.' ) self.zone = self.operator_cloud.dns.create_zone( name=self.ZONE_NAME, email='joe@example.org', type='PRIMARY', ttl=7200, description='example zone for sdk zone share tests', ) self.addCleanup( self.operator_cloud.dns.delete_zone, self.zone, delete_shares=True, ) self.project_id = self.operator_cloud.session.get_project_id() self.demo_project_id = self.user_cloud.session.get_project_id() def test_create_delete_zone_share(self): zone_share = self.operator_cloud.dns.create_zone_share( self.zone, target_project_id=self.demo_project_id ) self.addCleanup( self.operator_cloud.dns.delete_zone_share, self.zone, zone_share, ) self.assertEqual(self.zone.id, zone_share.zone_id) self.assertEqual(self.project_id, zone_share.project_id) self.assertEqual(self.demo_project_id, zone_share.target_project_id) self.assertIsNotNone(zone_share.id) self.assertIsNotNone(zone_share.created_at) self.assertIsNone(zone_share.updated_at) def test_get_zone_share(self): orig_zone_share = self.operator_cloud.dns.create_zone_share( self.zone, target_project_id=self.demo_project_id, ) self.addCleanup( self.operator_cloud.dns.delete_zone_share, self.zone, orig_zone_share, ) zone_share = self.operator_cloud.dns.get_zone_share( self.zone, orig_zone_share, ) self.assertEqual(self.zone.id, zone_share.zone_id) self.assertEqual(self.project_id, zone_share.project_id) self.assertEqual(self.demo_project_id, zone_share.target_project_id) self.assertEqual(orig_zone_share.id, zone_share.id) self.assertEqual(orig_zone_share.created_at, zone_share.created_at) self.assertEqual(orig_zone_share.updated_at, zone_share.updated_at) def test_find_zone_share(self): orig_zone_share = self.operator_cloud.dns.create_zone_share( self.zone, target_project_id=self.demo_project_id ) self.addCleanup( self.operator_cloud.dns.delete_zone_share, 
self.zone, orig_zone_share, ) zone_share = self.operator_cloud.dns.find_zone_share( self.zone, orig_zone_share.id, ) self.assertEqual(self.zone.id, zone_share.zone_id) self.assertEqual(self.project_id, zone_share.project_id) self.assertEqual(self.demo_project_id, zone_share.target_project_id) self.assertEqual(orig_zone_share.id, zone_share.id) self.assertEqual(orig_zone_share.created_at, zone_share.created_at) self.assertEqual(orig_zone_share.updated_at, zone_share.updated_at) def test_find_zone_share_ignore_missing(self): zone_share = self.operator_cloud.dns.find_zone_share( self.zone, 'bogus_id', ) self.assertIsNone(zone_share) def test_find_zone_share_ignore_missing_false(self): self.assertRaises( exceptions.NotFoundException, self.operator_cloud.dns.find_zone_share, self.zone, 'bogus_id', ignore_missing=False, ) def test_list_zone_shares(self): zone_share = self.operator_cloud.dns.create_zone_share( self.zone, target_project_id=self.demo_project_id, ) self.addCleanup( self.operator_cloud.dns.delete_zone_share, self.zone, zone_share, ) target_ids = [ o.target_project_id for o in self.operator_cloud.dns.zone_shares(self.zone) ] self.assertIn(self.demo_project_id, target_ids) def test_list_zone_shares_with_target_id(self): zone_share = self.operator_cloud.dns.create_zone_share( self.zone, target_project_id=self.demo_project_id, ) self.addCleanup( self.operator_cloud.dns.delete_zone_share, self.zone, zone_share, ) target_ids = [ o.target_project_id for o in self.operator_cloud.dns.zone_shares( self.zone, target_project_id=self.demo_project_id ) ] self.assertIn(self.demo_project_id, target_ids) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3613691 openstacksdk-4.0.0/openstack/tests/functional/examples/0000775000175000017500000000000000000000000023340 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/functional/examples/__init__.py0000664000175000017500000000000000000000000025437 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/examples/test_compute.py0000664000175000017500000000327700000000000026436 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from examples.compute import create from examples.compute import delete from examples.compute import find as compute_find from examples.compute import list as compute_list from examples import connect from examples.network import find as network_find from examples.network import list as network_list from openstack.tests.functional import base class TestCompute(base.BaseFunctionalTest): """Test the compute examples The purpose of these tests is to ensure the examples run without erring out. 
""" def setUp(self): super().setUp() self.conn = connect.create_connection_from_config() def test_compute(self): compute_list.list_servers(self.conn) compute_list.list_images(self.conn) compute_list.list_flavors(self.conn) compute_list.list_keypairs(self.conn) network_list.list_networks(self.conn) compute_find.find_image(self.conn) compute_find.find_flavor(self.conn) compute_find.find_keypair(self.conn) network_find.find_network(self.conn) create.create_server(self.conn) delete.delete_keypair(self.conn) delete.delete_server(self.conn) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/examples/test_identity.py0000664000175000017500000000251400000000000026604 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from examples import connect from examples.identity import list as identity_list from openstack.tests.functional import base class TestIdentity(base.BaseFunctionalTest): """Test the identity examples The purpose of these tests is to ensure the examples run without erring out. 
""" def setUp(self): super().setUp() self.conn = connect.create_connection_from_config() def test_identity(self): identity_list.list_users(self.conn) identity_list.list_credentials(self.conn) identity_list.list_projects(self.conn) identity_list.list_domains(self.conn) identity_list.list_groups(self.conn) identity_list.list_services(self.conn) identity_list.list_endpoints(self.conn) identity_list.list_regions(self.conn) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/examples/test_image.py0000664000175000017500000000226200000000000026035 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from examples import connect from examples.image import create as image_create from examples.image import delete as image_delete from examples.image import list as image_list from openstack.tests.functional import base class TestImage(base.BaseFunctionalTest): """Test the image examples The purpose of these tests is to ensure the examples run without erring out. 
""" def setUp(self): super().setUp() self.conn = connect.create_connection_from_config() def test_image(self): image_list.list_images(self.conn) image_create.upload_image(self.conn) image_delete.delete_image(self.conn) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/examples/test_network.py0000664000175000017500000000275100000000000026447 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from examples import connect from examples.network import create as network_create from examples.network import delete as network_delete from examples.network import find as network_find from examples.network import list as network_list from openstack.tests.functional import base class TestNetwork(base.BaseFunctionalTest): """Test the network examples The purpose of these tests is to ensure the examples run without erring out. 
""" def setUp(self): super().setUp() self.conn = connect.create_connection_from_config() def test_network(self): network_list.list_networks(self.conn) network_list.list_subnets(self.conn) network_list.list_ports(self.conn) network_list.list_security_groups(self.conn) network_list.list_routers(self.conn) network_find.find_network(self.conn) network_create.create_network(self.conn) network_delete.delete_network(self.conn) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3613691 openstacksdk-4.0.0/openstack/tests/functional/identity/0000775000175000017500000000000000000000000023353 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/identity/__init__.py0000664000175000017500000000000000000000000025452 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3613691 openstacksdk-4.0.0/openstack/tests/functional/identity/v3/0000775000175000017500000000000000000000000023703 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/identity/v3/__init__.py0000664000175000017500000000000000000000000026002 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/identity/v3/test_access_rule.py0000664000175000017500000000611300000000000027605 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import exceptions from openstack.tests.functional import base class TestAccessRule(base.BaseFunctionalTest): def setUp(self): super().setUp() self.user_id = self.operator_cloud.current_user_id def _create_application_credential_with_access_rule(self): """create application credential with access_rule.""" app_cred = self.conn.identity.create_application_credential( user=self.user_id, name='app_cred', access_rules=[ { "path": "/v2.0/metrics", "service": "monitoring", "method": "GET", } ], ) self.addCleanup( self.conn.identity.delete_application_credential, self.user_id, app_cred['id'], ) return app_cred def test_get_access_rule(self): app_cred = self._create_application_credential_with_access_rule() access_rule_id = app_cred['access_rules'][0]['id'] access_rule = self.conn.identity.get_access_rule( user=self.user_id, access_rule=access_rule_id ) self.assertEqual(access_rule['id'], access_rule_id) self.assertEqual(access_rule['user_id'], self.user_id) def test_list_access_rules(self): app_cred = self._create_application_credential_with_access_rule() access_rule_id = app_cred['access_rules'][0]['id'] access_rules = self.conn.identity.access_rules(user=self.user_id) self.assertEqual(1, len(list(access_rules))) for access_rule in access_rules: self.assertEqual(app_cred['user_id'], self.user_id) self.assertEqual(access_rule_id, access_rule['id']) def test_delete_access_rule(self): app_cred = self._create_application_credential_with_access_rule() access_rule_id = app_cred['access_rules'][0]['id'] # This is expected to raise an exception since access_rule is 
still # in use for app_cred. self.assertRaises( exceptions.HttpException, self.conn.identity.delete_access_rule, user=self.user_id, access_rule=access_rule_id, ) # delete application credential first to delete access rule self.conn.identity.delete_application_credential( user=self.user_id, application_credential=app_cred['id'] ) # delete orphaned access rules self.conn.identity.delete_access_rule( user=self.user_id, access_rule=access_rule_id ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/identity/v3/test_application_credential.py0000664000175000017500000000537000000000000032016 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack.tests.functional import base class TestApplicationCredentials(base.BaseFunctionalTest): def setUp(self): super().setUp() self.user_id = self.operator_cloud.current_user_id def _create_application_credentials(self): app_creds = self.conn.identity.create_application_credential( user=self.user_id, name='app_cred' ) self.addCleanup( self.conn.identity.delete_application_credential, self.user_id, app_creds['id'], ) return app_creds def test_create_application_credentials(self): app_creds = self._create_application_credentials() self.assertEqual(app_creds['user_id'], self.user_id) def test_get_application_credential(self): app_creds = self._create_application_credentials() app_cred = self.conn.identity.get_application_credential( user=self.user_id, application_credential=app_creds['id'] ) self.assertEqual(app_cred['id'], app_creds['id']) self.assertEqual(app_cred['user_id'], self.user_id) def test_application_credentials(self): self._create_application_credentials() app_creds = self.conn.identity.application_credentials( user=self.user_id ) for app_cred in app_creds: self.assertEqual(app_cred['user_id'], self.user_id) def test_find_application_credential(self): app_creds = self._create_application_credentials() app_cred = self.conn.identity.find_application_credential( user=self.user_id, name_or_id=app_creds['id'] ) self.assertEqual(app_cred['id'], app_creds['id']) self.assertEqual(app_cred['user_id'], self.user_id) def test_delete_application_credential(self): app_creds = self._create_application_credentials() self.conn.identity.delete_application_credential( user=self.user_id, application_credential=app_creds['id'] ) self.assertRaises( exceptions.NotFoundException, self.conn.identity.get_application_credential, user=self.user_id, application_credential=app_creds['id'], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/functional/identity/v3/test_domain_config.py0000664000175000017500000000503300000000000030111 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack.identity.v3 import domain as _domain from openstack.identity.v3 import domain_config as _domain_config from openstack.tests.functional import base class TestDomainConfig(base.BaseFunctionalTest): def setUp(self): super().setUp() self.domain_name = self.getUniqueString() # create the domain and domain config self.domain = self.operator_cloud.create_domain( name=self.domain_name, ) self.assertIsInstance(self.domain, _domain.Domain) self.addCleanup(self._delete_domain) def _delete_domain(self): self.operator_cloud.identity.update_domain( self.domain, enabled=False, ) self.operator_cloud.identity.delete_domain(self.domain) def test_domain_config(self): # create the domain config domain_config = self.operator_cloud.identity.create_domain_config( self.domain, identity={'driver': uuid.uuid4().hex}, ldap={'url': uuid.uuid4().hex}, ) self.assertIsInstance( domain_config, _domain_config.DomainConfig, ) # update the domain config ldap_url = uuid.uuid4().hex domain_config = self.operator_cloud.identity.update_domain_config( self.domain, ldap={'url': ldap_url}, ) self.assertIsInstance( domain_config, _domain_config.DomainConfig, ) # retrieve details of the (updated) domain config domain_config = self.operator_cloud.identity.get_domain_config( self.domain, ) 
self.assertIsInstance( domain_config, _domain_config.DomainConfig, ) self.assertEqual(ldap_url, domain_config.ldap.url) # delete the domain config result = self.operator_cloud.identity.delete_domain_config( self.domain, ignore_missing=False, ) self.assertIsNone(result) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3613691 openstacksdk-4.0.0/openstack/tests/functional/image/0000775000175000017500000000000000000000000022604 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/image/__init__.py0000664000175000017500000000000000000000000024703 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3613691 openstacksdk-4.0.0/openstack/tests/functional/image/v2/0000775000175000017500000000000000000000000023133 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/image/v2/__init__.py0000664000175000017500000000000000000000000025232 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/image/v2/base.py0000664000175000017500000000172700000000000024426 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class BaseImageTest(base.BaseFunctionalTest): _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_IMAGE' def setUp(self): super().setUp() self._set_user_cloud(image_api_version='2') self._set_operator_cloud(image_api_version='2') if not self.user_cloud.has_service('image', '2'): self.skipTest('image service not supported by cloud') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/image/v2/test_image.py0000664000175000017500000000603300000000000025630 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v2 import image as _image from openstack.tests.functional.image.v2 import base # NOTE(stephenfin): This is referenced in the Compute functional tests to avoid # attempts to boot from it. 
TEST_IMAGE_NAME = 'Test Image' class TestImage(base.BaseImageTest): def setUp(self): super().setUp() # there's a limit on name length self.image = self.conn.image.create_image( name=TEST_IMAGE_NAME, disk_format='raw', container_format='bare', properties={ 'description': 'This is not an image', }, data=open('CONTRIBUTING.rst'), ) self.assertIsInstance(self.image, _image.Image) self.assertEqual(TEST_IMAGE_NAME, self.image.name) def tearDown(self): # we do this in tearDown rather than via 'addCleanup' since we want to # wait for the deletion of the resource to ensure it completes self.conn.image.delete_image(self.image) self.conn.image.wait_for_delete(self.image) super().tearDown() def test_images(self): # get image image = self.conn.image.get_image(self.image.id) self.assertEqual(self.image.name, image.name) # find image image = self.conn.image.find_image(self.image.name) self.assertEqual(self.image.id, image.id) # list images = list(self.conn.image.images()) # there are many other images so we don't assert that this is the # *only* image present self.assertIn(self.image.id, {i.id for i in images}) # update image_name = self.getUniqueString() image = self.conn.image.update_image( self.image, name=image_name, ) self.assertIsInstance(image, _image.Image) image = self.conn.image.get_image(self.image.id) self.assertEqual(image_name, image.name) def test_tags(self): # add tag image = self.conn.image.get_image(self.image) self.conn.image.add_tag(image, 't1') self.conn.image.add_tag(image, 't2') # filter image by tags image = list(self.conn.image.images(tag=['t1', 't2']))[0] self.assertEqual(image.id, image.id) self.assertIn('t1', image.tags) self.assertIn('t2', image.tags) # remove tag self.conn.image.remove_tag(image, 't1') image = self.conn.image.get_image(self.image) self.assertIn('t2', image.tags) self.assertNotIn('t1', image.tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/functional/image/v2/test_metadef_namespace.py0000664000175000017500000000632200000000000030170 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v2 import metadef_namespace as _metadef_namespace from openstack.tests.functional.image.v2 import base class TestMetadefNamespace(base.BaseImageTest): # TODO(stephenfin): We should use setUpClass here for MOAR SPEED!!! def setUp(self): super().setUp() # there's a limit on namespace length namespace = self.getUniqueString().split('.')[-1] self.metadef_namespace = self.conn.image.create_metadef_namespace( namespace=namespace, ) self.assertIsInstance( self.metadef_namespace, _metadef_namespace.MetadefNamespace, ) self.assertEqual(namespace, self.metadef_namespace.namespace) def tearDown(self): # we do this in tearDown rather than via 'addCleanup' since we want to # wait for the deletion of the resource to ensure it completes self.conn.image.delete_metadef_namespace(self.metadef_namespace) self.conn.image.wait_for_delete(self.metadef_namespace) super().tearDown() def test_metadef_namespace(self): # get metadef_namespace = self.conn.image.get_metadef_namespace( self.metadef_namespace.namespace ) self.assertEqual( self.metadef_namespace.namespace, metadef_namespace.namespace, ) # (no find_metadef_namespace method) # list metadef_namespaces = list(self.conn.image.metadef_namespaces()) # there are a load of default metadef namespaces so we don't assert # that this 
is the *only* metadef namespace present self.assertIn( self.metadef_namespace.namespace, {n.namespace for n in metadef_namespaces}, ) # update # there's a limit on display name and description lengths and no # inherent need for randomness so we use fixed strings metadef_namespace_display_name = 'A display name' metadef_namespace_description = 'A description' metadef_namespace = self.conn.image.update_metadef_namespace( self.metadef_namespace, display_name=metadef_namespace_display_name, description=metadef_namespace_description, ) self.assertIsInstance( metadef_namespace, _metadef_namespace.MetadefNamespace, ) metadef_namespace = self.conn.image.get_metadef_namespace( self.metadef_namespace.namespace ) self.assertEqual( metadef_namespace_display_name, metadef_namespace.display_name, ) self.assertEqual( metadef_namespace_description, metadef_namespace.description, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/image/v2/test_metadef_object.py0000664000175000017500000000701000000000000027475 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.image.v2 import metadef_namespace as _metadef_namespace from openstack.image.v2 import metadef_object as _metadef_object from openstack.tests.functional.image.v2 import base class TestMetadefObject(base.BaseImageTest): def setUp(self): super().setUp() # create namespace for object namespace = self.getUniqueString().split('.')[-1] self.metadef_namespace = self.conn.image.create_metadef_namespace( namespace=namespace, ) self.assertIsInstance( self.metadef_namespace, _metadef_namespace.MetadefNamespace, ) self.assertEqual(namespace, self.metadef_namespace.namespace) # create object object = self.getUniqueString().split('.')[-1] self.metadef_object = self.conn.image.create_metadef_object( name=object, namespace=self.metadef_namespace, ) self.assertIsInstance( self.metadef_object, _metadef_object.MetadefObject, ) self.assertEqual(object, self.metadef_object.name) def tearDown(self): self.conn.image.delete_metadef_object( self.metadef_object, self.metadef_object.namespace_name, ) self.conn.image.wait_for_delete(self.metadef_object) self.conn.image.delete_metadef_namespace(self.metadef_namespace) self.conn.image.wait_for_delete(self.metadef_namespace) super().tearDown() def test_metadef_objects(self): # get metadef_object = self.conn.image.get_metadef_object( self.metadef_object.name, self.metadef_namespace, ) self.assertEqual( self.metadef_object.namespace_name, metadef_object.namespace_name, ) self.assertEqual( self.metadef_object.name, metadef_object.name, ) # list metadef_objects = list( self.conn.image.metadef_objects(self.metadef_object.namespace_name) ) # there are a load of default metadef objects so we don't assert # that this is the *only* metadef objects present self.assertIn( self.metadef_object.name, {o.name for o in metadef_objects}, ) # update metadef_object_new_name = 'New object name' metadef_object_new_description = 'New object description' metadef_object = self.conn.image.update_metadef_object( self.metadef_object.name, 
namespace=self.metadef_object.namespace_name, name=metadef_object_new_name, description=metadef_object_new_description, ) self.assertIsInstance( metadef_object, _metadef_object.MetadefObject, ) self.assertEqual( metadef_object_new_name, metadef_object.name, ) self.assertEqual( metadef_object_new_description, metadef_object.description, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/image/v2/test_metadef_property.py0000664000175000017500000001141000000000000030112 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import random import string from openstack.image.v2 import metadef_namespace as _metadef_namespace from openstack.image.v2 import metadef_property as _metadef_property from openstack.tests.functional.image.v2 import base class TestMetadefProperty(base.BaseImageTest): def setUp(self): super().setUp() # there's a limit on namespace length namespace = 'test_' + ''.join( random.choice(string.ascii_lowercase) for _ in range(75) ) self.metadef_namespace = self.conn.image.create_metadef_namespace( namespace=namespace, ) self.assertIsInstance( self.metadef_namespace, _metadef_namespace.MetadefNamespace, ) self.assertEqual(namespace, self.metadef_namespace.namespace) # there's a limit on property length property_name = 'test_' + ''.join( random.choice(string.ascii_lowercase) for _ in range(75) ) self.attrs = { 'name': property_name, 'title': property_name, 'type': 'string', 'description': 'Web Server port', 'enum': ["80", "443"], } self.metadef_property = self.conn.image.create_metadef_property( self.metadef_namespace.namespace, **self.attrs ) self.assertIsInstance( self.metadef_property, _metadef_property.MetadefProperty ) self.assertEqual(self.attrs['name'], self.metadef_property.name) self.assertEqual(self.attrs['title'], self.metadef_property.title) self.assertEqual(self.attrs['type'], self.metadef_property.type) self.assertEqual( self.attrs['description'], self.metadef_property.description ) self.assertEqual(self.attrs['enum'], self.metadef_property.enum) def tearDown(self): # we do this in tearDown rather than via 'addCleanup' since we want to # wait for the deletion of the resource to ensure it completes self.conn.image.delete_metadef_property( self.metadef_property, self.metadef_namespace ) self.conn.image.delete_metadef_namespace(self.metadef_namespace) self.conn.image.wait_for_delete(self.metadef_namespace) super().tearDown() def test_metadef_property(self): # get metadef property metadef_property = self.conn.image.get_metadef_property( self.metadef_property, 
self.metadef_namespace ) self.assertIsNotNone(metadef_property) self.assertIsInstance( metadef_property, _metadef_property.MetadefProperty ) self.assertEqual(self.attrs['name'], metadef_property.name) self.assertEqual(self.attrs['title'], metadef_property.title) self.assertEqual(self.attrs['type'], metadef_property.type) self.assertEqual( self.attrs['description'], metadef_property.description ) self.assertEqual(self.attrs['enum'], metadef_property.enum) # (no find_metadef_property method) # list metadef_properties = list( self.conn.image.metadef_properties(self.metadef_namespace) ) self.assertIsNotNone(metadef_properties) self.assertIsInstance( metadef_properties[0], _metadef_property.MetadefProperty ) # update self.attrs['title'] = ''.join( random.choice(string.ascii_lowercase) for _ in range(10) ) self.attrs['description'] = ''.join( random.choice(string.ascii_lowercase) for _ in range(10) ) metadef_property = self.conn.image.update_metadef_property( self.metadef_property, self.metadef_namespace.namespace, **self.attrs ) self.assertIsNotNone(metadef_property) self.assertIsInstance( metadef_property, _metadef_property.MetadefProperty, ) metadef_property = self.conn.image.get_metadef_property( self.metadef_property.name, self.metadef_namespace ) self.assertEqual( self.attrs['title'], metadef_property.title, ) self.assertEqual( self.attrs['description'], metadef_property.description, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/image/v2/test_metadef_resource_type.py0000664000175000017500000000562400000000000031130 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v2 import metadef_namespace as _metadef_namespace from openstack.image.v2 import metadef_resource_type as _metadef_resource_type from openstack.tests.functional.image.v2 import base class TestMetadefResourceType(base.BaseImageTest): def setUp(self): super().setUp() # there's a limit on namespace length namespace = self.getUniqueString().split('.')[-1] self.metadef_namespace = self.conn.image.create_metadef_namespace( namespace=namespace, ) self.assertIsInstance( self.metadef_namespace, _metadef_namespace.MetadefNamespace, ) self.assertEqual(namespace, self.metadef_namespace.namespace) resource_type_name = 'test-resource-type' resource_type = {'name': resource_type_name} self.metadef_resource_type = ( self.conn.image.create_metadef_resource_type_association( metadef_namespace=namespace, **resource_type ) ) self.assertIsInstance( self.metadef_resource_type, _metadef_resource_type.MetadefResourceTypeAssociation, ) self.assertEqual(resource_type_name, self.metadef_resource_type.name) def tearDown(self): # we do this in tearDown rather than via 'addCleanup' since we want to # wait for the deletion of the resource to ensure it completes self.conn.image.delete_metadef_namespace(self.metadef_namespace) self.conn.image.wait_for_delete(self.metadef_namespace) super().tearDown() def test_metadef_resource_types(self): # list resource type associations associations = list( self.conn.image.metadef_resource_type_associations( metadef_namespace=self.metadef_namespace ) ) self.assertIn( self.metadef_resource_type.name, {a.name for a in associations} ) # (no 
find_metadef_resource_type_association method) # list resource types resource_types = list(self.conn.image.metadef_resource_types()) self.assertIn( self.metadef_resource_type.name, {t.name for t in resource_types} ) # delete self.conn.image.delete_metadef_resource_type_association( self.metadef_resource_type, metadef_namespace=self.metadef_namespace, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/image/v2/test_metadef_schema.py0000664000175000017500000000610100000000000027467 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.image.v2 import metadef_schema as _metadef_schema from openstack.tests.functional.image.v2 import base class TestMetadefSchema(base.BaseImageTest): def test_get_metadef_namespace_schema(self): metadef_schema = self.conn.image.get_metadef_namespace_schema() self.assertIsNotNone(metadef_schema) self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) def test_get_metadef_namespaces_schema(self): metadef_schema = self.conn.image.get_metadef_namespaces_schema() self.assertIsNotNone(metadef_schema) self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) def test_get_metadef_resource_type_schema(self): metadef_schema = self.conn.image.get_metadef_resource_type_schema() self.assertIsNotNone(metadef_schema) self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) def test_get_metadef_resource_types_schema(self): metadef_schema = self.conn.image.get_metadef_resource_types_schema() self.assertIsNotNone(metadef_schema) self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) def test_get_metadef_object_schema(self): metadef_schema = self.conn.image.get_metadef_object_schema() self.assertIsNotNone(metadef_schema) self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) def test_get_metadef_objects_schema(self): metadef_schema = self.conn.image.get_metadef_objects_schema() self.assertIsNotNone(metadef_schema) self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) def test_get_metadef_property_schema(self): metadef_schema = self.conn.image.get_metadef_property_schema() self.assertIsNotNone(metadef_schema) self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) def test_get_metadef_properties_schema(self): metadef_schema = self.conn.image.get_metadef_properties_schema() self.assertIsNotNone(metadef_schema) self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) def test_get_metadef_tag_schema(self): metadef_schema = self.conn.image.get_metadef_tag_schema() 
self.assertIsNotNone(metadef_schema) self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) def test_get_metadef_tags_schema(self): metadef_schema = self.conn.image.get_metadef_tags_schema() self.assertIsNotNone(metadef_schema) self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/image/v2/test_schema.py0000664000175000017500000000261200000000000026005 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.image.v2 import schema as _schema from openstack.tests.functional.image.v2 import base class TestSchema(base.BaseImageTest): def test_get_images_schema(self): schema = self.conn.image.get_images_schema() self.assertIsNotNone(schema) self.assertIsInstance(schema, _schema.Schema) def test_get_image_schema(self): schema = self.conn.image.get_image_schema() self.assertIsNotNone(schema) self.assertIsInstance(schema, _schema.Schema) def test_get_members_schema(self): schema = self.conn.image.get_members_schema() self.assertIsNotNone(schema) self.assertIsInstance(schema, _schema.Schema) def test_get_member_schema(self): schema = self.conn.image.get_member_schema() self.assertIsNotNone(schema) self.assertIsInstance(schema, _schema.Schema) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/image/v2/test_task.py0000664000175000017500000000221000000000000025501 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional.image.v2 import base class TestTask(base.BaseImageTest): def test_tasks(self): tasks = list(self.conn.image.tasks()) # NOTE(stephenfin): Yes, this is a dumb test. Basically all that we're # checking is that the API endpoint is correct. 
It would be nice to # have a proper check here that includes creation of tasks but we don't # currently have the ability to do this and I'm not even sure if tasks # are still really a supported thing. A potential future work item, # perhaps. self.assertIsInstance(tasks, list) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3653712 openstacksdk-4.0.0/openstack/tests/functional/instance_ha/0000775000175000017500000000000000000000000023776 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/instance_ha/__init__.py0000664000175000017500000000000000000000000026075 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/instance_ha/test_host.py0000664000175000017500000000522100000000000026364 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty from openstack.compute.v2 import hypervisor from openstack import connection from openstack.tests.functional import base HYPERVISORS: ty.List[hypervisor.Hypervisor] = [] def hypervisors(): global HYPERVISORS if HYPERVISORS: return True HYPERVISORS = connection.Connection.list_hypervisors( connection.from_config(cloud_name=base.TEST_CLOUD_NAME) ) return bool(HYPERVISORS) class TestHost(base.BaseFunctionalTest): def setUp(self): super().setUp() self.require_service('instance-ha') self.NAME = self.getUniqueString() if not hypervisors(): self.skipTest( "Skip TestHost as there are no hypervisors " "configured in nova" ) # Create segment self.segment = self.conn.ha.create_segment( name=self.NAME, recovery_method='auto', service_type='COMPUTE' ) # Create valid host self.NAME = HYPERVISORS[0].name self.host = self.conn.ha.create_host( segment_id=self.segment.uuid, name=self.NAME, type='COMPUTE', control_attributes='SSH', ) # Delete host self.addCleanup( self.conn.ha.delete_host, self.segment.uuid, self.host.uuid ) # Delete segment self.addCleanup(self.conn.ha.delete_segment, self.segment.uuid) def test_list(self): names = [ o.name for o in self.conn.ha.hosts( self.segment.uuid, failover_segment_id=self.segment.uuid, type='COMPUTE', ) ] self.assertIn(self.NAME, names) def test_update(self): updated_host = self.conn.ha.update_host( self.host['uuid'], segment_id=self.segment.uuid, on_maintenance='True', ) get_host = self.conn.ha.get_host( updated_host.uuid, updated_host.segment_id ) self.assertEqual(True, get_host.on_maintenance) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/instance_ha/test_segment.py0000664000175000017500000000301700000000000027052 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestSegment(base.BaseFunctionalTest): def setUp(self): super().setUp() self.require_service('instance-ha') self.NAME = self.getUniqueString() # Create segment self.segment = self.conn.ha.create_segment( name=self.NAME, recovery_method='auto', service_type='COMPUTE' ) # Delete segment self.addCleanup(self.conn.ha.delete_segment, self.segment['uuid']) def test_list(self): names = [o.name for o in self.conn.ha.segments(recovery_method='auto')] self.assertIn(self.NAME, names) def test_update(self): updated_segment = self.conn.ha.update_segment( self.segment['uuid'], name='UPDATED-NAME' ) get_updated_segment = self.conn.ha.get_segment(updated_segment.uuid) self.assertEqual('UPDATED-NAME', get_updated_segment.name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3653712 openstacksdk-4.0.0/openstack/tests/functional/load_balancer/0000775000175000017500000000000000000000000024270 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/load_balancer/__init__.py0000664000175000017500000000000000000000000026367 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3653712 
openstacksdk-4.0.0/openstack/tests/functional/load_balancer/v2/0000775000175000017500000000000000000000000024617 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/load_balancer/v2/__init__.py0000664000175000017500000000000000000000000026716 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/load_balancer/v2/test_load_balancer.py0000664000175000017500000010075700000000000031010 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.load_balancer.v2 import availability_zone from openstack.load_balancer.v2 import availability_zone_profile from openstack.load_balancer.v2 import flavor from openstack.load_balancer.v2 import flavor_profile from openstack.load_balancer.v2 import health_monitor from openstack.load_balancer.v2 import l7_policy from openstack.load_balancer.v2 import l7_rule from openstack.load_balancer.v2 import listener from openstack.load_balancer.v2 import load_balancer from openstack.load_balancer.v2 import member from openstack.load_balancer.v2 import pool from openstack.load_balancer.v2 import quota from openstack.tests.functional import base class TestLoadBalancer(base.BaseFunctionalTest): HM_ID = None L7POLICY_ID = None LB_ID = None LISTENER_ID = None MEMBER_ID = None POOL_ID = None VIP_SUBNET_ID = None PROJECT_ID = None FLAVOR_PROFILE_ID = None FLAVOR_ID = None AVAILABILITY_ZONE_PROFILE_ID = None AMPHORA_ID = None PROTOCOL = 'HTTP' PROTOCOL_PORT = 80 LB_ALGORITHM = 'ROUND_ROBIN' MEMBER_ADDRESS = '192.0.2.16' WEIGHT = 10 DELAY = 2 TIMEOUT = 1 MAX_RETRY = 3 HM_TYPE = 'HTTP' ACTION = 'REDIRECT_TO_URL' REDIRECT_URL = 'http://www.example.com' COMPARE_TYPE = 'CONTAINS' L7RULE_TYPE = 'HOST_NAME' L7RULE_VALUE = 'example' AMPHORA = 'amphora' FLAVOR_DATA = '{"loadbalancer_topology": "SINGLE"}' AVAILABILITY_ZONE_DATA = '{"compute_zone": "nova"}' DESCRIPTION = 'Test description' _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_LOAD_BALANCER' # TODO(shade): Creating load balancers can be slow on some hosts due to # nova instance boot times (up to ten minutes). This used to # use setUpClass, but that's a whole other pile of bad, so # we may need to engineer something pleasing here. 
def setUp(self): super().setUp() self.require_service('load-balancer') self.HM_NAME = self.getUniqueString() self.L7POLICY_NAME = self.getUniqueString() self.LB_NAME = self.getUniqueString() self.LISTENER_NAME = self.getUniqueString() self.MEMBER_NAME = self.getUniqueString() self.POOL_NAME = self.getUniqueString() self.UPDATE_NAME = self.getUniqueString() self.UPDATE_DESCRIPTION = self.getUniqueString() self.FLAVOR_PROFILE_NAME = self.getUniqueString() self.FLAVOR_NAME = self.getUniqueString() self.AVAILABILITY_ZONE_PROFILE_NAME = self.getUniqueString() self.AVAILABILITY_ZONE_NAME = self.getUniqueString() subnets = list(self.conn.network.subnets()) self.VIP_SUBNET_ID = subnets[0].id self.PROJECT_ID = self.conn.session.get_project_id() test_quota = self.conn.load_balancer.update_quota( self.PROJECT_ID, **{ 'load_balancer': 100, 'pool': 100, 'listener': 100, 'health_monitor': 100, 'member': 100, } ) assert isinstance(test_quota, quota.Quota) self.assertEqual(self.PROJECT_ID, test_quota.id) test_flavor_profile = self.conn.load_balancer.create_flavor_profile( name=self.FLAVOR_PROFILE_NAME, provider_name=self.AMPHORA, flavor_data=self.FLAVOR_DATA, ) assert isinstance(test_flavor_profile, flavor_profile.FlavorProfile) self.assertEqual(self.FLAVOR_PROFILE_NAME, test_flavor_profile.name) self.FLAVOR_PROFILE_ID = test_flavor_profile.id test_flavor = self.conn.load_balancer.create_flavor( name=self.FLAVOR_NAME, flavor_profile_id=self.FLAVOR_PROFILE_ID, is_enabled=True, description=self.DESCRIPTION, ) assert isinstance(test_flavor, flavor.Flavor) self.assertEqual(self.FLAVOR_NAME, test_flavor.name) self.FLAVOR_ID = test_flavor.id test_az_profile = ( self.conn.load_balancer.create_availability_zone_profile( name=self.AVAILABILITY_ZONE_PROFILE_NAME, provider_name=self.AMPHORA, availability_zone_data=self.AVAILABILITY_ZONE_DATA, ) ) assert isinstance( test_az_profile, availability_zone_profile.AvailabilityZoneProfile ) self.assertEqual( self.AVAILABILITY_ZONE_PROFILE_NAME, 
test_az_profile.name ) self.AVAILABILITY_ZONE_PROFILE_ID = test_az_profile.id test_az = self.conn.load_balancer.create_availability_zone( name=self.AVAILABILITY_ZONE_NAME, availability_zone_profile_id=self.AVAILABILITY_ZONE_PROFILE_ID, is_enabled=True, description=self.DESCRIPTION, ) assert isinstance(test_az, availability_zone.AvailabilityZone) self.assertEqual(self.AVAILABILITY_ZONE_NAME, test_az.name) test_lb = self.conn.load_balancer.create_load_balancer( name=self.LB_NAME, vip_subnet_id=self.VIP_SUBNET_ID, project_id=self.PROJECT_ID, ) assert isinstance(test_lb, load_balancer.LoadBalancer) self.assertEqual(self.LB_NAME, test_lb.name) # Wait for the LB to go ACTIVE. On non-virtualization enabled hosts # it can take nova up to ten minutes to boot a VM. self.conn.load_balancer.wait_for_load_balancer( test_lb.id, interval=1, wait=self._wait_for_timeout ) self.LB_ID = test_lb.id amphorae = self.conn.load_balancer.amphorae(loadbalancer_id=self.LB_ID) for amp in amphorae: self.AMPHORA_ID = amp.id test_listener = self.conn.load_balancer.create_listener( name=self.LISTENER_NAME, protocol=self.PROTOCOL, protocol_port=self.PROTOCOL_PORT, loadbalancer_id=self.LB_ID, ) assert isinstance(test_listener, listener.Listener) self.assertEqual(self.LISTENER_NAME, test_listener.name) self.LISTENER_ID = test_listener.id self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_pool = self.conn.load_balancer.create_pool( name=self.POOL_NAME, protocol=self.PROTOCOL, lb_algorithm=self.LB_ALGORITHM, listener_id=self.LISTENER_ID, ) assert isinstance(test_pool, pool.Pool) self.assertEqual(self.POOL_NAME, test_pool.name) self.POOL_ID = test_pool.id self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_member = self.conn.load_balancer.create_member( pool=self.POOL_ID, name=self.MEMBER_NAME, address=self.MEMBER_ADDRESS, protocol_port=self.PROTOCOL_PORT, weight=self.WEIGHT, ) assert isinstance(test_member, 
member.Member) self.assertEqual(self.MEMBER_NAME, test_member.name) self.MEMBER_ID = test_member.id self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_hm = self.conn.load_balancer.create_health_monitor( pool_id=self.POOL_ID, name=self.HM_NAME, delay=self.DELAY, timeout=self.TIMEOUT, max_retries=self.MAX_RETRY, type=self.HM_TYPE, ) assert isinstance(test_hm, health_monitor.HealthMonitor) self.assertEqual(self.HM_NAME, test_hm.name) self.HM_ID = test_hm.id self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_l7policy = self.conn.load_balancer.create_l7_policy( listener_id=self.LISTENER_ID, name=self.L7POLICY_NAME, action=self.ACTION, redirect_url=self.REDIRECT_URL, ) assert isinstance(test_l7policy, l7_policy.L7Policy) self.assertEqual(self.L7POLICY_NAME, test_l7policy.name) self.L7POLICY_ID = test_l7policy.id self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_l7rule = self.conn.load_balancer.create_l7_rule( l7_policy=self.L7POLICY_ID, compare_type=self.COMPARE_TYPE, type=self.L7RULE_TYPE, value=self.L7RULE_VALUE, ) assert isinstance(test_l7rule, l7_rule.L7Rule) self.assertEqual(self.COMPARE_TYPE, test_l7rule.compare_type) self.L7RULE_ID = test_l7rule.id self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) def tearDown(self): self.conn.load_balancer.get_load_balancer(self.LB_ID) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) self.conn.load_balancer.delete_quota( self.PROJECT_ID, ignore_missing=False ) self.conn.load_balancer.delete_l7_rule( self.L7RULE_ID, l7_policy=self.L7POLICY_ID, ignore_missing=False ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) self.conn.load_balancer.delete_l7_policy( self.L7POLICY_ID, ignore_missing=False ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, 
wait=self._wait_for_timeout ) self.conn.load_balancer.delete_health_monitor( self.HM_ID, ignore_missing=False ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) self.conn.load_balancer.delete_member( self.MEMBER_ID, self.POOL_ID, ignore_missing=False ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) self.conn.load_balancer.delete_pool(self.POOL_ID, ignore_missing=False) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) self.conn.load_balancer.delete_listener( self.LISTENER_ID, ignore_missing=False ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) self.conn.load_balancer.delete_load_balancer( self.LB_ID, ignore_missing=False ) super().tearDown() self.conn.load_balancer.delete_flavor( self.FLAVOR_ID, ignore_missing=False ) self.conn.load_balancer.delete_flavor_profile( self.FLAVOR_PROFILE_ID, ignore_missing=False ) self.conn.load_balancer.delete_availability_zone( self.AVAILABILITY_ZONE_NAME, ignore_missing=False ) self.conn.load_balancer.delete_availability_zone_profile( self.AVAILABILITY_ZONE_PROFILE_ID, ignore_missing=False ) def test_lb_find(self): test_lb = self.conn.load_balancer.find_load_balancer(self.LB_NAME) self.assertEqual(self.LB_ID, test_lb.id) def test_lb_get(self): test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID) self.assertEqual(self.LB_NAME, test_lb.name) self.assertEqual(self.LB_ID, test_lb.id) self.assertEqual(self.VIP_SUBNET_ID, test_lb.vip_subnet_id) def test_lb_get_stats(self): test_lb_stats = self.conn.load_balancer.get_load_balancer_statistics( self.LB_ID ) self.assertEqual(0, test_lb_stats.active_connections) self.assertEqual(0, test_lb_stats.bytes_in) self.assertEqual(0, test_lb_stats.bytes_out) self.assertEqual(0, test_lb_stats.request_errors) self.assertEqual(0, test_lb_stats.total_connections) def test_lb_list(self): names = [lb.name for lb in 
self.conn.load_balancer.load_balancers()] self.assertIn(self.LB_NAME, names) def test_lb_update(self): self.conn.load_balancer.update_load_balancer( self.LB_ID, name=self.UPDATE_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID) self.assertEqual(self.UPDATE_NAME, test_lb.name) self.conn.load_balancer.update_load_balancer( self.LB_ID, name=self.LB_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID) self.assertEqual(self.LB_NAME, test_lb.name) def test_lb_failover(self): self.conn.load_balancer.failover_load_balancer(self.LB_ID) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID) self.assertEqual(self.LB_NAME, test_lb.name) def test_listener_find(self): test_listener = self.conn.load_balancer.find_listener( self.LISTENER_NAME ) self.assertEqual(self.LISTENER_ID, test_listener.id) def test_listener_get(self): test_listener = self.conn.load_balancer.get_listener(self.LISTENER_ID) self.assertEqual(self.LISTENER_NAME, test_listener.name) self.assertEqual(self.LISTENER_ID, test_listener.id) self.assertEqual(self.PROTOCOL, test_listener.protocol) self.assertEqual(self.PROTOCOL_PORT, test_listener.protocol_port) def test_listener_get_stats(self): test_listener_stats = self.conn.load_balancer.get_listener_statistics( self.LISTENER_ID ) self.assertEqual(0, test_listener_stats.active_connections) self.assertEqual(0, test_listener_stats.bytes_in) self.assertEqual(0, test_listener_stats.bytes_out) self.assertEqual(0, test_listener_stats.request_errors) self.assertEqual(0, test_listener_stats.total_connections) def test_listener_list(self): names = [ls.name for ls in self.conn.load_balancer.listeners()] self.assertIn(self.LISTENER_NAME, names) def 
test_listener_update(self): self.conn.load_balancer.get_load_balancer(self.LB_ID) self.conn.load_balancer.update_listener( self.LISTENER_ID, name=self.UPDATE_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_listener = self.conn.load_balancer.get_listener(self.LISTENER_ID) self.assertEqual(self.UPDATE_NAME, test_listener.name) self.conn.load_balancer.update_listener( self.LISTENER_ID, name=self.LISTENER_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_listener = self.conn.load_balancer.get_listener(self.LISTENER_ID) self.assertEqual(self.LISTENER_NAME, test_listener.name) def test_pool_find(self): test_pool = self.conn.load_balancer.find_pool(self.POOL_NAME) self.assertEqual(self.POOL_ID, test_pool.id) def test_pool_get(self): test_pool = self.conn.load_balancer.get_pool(self.POOL_ID) self.assertEqual(self.POOL_NAME, test_pool.name) self.assertEqual(self.POOL_ID, test_pool.id) self.assertEqual(self.PROTOCOL, test_pool.protocol) def test_pool_list(self): names = [pool.name for pool in self.conn.load_balancer.pools()] self.assertIn(self.POOL_NAME, names) def test_pool_update(self): self.conn.load_balancer.get_load_balancer(self.LB_ID) self.conn.load_balancer.update_pool( self.POOL_ID, name=self.UPDATE_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_pool = self.conn.load_balancer.get_pool(self.POOL_ID) self.assertEqual(self.UPDATE_NAME, test_pool.name) self.conn.load_balancer.update_pool(self.POOL_ID, name=self.POOL_NAME) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_pool = self.conn.load_balancer.get_pool(self.POOL_ID) self.assertEqual(self.POOL_NAME, test_pool.name) def test_member_find(self): test_member = self.conn.load_balancer.find_member( self.MEMBER_NAME, self.POOL_ID ) self.assertEqual(self.MEMBER_ID, test_member.id) def test_member_get(self): test_member 
= self.conn.load_balancer.get_member( self.MEMBER_ID, self.POOL_ID ) self.assertEqual(self.MEMBER_NAME, test_member.name) self.assertEqual(self.MEMBER_ID, test_member.id) self.assertEqual(self.MEMBER_ADDRESS, test_member.address) self.assertEqual(self.PROTOCOL_PORT, test_member.protocol_port) self.assertEqual(self.WEIGHT, test_member.weight) def test_member_list(self): names = [ mb.name for mb in self.conn.load_balancer.members(self.POOL_ID) ] self.assertIn(self.MEMBER_NAME, names) def test_member_update(self): self.conn.load_balancer.get_load_balancer(self.LB_ID) self.conn.load_balancer.update_member( self.MEMBER_ID, self.POOL_ID, name=self.UPDATE_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_member = self.conn.load_balancer.get_member( self.MEMBER_ID, self.POOL_ID ) self.assertEqual(self.UPDATE_NAME, test_member.name) self.conn.load_balancer.update_member( self.MEMBER_ID, self.POOL_ID, name=self.MEMBER_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_member = self.conn.load_balancer.get_member( self.MEMBER_ID, self.POOL_ID ) self.assertEqual(self.MEMBER_NAME, test_member.name) def test_health_monitor_find(self): test_hm = self.conn.load_balancer.find_health_monitor(self.HM_NAME) self.assertEqual(self.HM_ID, test_hm.id) def test_health_monitor_get(self): test_hm = self.conn.load_balancer.get_health_monitor(self.HM_ID) self.assertEqual(self.HM_NAME, test_hm.name) self.assertEqual(self.HM_ID, test_hm.id) self.assertEqual(self.DELAY, test_hm.delay) self.assertEqual(self.TIMEOUT, test_hm.timeout) self.assertEqual(self.MAX_RETRY, test_hm.max_retries) self.assertEqual(self.HM_TYPE, test_hm.type) def test_health_monitor_list(self): names = [hm.name for hm in self.conn.load_balancer.health_monitors()] self.assertIn(self.HM_NAME, names) def test_health_monitor_update(self): self.conn.load_balancer.get_load_balancer(self.LB_ID) 
self.conn.load_balancer.update_health_monitor( self.HM_ID, name=self.UPDATE_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_hm = self.conn.load_balancer.get_health_monitor(self.HM_ID) self.assertEqual(self.UPDATE_NAME, test_hm.name) self.conn.load_balancer.update_health_monitor( self.HM_ID, name=self.HM_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_hm = self.conn.load_balancer.get_health_monitor(self.HM_ID) self.assertEqual(self.HM_NAME, test_hm.name) def test_l7_policy_find(self): test_l7_policy = self.conn.load_balancer.find_l7_policy( self.L7POLICY_NAME ) self.assertEqual(self.L7POLICY_ID, test_l7_policy.id) def test_l7_policy_get(self): test_l7_policy = self.conn.load_balancer.get_l7_policy( self.L7POLICY_ID ) self.assertEqual(self.L7POLICY_NAME, test_l7_policy.name) self.assertEqual(self.L7POLICY_ID, test_l7_policy.id) self.assertEqual(self.ACTION, test_l7_policy.action) def test_l7_policy_list(self): names = [l7.name for l7 in self.conn.load_balancer.l7_policies()] self.assertIn(self.L7POLICY_NAME, names) def test_l7_policy_update(self): self.conn.load_balancer.get_load_balancer(self.LB_ID) self.conn.load_balancer.update_l7_policy( self.L7POLICY_ID, name=self.UPDATE_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_l7_policy = self.conn.load_balancer.get_l7_policy( self.L7POLICY_ID ) self.assertEqual(self.UPDATE_NAME, test_l7_policy.name) self.conn.load_balancer.update_l7_policy( self.L7POLICY_ID, name=self.L7POLICY_NAME ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_l7_policy = self.conn.load_balancer.get_l7_policy( self.L7POLICY_ID ) self.assertEqual(self.L7POLICY_NAME, test_l7_policy.name) def test_l7_rule_find(self): test_l7_rule = self.conn.load_balancer.find_l7_rule( self.L7RULE_ID, self.L7POLICY_ID ) self.assertEqual(self.L7RULE_ID, 
test_l7_rule.id) self.assertEqual(self.L7RULE_TYPE, test_l7_rule.type) def test_l7_rule_get(self): test_l7_rule = self.conn.load_balancer.get_l7_rule( self.L7RULE_ID, l7_policy=self.L7POLICY_ID ) self.assertEqual(self.L7RULE_ID, test_l7_rule.id) self.assertEqual(self.COMPARE_TYPE, test_l7_rule.compare_type) self.assertEqual(self.L7RULE_TYPE, test_l7_rule.type) self.assertEqual(self.L7RULE_VALUE, test_l7_rule.rule_value) def test_l7_rule_list(self): ids = [ l7.id for l7 in self.conn.load_balancer.l7_rules( l7_policy=self.L7POLICY_ID ) ] self.assertIn(self.L7RULE_ID, ids) def test_l7_rule_update(self): self.conn.load_balancer.get_load_balancer(self.LB_ID) self.conn.load_balancer.update_l7_rule( self.L7RULE_ID, l7_policy=self.L7POLICY_ID, rule_value=self.UPDATE_NAME, ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_l7_rule = self.conn.load_balancer.get_l7_rule( self.L7RULE_ID, l7_policy=self.L7POLICY_ID ) self.assertEqual(self.UPDATE_NAME, test_l7_rule.rule_value) self.conn.load_balancer.update_l7_rule( self.L7RULE_ID, l7_policy=self.L7POLICY_ID, rule_value=self.L7RULE_VALUE, ) self.conn.load_balancer.wait_for_load_balancer( self.LB_ID, wait=self._wait_for_timeout ) test_l7_rule = self.conn.load_balancer.get_l7_rule( self.L7RULE_ID, l7_policy=self.L7POLICY_ID, ) self.assertEqual(self.L7RULE_VALUE, test_l7_rule.rule_value) def test_quota_list(self): for qot in self.conn.load_balancer.quotas(): self.assertIsNotNone(qot.project_id) def test_quota_get(self): test_quota = self.conn.load_balancer.get_quota(self.PROJECT_ID) self.assertEqual(self.PROJECT_ID, test_quota.id) def test_quota_update(self): attrs = {'load_balancer': 12345, 'pool': 67890} for project_quota in self.conn.load_balancer.quotas(): self.conn.load_balancer.update_quota(project_quota, **attrs) new_quota = self.conn.load_balancer.get_quota( project_quota.project_id ) self.assertEqual(12345, new_quota.load_balancers) self.assertEqual(67890, new_quota.pools) def 
test_default_quota(self): self.conn.load_balancer.get_quota_default() def test_providers(self): providers = self.conn.load_balancer.providers() # Make sure our default provider is in the list self.assertTrue( any(prov['name'] == self.AMPHORA for prov in providers) ) def test_provider_flavor_capabilities(self): capabilities = self.conn.load_balancer.provider_flavor_capabilities( self.AMPHORA ) # Make sure a known capability is in the default provider self.assertTrue( any(cap['name'] == 'loadbalancer_topology' for cap in capabilities) ) def test_flavor_profile_find(self): test_profile = self.conn.load_balancer.find_flavor_profile( self.FLAVOR_PROFILE_NAME ) self.assertEqual(self.FLAVOR_PROFILE_ID, test_profile.id) def test_flavor_profile_get(self): test_flavor_profile = self.conn.load_balancer.get_flavor_profile( self.FLAVOR_PROFILE_ID ) self.assertEqual(self.FLAVOR_PROFILE_NAME, test_flavor_profile.name) self.assertEqual(self.FLAVOR_PROFILE_ID, test_flavor_profile.id) self.assertEqual(self.AMPHORA, test_flavor_profile.provider_name) self.assertEqual(self.FLAVOR_DATA, test_flavor_profile.flavor_data) def test_flavor_profile_list(self): names = [fv.name for fv in self.conn.load_balancer.flavor_profiles()] self.assertIn(self.FLAVOR_PROFILE_NAME, names) def test_flavor_profile_update(self): self.conn.load_balancer.update_flavor_profile( self.FLAVOR_PROFILE_ID, name=self.UPDATE_NAME ) test_flavor_profile = self.conn.load_balancer.get_flavor_profile( self.FLAVOR_PROFILE_ID ) self.assertEqual(self.UPDATE_NAME, test_flavor_profile.name) self.conn.load_balancer.update_flavor_profile( self.FLAVOR_PROFILE_ID, name=self.FLAVOR_PROFILE_NAME ) test_flavor_profile = self.conn.load_balancer.get_flavor_profile( self.FLAVOR_PROFILE_ID ) self.assertEqual(self.FLAVOR_PROFILE_NAME, test_flavor_profile.name) def test_flavor_find(self): test_flavor = self.conn.load_balancer.find_flavor(self.FLAVOR_NAME) self.assertEqual(self.FLAVOR_ID, test_flavor.id) def test_flavor_get(self): 
test_flavor = self.conn.load_balancer.get_flavor(self.FLAVOR_ID) self.assertEqual(self.FLAVOR_NAME, test_flavor.name) self.assertEqual(self.FLAVOR_ID, test_flavor.id) self.assertEqual(self.DESCRIPTION, test_flavor.description) self.assertEqual(self.FLAVOR_PROFILE_ID, test_flavor.flavor_profile_id) def test_flavor_list(self): names = [fv.name for fv in self.conn.load_balancer.flavors()] self.assertIn(self.FLAVOR_NAME, names) def test_flavor_update(self): self.conn.load_balancer.update_flavor( self.FLAVOR_ID, name=self.UPDATE_NAME ) test_flavor = self.conn.load_balancer.get_flavor(self.FLAVOR_ID) self.assertEqual(self.UPDATE_NAME, test_flavor.name) self.conn.load_balancer.update_flavor( self.FLAVOR_ID, name=self.FLAVOR_NAME ) test_flavor = self.conn.load_balancer.get_flavor(self.FLAVOR_ID) self.assertEqual(self.FLAVOR_NAME, test_flavor.name) def test_amphora_list(self): amp_ids = [amp.id for amp in self.conn.load_balancer.amphorae()] self.assertIn(self.AMPHORA_ID, amp_ids) def test_amphora_find(self): test_amphora = self.conn.load_balancer.find_amphora(self.AMPHORA_ID) self.assertEqual(self.AMPHORA_ID, test_amphora.id) def test_amphora_get(self): test_amphora = self.conn.load_balancer.get_amphora(self.AMPHORA_ID) self.assertEqual(self.AMPHORA_ID, test_amphora.id) def test_amphora_configure(self): self.conn.load_balancer.configure_amphora(self.AMPHORA_ID) test_amp = self.conn.load_balancer.get_amphora(self.AMPHORA_ID) self.assertEqual(self.AMPHORA_ID, test_amp.id) def test_amphora_failover(self): self.conn.load_balancer.failover_amphora(self.AMPHORA_ID) test_amp = self.conn.load_balancer.get_amphora(self.AMPHORA_ID) self.assertEqual(self.AMPHORA_ID, test_amp.id) def test_availability_zone_profile_find(self): test_profile = self.conn.load_balancer.find_availability_zone_profile( self.AVAILABILITY_ZONE_PROFILE_NAME ) self.assertEqual(self.AVAILABILITY_ZONE_PROFILE_ID, test_profile.id) def test_availability_zone_profile_get(self): test_availability_zone_profile = ( 
self.conn.load_balancer.get_availability_zone_profile( self.AVAILABILITY_ZONE_PROFILE_ID ) ) self.assertEqual( self.AVAILABILITY_ZONE_PROFILE_NAME, test_availability_zone_profile.name, ) self.assertEqual( self.AVAILABILITY_ZONE_PROFILE_ID, test_availability_zone_profile.id, ) self.assertEqual( self.AMPHORA, test_availability_zone_profile.provider_name ) self.assertEqual( self.AVAILABILITY_ZONE_DATA, test_availability_zone_profile.availability_zone_data, ) def test_availability_zone_profile_list(self): names = [ az.name for az in self.conn.load_balancer.availability_zone_profiles() ] self.assertIn(self.AVAILABILITY_ZONE_PROFILE_NAME, names) def test_availability_zone_profile_update(self): self.conn.load_balancer.update_availability_zone_profile( self.AVAILABILITY_ZONE_PROFILE_ID, name=self.UPDATE_NAME ) test_availability_zone_profile = ( self.conn.load_balancer.get_availability_zone_profile( self.AVAILABILITY_ZONE_PROFILE_ID ) ) self.assertEqual(self.UPDATE_NAME, test_availability_zone_profile.name) self.conn.load_balancer.update_availability_zone_profile( self.AVAILABILITY_ZONE_PROFILE_ID, name=self.AVAILABILITY_ZONE_PROFILE_NAME, ) test_availability_zone_profile = ( self.conn.load_balancer.get_availability_zone_profile( self.AVAILABILITY_ZONE_PROFILE_ID ) ) self.assertEqual( self.AVAILABILITY_ZONE_PROFILE_NAME, test_availability_zone_profile.name, ) def test_availability_zone_find(self): test_availability_zone = ( self.conn.load_balancer.find_availability_zone( self.AVAILABILITY_ZONE_NAME ) ) self.assertEqual( self.AVAILABILITY_ZONE_NAME, test_availability_zone.name ) def test_availability_zone_get(self): test_availability_zone = self.conn.load_balancer.get_availability_zone( self.AVAILABILITY_ZONE_NAME ) self.assertEqual( self.AVAILABILITY_ZONE_NAME, test_availability_zone.name ) self.assertEqual(self.DESCRIPTION, test_availability_zone.description) self.assertEqual( self.AVAILABILITY_ZONE_PROFILE_ID, test_availability_zone.availability_zone_profile_id, ) def 
test_availability_zone_list(self): names = [ az.name for az in self.conn.load_balancer.availability_zones() ] self.assertIn(self.AVAILABILITY_ZONE_NAME, names) def test_availability_zone_update(self): self.conn.load_balancer.update_availability_zone( self.AVAILABILITY_ZONE_NAME, description=self.UPDATE_DESCRIPTION ) test_availability_zone = self.conn.load_balancer.get_availability_zone( self.AVAILABILITY_ZONE_NAME ) self.assertEqual( self.UPDATE_DESCRIPTION, test_availability_zone.description ) self.conn.load_balancer.update_availability_zone( self.AVAILABILITY_ZONE_NAME, description=self.DESCRIPTION ) test_availability_zone = self.conn.load_balancer.get_availability_zone( self.AVAILABILITY_ZONE_NAME ) self.assertEqual(self.DESCRIPTION, test_availability_zone.description) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3653712 openstacksdk-4.0.0/openstack/tests/functional/network/0000775000175000017500000000000000000000000023213 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/__init__.py0000664000175000017500000000000000000000000025312 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.377377 openstacksdk-4.0.0/openstack/tests/functional/network/v2/0000775000175000017500000000000000000000000023542 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/__init__.py0000664000175000017500000000000000000000000025641 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_address_group.py0000664000175000017500000000671500000000000030025 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import address_group as _address_group from openstack.tests.functional import base class TestAddressGroup(base.BaseFunctionalTest): ADDRESS_GROUP_ID = None ADDRESSES = ["10.0.0.1/32", "2001:db8::/32"] def setUp(self): super().setUp() # Skip the tests if address group extension is not enabled. 
if not self.user_cloud.network.find_extension("address-group"): self.skipTest("Network Address Group extension disabled") self.ADDRESS_GROUP_NAME = self.getUniqueString() self.ADDRESS_GROUP_DESCRIPTION = self.getUniqueString() self.ADDRESS_GROUP_NAME_UPDATED = self.getUniqueString() self.ADDRESS_GROUP_DESCRIPTION_UPDATED = self.getUniqueString() address_group = self.user_cloud.network.create_address_group( name=self.ADDRESS_GROUP_NAME, description=self.ADDRESS_GROUP_DESCRIPTION, addresses=self.ADDRESSES, ) assert isinstance(address_group, _address_group.AddressGroup) self.assertEqual(self.ADDRESS_GROUP_NAME, address_group.name) self.assertEqual( self.ADDRESS_GROUP_DESCRIPTION, address_group.description ) self.assertCountEqual(self.ADDRESSES, address_group.addresses) self.ADDRESS_GROUP_ID = address_group.id def tearDown(self): sot = self.user_cloud.network.delete_address_group( self.ADDRESS_GROUP_ID ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_address_group( self.ADDRESS_GROUP_NAME ) self.assertEqual(self.ADDRESS_GROUP_ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_address_group(self.ADDRESS_GROUP_ID) self.assertEqual(self.ADDRESS_GROUP_NAME, sot.name) def test_list(self): names = [ag.name for ag in self.user_cloud.network.address_groups()] self.assertIn(self.ADDRESS_GROUP_NAME, names) def test_update(self): sot = self.user_cloud.network.update_address_group( self.ADDRESS_GROUP_ID, name=self.ADDRESS_GROUP_NAME_UPDATED, description=self.ADDRESS_GROUP_DESCRIPTION_UPDATED, ) self.assertEqual(self.ADDRESS_GROUP_NAME_UPDATED, sot.name) self.assertEqual( self.ADDRESS_GROUP_DESCRIPTION_UPDATED, sot.description ) def test_add_remove_addresses(self): addrs = ["127.0.0.1/32", "fe80::/10"] sot = self.user_cloud.network.add_addresses_to_address_group( self.ADDRESS_GROUP_ID, addrs ) updated_addrs = self.ADDRESSES.copy() updated_addrs.extend(addrs) self.assertCountEqual(updated_addrs, sot.addresses) sot = 
self.user_cloud.network.remove_addresses_from_address_group( self.ADDRESS_GROUP_ID, addrs ) self.assertCountEqual(self.ADDRESSES, sot.addresses) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_address_scope.py0000664000175000017500000000450600000000000027776 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import address_scope as _address_scope from openstack.tests.functional import base class TestAddressScope(base.BaseFunctionalTest): ADDRESS_SCOPE_ID = None IS_SHARED = False IP_VERSION = 4 def setUp(self): super().setUp() self.ADDRESS_SCOPE_NAME = self.getUniqueString() self.ADDRESS_SCOPE_NAME_UPDATED = self.getUniqueString() address_scope = self.user_cloud.network.create_address_scope( ip_version=self.IP_VERSION, name=self.ADDRESS_SCOPE_NAME, shared=self.IS_SHARED, ) assert isinstance(address_scope, _address_scope.AddressScope) self.assertEqual(self.ADDRESS_SCOPE_NAME, address_scope.name) self.ADDRESS_SCOPE_ID = address_scope.id def tearDown(self): sot = self.user_cloud.network.delete_address_scope( self.ADDRESS_SCOPE_ID ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_address_scope( self.ADDRESS_SCOPE_NAME ) self.assertEqual(self.ADDRESS_SCOPE_ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_address_scope(self.ADDRESS_SCOPE_ID) 
self.assertEqual(self.ADDRESS_SCOPE_NAME, sot.name) self.assertEqual(self.IS_SHARED, sot.is_shared) self.assertEqual(self.IP_VERSION, sot.ip_version) def test_list(self): names = [o.name for o in self.user_cloud.network.address_scopes()] self.assertIn(self.ADDRESS_SCOPE_NAME, names) def test_update(self): sot = self.user_cloud.network.update_address_scope( self.ADDRESS_SCOPE_ID, name=self.ADDRESS_SCOPE_NAME_UPDATED ) self.assertEqual(self.ADDRESS_SCOPE_NAME_UPDATED, sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_agent.py0000664000175000017500000000350200000000000026251 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.network.v2 import agent from openstack.tests.functional import base class TestAgent(base.BaseFunctionalTest): AGENT: agent.Agent DESC = "test description" def validate_uuid(self, s): try: uuid.UUID(s) except Exception: return False return True def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("agent"): self.skipTest("Neutron agent extension is required for this test") agent_list = list(self.user_cloud.network.agents()) if len(agent_list) == 0: self.skipTest("No agents available") self.AGENT = agent_list[0] assert isinstance(self.AGENT, agent.Agent) def test_list(self): agent_list = list(self.user_cloud.network.agents()) self.AGENT = agent_list[0] assert isinstance(self.AGENT, agent.Agent) self.assertTrue(self.validate_uuid(self.AGENT.id)) def test_get(self): sot = self.user_cloud.network.get_agent(self.AGENT.id) self.assertEqual(self.AGENT.id, sot.id) def test_update(self): sot = self.user_cloud.network.update_agent( self.AGENT.id, description=self.DESC ) self.assertEqual(self.DESC, sot.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_agent_add_remove_network.py0000664000175000017500000000452500000000000032215 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import agent from openstack.network.v2 import network from openstack.tests.functional import base class TestAgentNetworks(base.BaseFunctionalTest): NETWORK_ID: str AGENT: agent.Agent AGENT_ID: str def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("agent"): self.skipTest("Neutron agent extension is required for this test") self.NETWORK_NAME = self.getUniqueString("network") net = self.user_cloud.network.create_network(name=self.NETWORK_NAME) self.addCleanup(self.user_cloud.network.delete_network, net.id) assert isinstance(net, network.Network) self.NETWORK_ID = net.id agent_list = list(self.user_cloud.network.agents()) agents = [ agent for agent in agent_list if agent.agent_type == "DHCP agent" ] if len(agent_list) == 0: self.skipTest("No agents available") self.AGENT = agents[0] self.AGENT_ID = self.AGENT.id def test_add_remove_agent(self): net = self.AGENT.add_agent_to_network( self.user_cloud.network, network_id=self.NETWORK_ID ) self._verify_add(net) net = self.AGENT.remove_agent_from_network( self.user_cloud.network, network_id=self.NETWORK_ID ) self._verify_remove(net) def _verify_add(self, network): net = self.user_cloud.network.dhcp_agent_hosting_networks( self.AGENT_ID ) net_ids = [n.id for n in net] self.assertIn(self.NETWORK_ID, net_ids) def _verify_remove(self, network): net = self.user_cloud.network.dhcp_agent_hosting_networks( self.AGENT_ID ) net_ids = [n.id for n in net] self.assertNotIn(self.NETWORK_ID, net_ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_agent_add_remove_router.py0000664000175000017500000000416000000000000032037 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import agent from openstack.network.v2 import router from openstack.tests.functional import base class TestAgentRouters(base.BaseFunctionalTest): ROUTER: router.Router AGENT: agent.Agent def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("agent"): self.skipTest("Neutron agent extension is required for this test") self.ROUTER_NAME = "router-name-" + self.getUniqueString("router-name") self.ROUTER = self.user_cloud.network.create_router( name=self.ROUTER_NAME ) self.addCleanup(self.user_cloud.network.delete_router, self.ROUTER) assert isinstance(self.ROUTER, router.Router) agent_list = list(self.user_cloud.network.agents()) agents = [ agent for agent in agent_list if agent.agent_type == "L3 agent" ] if len(agent_list) == 0: self.skipTest("No agents available") self.AGENT = agents[0] def test_add_router_to_agent(self): self.user_cloud.network.add_router_to_agent(self.AGENT, self.ROUTER) rots = self.user_cloud.network.agent_hosted_routers(self.AGENT) routers = [router.id for router in rots] self.assertIn(self.ROUTER.id, routers) def test_remove_router_from_agent(self): self.user_cloud.network.remove_router_from_agent( self.AGENT, self.ROUTER ) rots = self.user_cloud.network.agent_hosted_routers(self.AGENT) routers = [router.id for router in rots] self.assertNotIn(self.ROUTER.id, routers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_auto_allocated_topology.py0000664000175000017500000001050300000000000032066 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestAutoAllocatedTopology(base.BaseFunctionalTest): NETWORK_NAME = "auto_allocated_network" NETWORK_ID = None PROJECT_ID = None def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") if not self.operator_cloud._has_neutron_extension( "auto-allocated-topology" ): self.skipTest( "Neutron auto-allocated-topology extension is " "required for this test" ) project = self._create_project() self.PROJECT_ID = project['id'] self.test_cloud = self.operator_cloud.connect_as_project(project) # Dry run will only pass if there is a public network self._set_network_external() def tearDown(self): res = self.test_cloud.network.delete_auto_allocated_topology( self.PROJECT_ID ) self.assertIsNone(res) self._destroy_project() super().tearDown() def _create_project(self): project_name = 'auto_allocated_topology_test_project' project = self.operator_cloud.get_project(project_name) if not project: params = { 'name': project_name, 'description': ( 'test project used only for the ' 'TestAutoAllocatedTopology tests class' ), } if self.identity_version == '3': params['domain_id'] = self.operator_cloud.get_domain( 'default' )['id'] project = self.operator_cloud.create_project(**params) 
user_id = self.operator_cloud.current_user_id # Grant the current user access to the project role_assignment = self.operator_cloud.list_role_assignments( {'user': user_id, 'project': project['id']} ) if not role_assignment: self.operator_cloud.grant_role( 'member', user=user_id, project=project['id'], wait=True ) return project def _destroy_project(self): self.operator_cloud.revoke_role( 'member', user=self.operator_cloud.current_user_id, project=self.PROJECT_ID, ) self.operator_cloud.delete_project(self.PROJECT_ID) def test_auto_allocated_topology(self): # First test validation with the 'dry-run' call # Dry run option will return "dry-run=pass" in the 'id' resource top = self.test_cloud.network.validate_auto_allocated_topology( self.PROJECT_ID ) self.assertEqual(self.PROJECT_ID, top.project) self.assertEqual("dry-run=pass", top.id) # test show auto_allocated_network without project id in the request top = self.test_cloud.network.get_auto_allocated_topology() project = self.test_cloud.session.get_project_id() network = self.test_cloud.network.get_network(top.id) self.assertEqual(top.project_id, project) self.assertEqual(top.id, network.id) # test show auto_allocated_network with project id in the request top = self.test_cloud.network.get_auto_allocated_topology( self.PROJECT_ID ) network = self.test_cloud.network.get_network(top.id) self.assertEqual(top.project_id, network.project_id) self.assertEqual(top.id, network.id) self.assertEqual(network.name, "auto_allocated_network") def _set_network_external(self): networks = self.test_cloud.network.networks() for network in networks: if network.name == "public": self.test_cloud.network.update_network( network, is_default=True ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_availability_zone.py0000664000175000017500000000174200000000000030664 0ustar00zuulzuul00000000000000# Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestAvailabilityZone(base.BaseFunctionalTest): def test_list(self): availability_zones = list(self.user_cloud.network.availability_zones()) if len(availability_zones) > 0: for az in availability_zones: self.assertIsInstance(az.name, str) self.assertIsInstance(az.resource, str) self.assertIsInstance(az.state, str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_bgp.py0000664000175000017500000001234200000000000025725 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import bgp_peer as _bgp_peer from openstack.network.v2 import bgp_speaker as _bgp_speaker from openstack.tests.functional import base class TestBGPSpeaker(base.BaseFunctionalTest): def setUp(self): super().setUp() self.LOCAL_AS = 101 self.IP_VERSION = 4 self.REMOTE_AS = 42 self.PEER_IP = '172.200.12.3' self.SPEAKER_NAME = 'my_speaker' + self.getUniqueString() self.PEER_NAME = 'my_peer' + self.getUniqueString() if not self.user_cloud.network.find_extension("bgp"): self.skipTest("Neutron BGP Dynamic Routing Extension disabled") bgp_speaker = self.operator_cloud.network.create_bgp_speaker( ip_version=self.IP_VERSION, local_as=self.LOCAL_AS, name=self.SPEAKER_NAME, ) assert isinstance(bgp_speaker, _bgp_speaker.BgpSpeaker) self.SPEAKER = bgp_speaker bgp_peer = self.operator_cloud.network.create_bgp_peer( name=self.PEER_NAME, auth_type='none', remote_as=self.REMOTE_AS, peer_ip=self.PEER_IP, ) assert isinstance(bgp_peer, _bgp_peer.BgpPeer) self.PEER = bgp_peer def tearDown(self): sot = self.operator_cloud.network.delete_bgp_peer(self.PEER.id) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_bgp_speaker(self.SPEAKER.id) self.assertIsNone(sot) super().tearDown() def test_find_bgp_speaker(self): sot = self.operator_cloud.network.find_bgp_speaker(self.SPEAKER.name) self.assertEqual(self.IP_VERSION, sot.ip_version) self.assertEqual(self.LOCAL_AS, sot.local_as) # Check defaults self.assertTrue(sot.advertise_floating_ip_host_routes) self.assertTrue(sot.advertise_tenant_networks) def test_get_bgp_speaker(self): sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id) self.assertEqual(self.IP_VERSION, sot.ip_version) self.assertEqual(self.LOCAL_AS, sot.local_as) def test_list_bgp_speakers(self): speaker_ids = [ sp.id for sp in self.operator_cloud.network.bgp_speakers() ] self.assertIn(self.SPEAKER.id, speaker_ids) def test_update_bgp_speaker(self): sot = self.operator_cloud.network.update_bgp_speaker( self.SPEAKER.id, 
advertise_floating_ip_host_routes=False ) self.assertFalse(sot.advertise_floating_ip_host_routes) def test_find_bgp_peer(self): sot = self.operator_cloud.network.find_bgp_peer(self.PEER.name) self.assertEqual(self.PEER_IP, sot.peer_ip) self.assertEqual(self.REMOTE_AS, sot.remote_as) def test_get_bgp_peer(self): sot = self.operator_cloud.network.get_bgp_peer(self.PEER.id) self.assertEqual(self.PEER_IP, sot.peer_ip) self.assertEqual(self.REMOTE_AS, sot.remote_as) def test_list_bgp_peers(self): peer_ids = [pe.id for pe in self.operator_cloud.network.bgp_peers()] self.assertIn(self.PEER.id, peer_ids) def test_update_bgp_peer(self): name = 'new_peer_name' + self.getUniqueString() sot = self.operator_cloud.network.update_bgp_peer( self.PEER.id, name=name ) self.assertEqual(name, sot.name) def test_add_remove_peer_to_speaker(self): self.operator_cloud.network.add_bgp_peer_to_speaker( self.SPEAKER.id, self.PEER.id ) sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id) self.assertEqual([self.PEER.id], sot.peers) # Remove the peer self.operator_cloud.network.remove_bgp_peer_from_speaker( self.SPEAKER.id, self.PEER.id ) sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id) self.assertEqual([], sot.peers) def test_add_remove_gw_network_to_speaker(self): net_name = 'my_network' + self.getUniqueString() net = self.user_cloud.create_network(name=net_name) self.operator_cloud.network.add_gateway_network_to_speaker( self.SPEAKER.id, net.id ) sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id) self.assertEqual([net.id], sot.networks) # Remove the network self.operator_cloud.network.remove_gateway_network_from_speaker( self.SPEAKER.id, net.id ) sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id) self.assertEqual([], sot.networks) def test_get_advertised_routes_of_speaker(self): sot = self.operator_cloud.network.get_advertised_routes_of_speaker( self.SPEAKER.id ) self.assertEqual({'advertised_routes': []}, sot) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_bgpvpn.py0000664000175000017500000001732400000000000026456 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import bgpvpn as _bgpvpn from openstack.network.v2 import ( bgpvpn_network_association as _bgpvpn_net_assoc, ) from openstack.network.v2 import bgpvpn_port_association as _bgpvpn_port_assoc from openstack.network.v2 import ( bgpvpn_router_association as _bgpvpn_router_assoc, ) from openstack.network.v2 import network as _network from openstack.network.v2 import port as _port from openstack.network.v2 import router as _router from openstack.network.v2 import subnet as _subnet from openstack.tests.functional import base class TestBGPVPN(base.BaseFunctionalTest): def setUp(self): super().setUp() self.BGPVPN_NAME = 'my_bgpvpn' + self.getUniqueString() self.NET_NAME = 'my_net' + self.getUniqueString() self.SUBNET_NAME = 'my_subnet' + self.getUniqueString() self.PORT_NAME = 'my_port' + self.getUniqueString() self.ROUTER_NAME = 'my_router' + self.getUniqueString() self.CIDR = "10.101.0.0/24" self.ROUTE_DISTINGUISHERS = ['64512:1777', '64512:1888', '64512:1999'] self.VNI = 1000 self.ROUTE_TARGETS = ('64512:1444',) self.IMPORT_TARGETS = ('64512:1555',) self.EXPORT_TARGETS = '64512:1666' self.TYPE = 'l3' if not self.user_cloud.network.find_extension("bgpvpn"): 
self.skipTest("Neutron BGPVPN Extension disabled") bgpvpn = self.operator_cloud.network.create_bgpvpn( name=self.BGPVPN_NAME, route_distinguishers=self.ROUTE_DISTINGUISHERS, route_targets=self.ROUTE_TARGETS, import_targets=self.IMPORT_TARGETS, export_targets=self.EXPORT_TARGETS, ) assert isinstance(bgpvpn, _bgpvpn.BgpVpn) self.BGPVPN = bgpvpn net = self.operator_cloud.network.create_network(name=self.NET_NAME) assert isinstance(net, _network.Network) self.NETWORK = net subnet = self.operator_cloud.network.create_subnet( name=self.SUBNET_NAME, ip_version=4, network_id=self.NETWORK.id, cidr=self.CIDR, ) assert isinstance(subnet, _subnet.Subnet) self.SUBNET = subnet port = self.operator_cloud.network.create_port( name=self.PORT_NAME, network_id=self.NETWORK.id ) assert isinstance(port, _port.Port) self.PORT = port router = self.operator_cloud.network.create_router( name=self.ROUTER_NAME ) assert isinstance(router, _router.Router) self.ROUTER = router net_assoc = ( self.operator_cloud.network.create_bgpvpn_network_association( self.BGPVPN, network_id=self.NETWORK.id ) ) assert isinstance( net_assoc, _bgpvpn_net_assoc.BgpVpnNetworkAssociation ) self.NET_ASSOC = net_assoc port_assoc = ( self.operator_cloud.network.create_bgpvpn_port_association( self.BGPVPN, port_id=self.PORT.id ) ) assert isinstance(port_assoc, _bgpvpn_port_assoc.BgpVpnPortAssociation) self.PORT_ASSOC = port_assoc router_assoc = ( self.operator_cloud.network.create_bgpvpn_router_association( self.BGPVPN, router_id=self.ROUTER.id ) ) assert isinstance( router_assoc, _bgpvpn_router_assoc.BgpVpnRouterAssociation ) self.ROUTER_ASSOC = router_assoc def tearDown(self): sot = self.operator_cloud.network.delete_bgpvpn(self.BGPVPN.id) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_bgpvpn_network_association( self.BGPVPN.id, self.NET_ASSOC.id ) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_bgpvpn_port_association( self.BGPVPN.id, self.PORT_ASSOC.id ) self.assertIsNone(sot) sot = 
self.operator_cloud.network.delete_bgpvpn_router_association( self.BGPVPN.id, self.ROUTER_ASSOC.id ) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_router(self.ROUTER) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_port(self.PORT) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_subnet(self.SUBNET) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_network(self.NETWORK) self.assertIsNone(sot) super().tearDown() def test_find_bgpvpn(self): sot = self.operator_cloud.network.find_bgpvpn(self.BGPVPN.name) self.assertEqual(list(self.ROUTE_TARGETS), sot.route_targets) self.assertEqual(list(self.IMPORT_TARGETS), sot.import_targets) # Check defaults self.assertEqual(self.TYPE, sot.type) def test_get_bgpvpn(self): sot = self.operator_cloud.network.get_bgpvpn(self.BGPVPN.id) self.assertEqual(list(self.ROUTE_TARGETS), sot.route_targets) self.assertEqual([self.EXPORT_TARGETS], sot.export_targets) self.assertEqual(list(self.IMPORT_TARGETS), sot.import_targets) def test_list_bgpvpns(self): bgpvpn_ids = [ bgpvpn.id for bgpvpn in self.operator_cloud.network.bgpvpns() ] self.assertIn(self.BGPVPN.id, bgpvpn_ids) def test_update_bgpvpn(self): sot = self.operator_cloud.network.update_bgpvpn( self.BGPVPN.id, import_targets='64512:1333' ) self.assertEqual(['64512:1333'], sot.import_targets) def test_get_bgpvpnnetwork_association(self): sot = self.operator_cloud.network.get_bgpvpn_network_association( self.BGPVPN.id, self.NET_ASSOC.id ) self.assertEqual(self.NETWORK.id, sot.network_id) def test_list_bgpvpn_network_associations(self): net_assoc_ids = [ net_assoc.id for net_assoc in ( self.operator_cloud.network.bgpvpn_network_associations( self.BGPVPN.id ) ) ] self.assertIn(self.NET_ASSOC.id, net_assoc_ids) def test_get_bgpvpn_port_association(self): sot = self.operator_cloud.network.get_bgpvpn_port_association( self.BGPVPN.id, self.PORT_ASSOC.id ) self.assertEqual(self.PORT.id, sot.port_id) def 
test_list_bgpvpn_port_associations(self): port_assoc_ids = [ port_assoc.id for port_assoc in ( self.operator_cloud.network.bgpvpn_port_associations( self.BGPVPN.id ) ) ] self.assertIn(self.PORT_ASSOC.id, port_assoc_ids) def test_get_bgpvpn_router_association(self): sot = self.operator_cloud.network.get_bgpvpn_router_association( self.BGPVPN.id, self.ROUTER_ASSOC.id ) self.assertEqual(self.ROUTER.id, sot.router_id) def test_list_bgpvpn_router_associations(self): router_assoc_ids = [ router_assoc.id for router_assoc in ( self.operator_cloud.network.bgpvpn_router_associations( self.BGPVPN.id ) ) ] self.assertIn(self.ROUTER_ASSOC.id, router_assoc_ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_default_security_group_rule.py0000664000175000017500000000576000000000000033001 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import random from openstack.network.v2 import default_security_group_rule from openstack.tests.functional import base class TestDefaultSecurityGroupRule(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension( "security-groups-default-rules" ): self.skipTest( "Neutron security-groups-default-rules extension " "is required for this test" ) self.IPV4 = random.choice(["IPv4", "IPv6"]) self.PROTO = random.choice(["tcp", "udp"]) self.PORT = random.randint(1, 65535) self.DIR = random.choice(["ingress", "egress"]) self.USED_IN_DEFAULT_SG = random.choice([True, False]) self.USED_IN_NON_DEFAULT_SG = random.choice([True, False]) rul = self.operator_cloud.network.create_default_security_group_rule( direction=self.DIR, ethertype=self.IPV4, port_range_max=self.PORT, port_range_min=self.PORT, protocol=self.PROTO, used_in_default_sg=self.USED_IN_DEFAULT_SG, used_in_non_default_sg=self.USED_IN_NON_DEFAULT_SG, ) assert isinstance( rul, default_security_group_rule.DefaultSecurityGroupRule ) self.RULE_ID = rul.id def tearDown(self): sot = self.operator_cloud.network.delete_default_security_group_rule( self.RULE_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.operator_cloud.network.find_default_security_group_rule( self.RULE_ID ) self.assertEqual(self.RULE_ID, sot.id) def test_get(self): sot = self.operator_cloud.network.get_default_security_group_rule( self.RULE_ID ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.DIR, sot.direction) self.assertEqual(self.PROTO, sot.protocol) self.assertEqual(self.PORT, sot.port_range_min) self.assertEqual(self.PORT, sot.port_range_max) self.assertEqual(self.USED_IN_DEFAULT_SG, sot.used_in_default_sg) self.assertEqual( self.USED_IN_NON_DEFAULT_SG, sot.used_in_non_default_sg ) def test_list(self): ids = [ o.id for o in self.operator_cloud.network.default_security_group_rules() ] self.assertIn(self.RULE_ID, ids) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_dvr_router.py0000664000175000017500000000444300000000000027353 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import router from openstack.tests.functional import base class TestDVRRouter(base.BaseFunctionalTest): ID = None def setUp(self): super().setUp() if not self.operator_cloud: # Current policies forbid regular user use it self.skipTest("Operator cloud is required for this test") if not self.operator_cloud._has_neutron_extension("dvr"): self.skipTest("dvr service not supported by cloud") self.NAME = self.getUniqueString() self.UPDATE_NAME = self.getUniqueString() sot = self.operator_cloud.network.create_router( name=self.NAME, distributed=True ) assert isinstance(sot, router.Router) self.assertEqual(self.NAME, sot.name) self.ID = sot.id def tearDown(self): sot = self.operator_cloud.network.delete_router( self.ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.operator_cloud.network.find_router(self.NAME) self.assertEqual(self.ID, sot.id) def test_get(self): sot = self.operator_cloud.network.get_router(self.ID) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.ID, sot.id) self.assertTrue(sot.is_distributed) def test_list(self): names = [o.name for o in self.operator_cloud.network.routers()] 
self.assertIn(self.NAME, names) dvr = [o.is_distributed for o in self.operator_cloud.network.routers()] self.assertTrue(dvr) def test_update(self): sot = self.operator_cloud.network.update_router( self.ID, name=self.UPDATE_NAME ) self.assertEqual(self.UPDATE_NAME, sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_extension.py0000664000175000017500000000206400000000000027171 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestExtension(base.BaseFunctionalTest): def test_list(self): extensions = list(self.user_cloud.network.extensions()) self.assertGreater(len(extensions), 0) for ext in extensions: self.assertIsInstance(ext.name, str) self.assertIsInstance(ext.alias, str) def test_find(self): extension = self.user_cloud.network.find_extension("external-net") self.assertEqual("Neutron external network", extension.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_firewall_group.py0000664000175000017500000000351200000000000030175 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import firewall_group from openstack.tests.functional import base class TestFirewallGroup(base.BaseFunctionalTest): ID = None def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("fwaas_v2"): self.skipTest("fwaas_v2 service not supported by cloud") self.NAME = self.getUniqueString() sot = self.user_cloud.network.create_firewall_group(name=self.NAME) assert isinstance(sot, firewall_group.FirewallGroup) self.assertEqual(self.NAME, sot.name) self.ID = sot.id def tearDown(self): sot = self.user_cloud.network.delete_firewall_group( self.ID, ignore_missing=False ) self.assertIs(None, sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_firewall_group(self.NAME) self.assertEqual(self.ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_firewall_group(self.ID) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.ID, sot.id) def test_list(self): names = [o.name for o in self.user_cloud.network.firewall_groups()] self.assertIn(self.NAME, names) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_firewall_policy.py0000664000175000017500000000352400000000000030343 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import firewall_policy from openstack.tests.functional import base class TestFirewallPolicy(base.BaseFunctionalTest): ID = None def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("fwaas_v2"): self.skipTest("fwaas_v2 service not supported by cloud") self.NAME = self.getUniqueString() sot = self.user_cloud.network.create_firewall_policy(name=self.NAME) assert isinstance(sot, firewall_policy.FirewallPolicy) self.assertEqual(self.NAME, sot.name) self.ID = sot.id def tearDown(self): sot = self.user_cloud.network.delete_firewall_policy( self.ID, ignore_missing=False ) self.assertIs(None, sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_firewall_policy(self.NAME) self.assertEqual(self.ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_firewall_policy(self.ID) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.ID, sot.id) def test_list(self): names = [o.name for o in self.user_cloud.network.firewall_policies()] self.assertIn(self.NAME, names) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_firewall_rule.py0000664000175000017500000000516500000000000030016 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import firewall_rule from openstack.tests.functional import base class TestFirewallRule(base.BaseFunctionalTest): ACTION = "allow" DEST_IP = "10.0.0.0/24" DEST_PORT = "80" IP_VERSION = 4 PROTOCOL = "tcp" SOUR_IP = "10.0.1.0/24" SOUR_PORT = "8000" ID = None def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("fwaas_v2"): self.skipTest("fwaas_v2 service not supported by cloud") self.NAME = self.getUniqueString() sot = self.user_cloud.network.create_firewall_rule( name=self.NAME, action=self.ACTION, source_port=self.SOUR_PORT, destination_port=self.DEST_PORT, source_ip_address=self.SOUR_IP, destination_ip_address=self.DEST_IP, ip_version=self.IP_VERSION, protocol=self.PROTOCOL, ) assert isinstance(sot, firewall_rule.FirewallRule) self.assertEqual(self.NAME, sot.name) self.ID = sot.id def tearDown(self): sot = self.user_cloud.network.delete_firewall_rule( self.ID, ignore_missing=False ) self.assertIs(None, sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_firewall_rule(self.NAME) self.assertEqual(self.ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_firewall_rule(self.ID) self.assertEqual(self.ID, sot.id) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.ACTION, sot.action) self.assertEqual(self.DEST_IP, sot.destination_ip_address) self.assertEqual(self.DEST_PORT, sot.destination_port) self.assertEqual(self.IP_VERSION, 
sot.ip_version) self.assertEqual(self.SOUR_IP, sot.source_ip_address) self.assertEqual(self.SOUR_PORT, sot.source_port) def test_list(self): ids = [o.id for o in self.user_cloud.network.firewall_rules()] self.assertIn(self.ID, ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_firewall_rule_insert_remove_policy.py0000664000175000017500000000752300000000000034336 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.network.v2 import firewall_policy from openstack.network.v2 import firewall_rule from openstack.tests.functional import base class TestFirewallPolicyRuleAssociations(base.BaseFunctionalTest): POLICY_NAME = uuid.uuid4().hex RULE1_NAME = uuid.uuid4().hex RULE2_NAME = uuid.uuid4().hex POLICY_ID = None RULE1_ID = None RULE2_ID = None def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("fwaas_v2"): self.skipTest("fwaas_v2 service not supported by cloud") rul1 = self.user_cloud.network.create_firewall_rule( name=self.RULE1_NAME ) assert isinstance(rul1, firewall_rule.FirewallRule) self.assertEqual(self.RULE1_NAME, rul1.name) rul2 = self.user_cloud.network.create_firewall_rule( name=self.RULE2_NAME ) assert isinstance(rul2, firewall_rule.FirewallRule) self.assertEqual(self.RULE2_NAME, rul2.name) pol = self.user_cloud.network.create_firewall_policy( name=self.POLICY_NAME ) assert isinstance(pol, firewall_policy.FirewallPolicy) self.assertEqual(self.POLICY_NAME, pol.name) self.RULE1_ID = rul1.id self.RULE2_ID = rul2.id self.POLICY_ID = pol.id def tearDown(self): sot = self.user_cloud.network.delete_firewall_policy( self.POLICY_ID, ignore_missing=False ) self.assertIs(None, sot) sot = self.user_cloud.network.delete_firewall_rule( self.RULE1_ID, ignore_missing=False ) self.assertIs(None, sot) sot = self.user_cloud.network.delete_firewall_rule( self.RULE2_ID, ignore_missing=False ) self.assertIs(None, sot) super().tearDown() def test_insert_rule_into_policy(self): policy = self.user_cloud.network.insert_rule_into_policy( self.POLICY_ID, firewall_rule_id=self.RULE1_ID ) self.assertIn(self.RULE1_ID, policy["firewall_rules"]) policy = self.user_cloud.network.insert_rule_into_policy( self.POLICY_ID, firewall_rule_id=self.RULE2_ID, insert_before=self.RULE1_ID, ) self.assertEqual(self.RULE1_ID, policy["firewall_rules"][1]) self.assertEqual(self.RULE2_ID, policy["firewall_rules"][0]) def test_remove_rule_from_policy(self): # 
insert rules into policy before we remove it again policy = self.user_cloud.network.insert_rule_into_policy( self.POLICY_ID, firewall_rule_id=self.RULE1_ID ) self.assertIn(self.RULE1_ID, policy["firewall_rules"]) policy = self.user_cloud.network.insert_rule_into_policy( self.POLICY_ID, firewall_rule_id=self.RULE2_ID ) self.assertIn(self.RULE2_ID, policy["firewall_rules"]) policy = self.user_cloud.network.remove_rule_from_policy( self.POLICY_ID, firewall_rule_id=self.RULE1_ID ) self.assertNotIn(self.RULE1_ID, policy["firewall_rules"]) policy = self.user_cloud.network.remove_rule_from_policy( self.POLICY_ID, firewall_rule_id=self.RULE2_ID ) self.assertNotIn(self.RULE2_ID, policy["firewall_rules"]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_flavor.py0000664000175000017500000000727600000000000026460 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import flavor from openstack.tests.functional import base class TestFlavor(base.BaseFunctionalTest): UPDATE_NAME = "UPDATED-NAME" SERVICE_TYPE = "FLAVORS" ID = None SERVICE_PROFILE_DESCRIPTION = "DESCRIPTION" METAINFO = "FlAVOR_PROFILE_METAINFO" def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("flavors"): self.skipTest("Neutron flavor extension is required for this test") self.FLAVOR_NAME = self.getUniqueString("flavor") if self.operator_cloud: flavors = self.operator_cloud.network.create_flavor( name=self.FLAVOR_NAME, service_type=self.SERVICE_TYPE ) assert isinstance(flavors, flavor.Flavor) self.assertEqual(self.FLAVOR_NAME, flavors.name) self.assertEqual(self.SERVICE_TYPE, flavors.service_type) self.ID = flavors.id self.service_profiles = ( self.operator_cloud.network.create_service_profile( description=self.SERVICE_PROFILE_DESCRIPTION, metainfo=self.METAINFO, ) ) def tearDown(self): if self.operator_cloud and self.ID: flavors = self.operator_cloud.network.delete_flavor( self.ID, ignore_missing=True ) self.assertIsNone(flavors) service_profiles = self.user_cloud.network.delete_service_profile( self.ID, ignore_missing=True ) self.assertIsNone(service_profiles) super().tearDown() def test_find(self): if self.ID: flavors = self.user_cloud.network.find_flavor(self.FLAVOR_NAME) self.assertEqual(self.ID, flavors.id) else: self.user_cloud.network.find_flavor("definitely_missing") def test_get(self): if not self.ID: self.skipTest("Operator cloud required for this test") flavors = self.user_cloud.network.get_flavor(self.ID) self.assertEqual(self.FLAVOR_NAME, flavors.name) self.assertEqual(self.ID, flavors.id) def test_list(self): names = [f.name for f in self.user_cloud.network.flavors()] if self.ID: self.assertIn(self.FLAVOR_NAME, names) def test_update(self): if not self.operator_cloud: self.skipTest("Operator cloud required for this test") flavor = self.operator_cloud.network.update_flavor( self.ID, 
name=self.UPDATE_NAME ) self.assertEqual(self.UPDATE_NAME, flavor.name) def test_associate_disassociate_flavor_with_service_profile(self): if not self.operator_cloud: self.skipTest("Operator cloud required for this test") response = ( self.operator_cloud.network.associate_flavor_with_service_profile( self.ID, self.service_profiles.id ) ) self.assertIsNotNone(response) response = self.operator_cloud.network.disassociate_flavor_from_service_profile( # noqa: E501 self.ID, self.service_profiles.id ) self.assertIsNone(response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_floating_ip.py0000664000175000017500000002026200000000000027450 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import floating_ip from openstack.network.v2 import network from openstack.network.v2 import port from openstack.network.v2 import router from openstack.network.v2 import subnet from openstack.tests.functional import base class TestFloatingIP(base.BaseFunctionalTest): IPV4 = 4 EXT_CIDR = "10.100.0.0/24" INT_CIDR = "10.101.0.0/24" EXT_NET_ID: str INT_NET_ID: str EXT_SUB_ID: str INT_SUB_ID: str ROT_ID: str PORT_ID: str FIP: floating_ip.FloatingIP DNS_DOMAIN = "example.org." 
DNS_NAME = "fip1" def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("external-net"): self.skipTest( "Neutron external-net extension is required for this test" ) self.TIMEOUT_SCALING_FACTOR = 1.5 self.ROT_NAME = self.getUniqueString() self.INT_NET_NAME = self.getUniqueString() self.INT_SUB_NAME = self.getUniqueString() self.is_dns_supported = False # Find External Network for net in self.user_cloud.network.networks(is_router_external=True): self.EXT_NET_ID = net.id # Find subnet of the chosen external net for sub in self.user_cloud.network.subnets(network_id=self.EXT_NET_ID): self.EXT_SUB_ID = sub.id if not self.EXT_NET_ID and self.operator_cloud: # There is no existing external net, but operator # credentials available # WARNING: this external net is not dropped # Create External Network net = self._create_network( self.EXT_NET_NAME, **{"router:external": True} ) self.EXT_NET_ID = net.id sub = self._create_subnet( self.EXT_SUB_NAME, self.EXT_NET_ID, self.EXT_CIDR ) self.EXT_SUB_ID = sub.id # Create Internal Network net = self._create_network(self.INT_NET_NAME) self.INT_NET_ID = net.id sub = self._create_subnet( self.INT_SUB_NAME, self.INT_NET_ID, self.INT_CIDR ) self.INT_SUB_ID = sub.id # Create Router sot = self.user_cloud.network.create_router( name=self.ROT_NAME, **{"external_gateway_info": {"network_id": self.EXT_NET_ID}} ) assert isinstance(sot, router.Router) self.assertEqual(self.ROT_NAME, sot.name) self.ROT_ID = sot.id self.ROT = sot # Add Router's Interface to Internal Network sot = self.ROT.add_interface( self.user_cloud.network, subnet_id=self.INT_SUB_ID ) self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) # Create Port in Internal Network prt = self.user_cloud.network.create_port(network_id=self.INT_NET_ID) assert isinstance(prt, port.Port) self.PORT_ID = prt.id self.PORT = prt # Create Floating IP. 
fip_args = dict( floating_network_id=self.EXT_NET_ID, ) if self.user_cloud._has_neutron_extension( "dns-integration" ) and self.user_cloud.has_service("dns"): self.is_dns_supported = True fip_args.update( dict(dns_domain=self.DNS_DOMAIN, dns_name=self.DNS_NAME) ) fip = self.user_cloud.network.create_ip(**fip_args) assert isinstance(fip, floating_ip.FloatingIP) self.FIP = fip def tearDown(self): sot = self.user_cloud.network.delete_ip( self.FIP.id, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_port( self.PORT_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.ROT.remove_interface( self.user_cloud.network, subnet_id=self.INT_SUB_ID ) self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) sot = self.user_cloud.network.delete_router( self.ROT_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_subnet( self.INT_SUB_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network( self.INT_NET_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def _create_network(self, name, **args): self.name = name net = self.user_cloud.network.create_network(name=name, **args) assert isinstance(net, network.Network) self.assertEqual(self.name, net.name) return net def _create_subnet(self, name, net_id, cidr): self.name = name self.net_id = net_id self.cidr = cidr sub = self.user_cloud.network.create_subnet( name=self.name, ip_version=self.IPV4, network_id=self.net_id, cidr=self.cidr, ) assert isinstance(sub, subnet.Subnet) self.assertEqual(self.name, sub.name) return sub def test_find_by_id(self): sot = self.user_cloud.network.find_ip(self.FIP.id) self.assertEqual(self.FIP.id, sot.id) def test_find_by_ip_address(self): sot = self.user_cloud.network.find_ip(self.FIP.floating_ip_address) self.assertEqual(self.FIP.floating_ip_address, sot.floating_ip_address) self.assertEqual(self.FIP.floating_ip_address, sot.name) def test_find_available_ip(self): sot = 
self.user_cloud.network.find_available_ip() self.assertIsNotNone(sot.id) self.assertIsNone(sot.port_id) self.assertIsNone(sot.port_details) def test_get(self): sot = self.user_cloud.network.get_ip(self.FIP.id) self.assertEqual(self.EXT_NET_ID, sot.floating_network_id) self.assertEqual(self.FIP.id, sot.id) self.assertEqual(self.FIP.floating_ip_address, sot.floating_ip_address) self.assertEqual(self.FIP.fixed_ip_address, sot.fixed_ip_address) self.assertEqual(self.FIP.port_id, sot.port_id) self.assertEqual(self.FIP.port_details, sot.port_details) self.assertEqual(self.FIP.router_id, sot.router_id) if self.is_dns_supported: self.assertEqual(self.DNS_DOMAIN, sot.dns_domain) self.assertEqual(self.DNS_NAME, sot.dns_name) def test_list(self): ids = [o.id for o in self.user_cloud.network.ips()] self.assertIn(self.FIP.id, ids) def test_update(self): sot = self.user_cloud.network.update_ip( self.FIP.id, port_id=self.PORT_ID ) self.assertEqual(self.PORT_ID, sot.port_id) self._assert_port_details(self.PORT, sot.port_details) self.assertEqual(self.FIP.id, sot.id) def test_set_tags(self): sot = self.user_cloud.network.get_ip(self.FIP.id) self.assertEqual([], sot.tags) self.user_cloud.network.set_tags(sot, ["blue"]) sot = self.user_cloud.network.get_ip(self.FIP.id) self.assertEqual(["blue"], sot.tags) self.user_cloud.network.set_tags(sot, []) sot = self.user_cloud.network.get_ip(self.FIP.id) self.assertEqual([], sot.tags) def _assert_port_details(self, port, port_details): self.assertEqual(port.name, port_details["name"]) self.assertEqual(port.network_id, port_details["network_id"]) self.assertEqual(port.mac_address, port_details["mac_address"]) self.assertEqual( port.is_admin_state_up, port_details["admin_state_up"] ) self.assertEqual(port.status, port_details["status"]) self.assertEqual(port.device_id, port_details["device_id"]) self.assertEqual(port.device_owner, port_details["device_owner"]) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_l3_conntrack_helper.py0000664000175000017500000000503400000000000031074 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import l3_conntrack_helper as _l3_conntrack_helper from openstack.network.v2 import router from openstack.tests.functional import base class TestL3ConntrackHelper(base.BaseFunctionalTest): PROTOCOL = "udp" HELPER = "tftp" PORT = 69 ROT_ID = None def setUp(self): super().setUp() if not self.user_cloud.network.find_extension("l3-conntrack-helper"): self.skipTest("L3 conntrack helper extension disabled") self.ROT_NAME = self.getUniqueString() # Create Router sot = self.user_cloud.network.create_router(name=self.ROT_NAME) self.assertIsInstance(sot, router.Router) self.assertEqual(self.ROT_NAME, sot.name) self.ROT_ID = sot.id self.ROT = sot # Create conntrack helper ct_helper = self.user_cloud.network.create_conntrack_helper( router=self.ROT, protocol=self.PROTOCOL, helper=self.HELPER, port=self.PORT, ) self.assertIsInstance(ct_helper, _l3_conntrack_helper.ConntrackHelper) self.CT_HELPER = ct_helper def tearDown(self): sot = self.user_cloud.network.delete_router( self.ROT_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = self.user_cloud.network.get_conntrack_helper( self.CT_HELPER, self.ROT_ID ) self.assertEqual(self.PROTOCOL, sot.protocol) 
self.assertEqual(self.HELPER, sot.helper) self.assertEqual(self.PORT, sot.port) def test_list(self): helper_ids = [ o.id for o in self.user_cloud.network.conntrack_helpers(self.ROT_ID) ] self.assertIn(self.CT_HELPER.id, helper_ids) def test_update(self): NEW_PORT = 90 sot = self.user_cloud.network.update_conntrack_helper( self.CT_HELPER.id, self.ROT_ID, port=NEW_PORT ) self.assertEqual(NEW_PORT, sot.port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_local_ip.py0000664000175000017500000000503200000000000026735 0ustar00zuulzuul00000000000000# Copyright 2021 Huawei, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from openstack.network.v2 import local_ip as _local_ip from openstack.tests.functional import base class TestLocalIP(base.BaseFunctionalTest): LOCAL_IP_ID = None def setUp(self): super().setUp() if not self.user_cloud.network.find_extension("local_ip"): self.skipTest("Local IP extension disabled") self.LOCAL_IP_NAME = self.getUniqueString() self.LOCAL_IP_DESCRIPTION = self.getUniqueString() self.LOCAL_IP_NAME_UPDATED = self.getUniqueString() self.LOCAL_IP_DESCRIPTION_UPDATED = self.getUniqueString() local_ip = self.user_cloud.network.create_local_ip( name=self.LOCAL_IP_NAME, description=self.LOCAL_IP_DESCRIPTION, ) assert isinstance(local_ip, _local_ip.LocalIP) self.assertEqual(self.LOCAL_IP_NAME, local_ip.name) self.assertEqual(self.LOCAL_IP_DESCRIPTION, local_ip.description) self.LOCAL_IP_ID = local_ip.id def tearDown(self): sot = self.user_cloud.network.delete_local_ip(self.LOCAL_IP_ID) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_local_ip(self.LOCAL_IP_NAME) self.assertEqual(self.LOCAL_IP_ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_local_ip(self.LOCAL_IP_ID) self.assertEqual(self.LOCAL_IP_NAME, sot.name) def test_list(self): names = [ local_ip.name for local_ip in self.user_cloud.network.local_ips() ] self.assertIn(self.LOCAL_IP_NAME, names) def test_update(self): sot = self.user_cloud.network.update_local_ip( self.LOCAL_IP_ID, name=self.LOCAL_IP_NAME_UPDATED, description=self.LOCAL_IP_DESCRIPTION_UPDATED, ) self.assertEqual(self.LOCAL_IP_NAME_UPDATED, sot.name) self.assertEqual(self.LOCAL_IP_DESCRIPTION_UPDATED, sot.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_local_ip_association.py0000664000175000017500000000522500000000000031335 0ustar00zuulzuul00000000000000# Copyright 2021 Huawei, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from openstack.network.v2 import local_ip_association as _local_ip_association from openstack.tests.functional import base class TestLocalIPAssociation(base.BaseFunctionalTest): LOCAL_IP_ID = None FIXED_PORT_ID = None FIXED_IP = None def setUp(self): super().setUp() if not self.user_cloud.network.find_extension("local_ip"): self.skipTest("Local IP extension disabled") self.LOCAL_IP_ID = self.getUniqueString() self.FIXED_PORT_ID = self.getUniqueString() self.FIXED_IP = self.getUniqueString() local_ip_association = ( self.user_cloud.network.create_local_ip_association( local_ip=self.LOCAL_IP_ID, fixed_port_id=self.FIXED_PORT_ID, fixed_ip=self.FIXED_IP, ) ) assert isinstance( local_ip_association, _local_ip_association.LocalIPAssociation ) self.assertEqual(self.LOCAL_IP_ID, local_ip_association.local_ip_id) self.assertEqual( self.FIXED_PORT_ID, local_ip_association.fixed_port_id ) self.assertEqual(self.FIXED_IP, local_ip_association.fixed_ip) def tearDown(self): sot = self.user_cloud.network.delete_local_ip_association( self.LOCAL_IP_ID, self.FIXED_PORT_ID ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_local_ip_association( self.FIXED_PORT_ID, self.LOCAL_IP_ID ) self.assertEqual(self.FIXED_PORT_ID, sot.fixed_port_id) def test_get(self): sot = self.user_cloud.network.get_local_ip_association( self.FIXED_PORT_ID, self.LOCAL_IP_ID ) self.assertEqual(self.FIXED_PORT_ID, 
sot.fixed_port_id) def test_list(self): fixed_port_id = [ obj.fixed_port_id for obj in self.user_cloud.network.local_ip_associations( self.LOCAL_IP_ID ) ] self.assertIn(self.FIXED_PORT_ID, fixed_port_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_ndp_proxy.py0000664000175000017500000001363500000000000027205 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import ndp_proxy as _ndp_proxy from openstack.network.v2 import network from openstack.network.v2 import port from openstack.network.v2 import router from openstack.network.v2 import subnet from openstack.tests.functional import base class TestNDPProxy(base.BaseFunctionalTest): IPV6 = 6 EXT_CIDR = "2002::1:0/112" INT_CIDR = "2002::2:0/112" EXT_NET_ID = None INT_NET_ID = None EXT_SUB_ID = None INT_SUB_ID = None ROT_ID = None INTERNAL_PORT_ID = None def setUp(self): super().setUp() if not self.user_cloud.network.find_extension("l3-ndp-proxy"): self.skipTest("L3 ndp proxy extension disabled") self.ROT_NAME = self.getUniqueString() self.EXT_NET_NAME = self.getUniqueString() self.EXT_SUB_NAME = self.getUniqueString() self.INT_NET_NAME = self.getUniqueString() self.INT_SUB_NAME = self.getUniqueString() # Find External Network for net in self.user_cloud.network.networks(is_router_external=True): self.EXT_NET_ID = net.id # Find subnet of the chosen external net for sub in self.user_cloud.network.subnets(network_id=self.EXT_NET_ID): self.EXT_SUB_ID = sub.id if not self.EXT_NET_ID and self.operator_cloud: # There is no existing external net, but operator # credentials available # WARNING: this external net is not dropped # Create External Network net = self._create_network( self.EXT_NET_NAME, **{"router:external": True} ) self.EXT_NET_ID = net.id sub = self._create_subnet( self.EXT_SUB_NAME, self.EXT_NET_ID, self.EXT_CIDR ) self.EXT_SUB_ID = sub.id # Create Router sot = self.user_cloud.network.create_router( name=self.ROT_NAME, **{ "external_gateway_info": {"network_id": self.EXT_NET_ID}, "enable_ndp_proxy": True, }, ) assert isinstance(sot, router.Router) self.assertEqual(self.ROT_NAME, sot.name) self.ROT_ID = sot.id self.ROT = sot # Add Router's Interface to Internal Network sot = self.ROT.add_interface( self.user_cloud.network, subnet_id=self.INT_SUB_ID ) self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) # Create Port in Internal Network prt = 
self.user_cloud.network.create_port(network_id=self.INT_NET_ID) assert isinstance(prt, port.Port) self.INTERNAL_PORT_ID = prt.id self.INTERNAL_IP_ADDRESS = prt.fixed_ips[0]["ip_address"] # Create ndp proxy np = self.user_cloud.network.create_ndp_proxy( router_id=self.ROT_ID, port_id=self.INTERNAL_PORT_ID ) assert isinstance(np, _ndp_proxy.NDPProxy) self.NP = np def tearDown(self): sot = self.user_cloud.network.delete_ndp_proxy( self.NP.id, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_port( self.INTERNAL_PORT_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.ROT.remove_interface( self.user_cloud.network, subnet_id=self.INT_SUB_ID ) self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) sot = self.user_cloud.network.delete_router( self.ROT_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_subnet( self.INT_SUB_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network( self.INT_NET_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def _create_network(self, name, **args): self.name = name net = self.user_cloud.network.create_network(name=name, **args) assert isinstance(net, network.Network) self.assertEqual(self.name, net.name) return net def _create_subnet(self, name, net_id, cidr): self.name = name self.net_id = net_id self.cidr = cidr sub = self.user_cloud.network.create_subnet( name=self.name, ip_version=self.IPV6, network_id=self.net_id, cidr=self.cidr, ) assert isinstance(sub, subnet.Subnet) self.assertEqual(self.name, sub.name) return sub def test_find(self): sot = self.user_cloud.network.find_ndp_proxy(self.NP.id) self.assertEqual(self.ROT_ID, sot.router_id) self.assertEqual(self.INTERNAL_PORT_ID, sot.port_id) self.assertEqual(self.INTERNAL_IP_ADDRESS, sot.ip_address) def test_get(self): sot = self.user_cloud.network.get_ndp_proxy(self.NP.id) self.assertEqual(self.ROT_ID, sot.router_id) self.assertEqual(self.INTERNAL_PORT_ID, 
sot.port_id) self.assertEqual(self.INTERNAL_IP_ADDRESS, sot.ip_address) def test_list(self): np_ids = [o.id for o in self.user_cloud.network.ndp_proxies()] self.assertIn(self.NP.id, np_ids) def test_update(self): description = "balabalbala" sot = self.user_cloud.network.update_ndp_proxy( self.NP.id, description=description ) self.assertEqual(description, sot.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_network.py0000664000175000017500000000631000000000000026644 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network from openstack.tests.functional import base def create_network(conn, name, cidr): try: network = conn.network.create_network(name=name) subnet = conn.network.create_subnet( name=name, ip_version=4, network_id=network.id, cidr=cidr ) return (network, subnet) except Exception as e: print(str(e)) pass return (None, None) def delete_network(conn, network, subnet): if subnet: conn.network.delete_subnet(subnet) if network: conn.network.delete_network(network) class TestNetwork(base.BaseFunctionalTest): ID = None def setUp(self): super().setUp() self.NAME = self.getUniqueString() sot = self.user_cloud.network.create_network(name=self.NAME) assert isinstance(sot, network.Network) self.assertEqual(self.NAME, sot.name) self.ID = sot.id def tearDown(self): sot = self.user_cloud.network.delete_network( self.ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_network(self.NAME) self.assertEqual(self.ID, sot.id) def test_find_with_filter(self): if not self.operator_cloud: self.skipTest("Operator cloud required for this test") project_id_1 = "1" project_id_2 = "2" sot1 = self.operator_cloud.network.create_network( name=self.NAME, project_id=project_id_1 ) sot2 = self.operator_cloud.network.create_network( name=self.NAME, project_id=project_id_2 ) sot = self.operator_cloud.network.find_network( self.NAME, project_id=project_id_1 ) self.assertEqual(project_id_1, sot.project_id) self.operator_cloud.network.delete_network(sot1.id) self.operator_cloud.network.delete_network(sot2.id) def test_get(self): sot = self.user_cloud.network.get_network(self.ID) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.ID, sot.id) def test_list(self): names = [o.name for o in self.user_cloud.network.networks()] self.assertIn(self.NAME, names) def test_set_tags(self): sot = self.user_cloud.network.get_network(self.ID) self.assertEqual([], sot.tags) 
self.user_cloud.network.set_tags(sot, ["blue"]) sot = self.user_cloud.network.get_network(self.ID) self.assertEqual(["blue"], sot.tags) self.user_cloud.network.set_tags(sot, []) sot = self.user_cloud.network.get_network(self.ID) self.assertEqual([], sot.tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_network_ip_availability.py0000664000175000017500000000620500000000000032071 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network from openstack.network.v2 import port from openstack.network.v2 import subnet from openstack.tests.functional import base class TestNetworkIPAvailability(base.BaseFunctionalTest): IPV4 = 4 CIDR = "10.100.0.0/24" NET_ID = None SUB_ID = None PORT_ID = None def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud required for this test") if not self.operator_cloud._has_neutron_extension( "network-ip-availability" ): self.skipTest( "Neutron network-ip-availability extension is required " "for this test" ) self.NET_NAME = self.getUniqueString() self.SUB_NAME = self.getUniqueString() self.PORT_NAME = self.getUniqueString() self.UPDATE_NAME = self.getUniqueString() net = self.operator_cloud.network.create_network(name=self.NET_NAME) assert isinstance(net, network.Network) self.assertEqual(self.NET_NAME, net.name) self.NET_ID = net.id sub = self.operator_cloud.network.create_subnet( name=self.SUB_NAME, ip_version=self.IPV4, network_id=self.NET_ID, cidr=self.CIDR, ) assert isinstance(sub, subnet.Subnet) self.assertEqual(self.SUB_NAME, sub.name) self.SUB_ID = sub.id prt = self.operator_cloud.network.create_port( name=self.PORT_NAME, network_id=self.NET_ID ) assert isinstance(prt, port.Port) self.assertEqual(self.PORT_NAME, prt.name) self.PORT_ID = prt.id def tearDown(self): sot = self.operator_cloud.network.delete_port(self.PORT_ID) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_subnet(self.SUB_ID) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_network(self.NET_ID) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.operator_cloud.network.find_network_ip_availability( self.NET_ID ) self.assertEqual(self.NET_ID, sot.network_id) def test_get(self): sot = self.operator_cloud.network.get_network_ip_availability( self.NET_ID ) self.assertEqual(self.NET_ID, sot.network_id) self.assertEqual(self.NET_NAME, sot.network_name) def test_list(self): ids = [ 
o.network_id for o in self.operator_cloud.network.network_ip_availabilities() ] self.assertIn(self.NET_ID, ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_network_segment_range.py0000664000175000017500000001056200000000000031546 0ustar00zuulzuul00000000000000# Copyright (c) 2018, Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import network_segment_range from openstack.tests.functional import base class TestNetworkSegmentRange(base.BaseFunctionalTest): NETWORK_SEGMENT_RANGE_ID = None NAME = "test_name" DEFAULT = False SHARED = False PROJECT_ID = "2018" NETWORK_TYPE = "vlan" PHYSICAL_NETWORK = "phys_net" MINIMUM = 100 MAXIMUM = 200 def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud required for this test") # NOTE(kailun): The network segment range extension is not yet enabled # by default. # Skip the tests if not enabled. 
if not self.operator_cloud.network.find_extension( "network-segment-range" ): self.skipTest("Network Segment Range extension disabled") test_seg_range = ( self.operator_cloud.network.create_network_segment_range( name=self.NAME, default=self.DEFAULT, shared=self.SHARED, project_id=self.PROJECT_ID, network_type=self.NETWORK_TYPE, physical_network=self.PHYSICAL_NETWORK, minimum=self.MINIMUM, maximum=self.MAXIMUM, ) ) self.assertIsInstance( test_seg_range, network_segment_range.NetworkSegmentRange ) self.NETWORK_SEGMENT_RANGE_ID = test_seg_range.id self.assertEqual(self.NAME, test_seg_range.name) self.assertEqual(self.DEFAULT, test_seg_range.default) self.assertEqual(self.SHARED, test_seg_range.shared) self.assertEqual(self.PROJECT_ID, test_seg_range.project_id) self.assertEqual(self.NETWORK_TYPE, test_seg_range.network_type) self.assertEqual( self.PHYSICAL_NETWORK, test_seg_range.physical_network ) self.assertEqual(self.MINIMUM, test_seg_range.minimum) self.assertEqual(self.MAXIMUM, test_seg_range.maximum) def tearDown(self): super().tearDown() def test_create_delete(self): del_test_seg_range = ( self.operator_cloud.network.delete_network_segment_range( self.NETWORK_SEGMENT_RANGE_ID ) ) self.assertIsNone(del_test_seg_range) def test_find(self): test_seg_range = ( self.operator_cloud.network.find_network_segment_range( self.NETWORK_SEGMENT_RANGE_ID ) ) self.assertEqual(self.NETWORK_SEGMENT_RANGE_ID, test_seg_range.id) def test_get(self): test_seg_range = self.operator_cloud.network.get_network_segment_range( self.NETWORK_SEGMENT_RANGE_ID ) self.assertEqual(self.NETWORK_SEGMENT_RANGE_ID, test_seg_range.id) self.assertEqual(self.NAME, test_seg_range.name) self.assertEqual(self.DEFAULT, test_seg_range.default) self.assertEqual(self.SHARED, test_seg_range.shared) self.assertEqual(self.PROJECT_ID, test_seg_range.project_id) self.assertEqual(self.NETWORK_TYPE, test_seg_range.network_type) self.assertEqual( self.PHYSICAL_NETWORK, test_seg_range.physical_network ) 
self.assertEqual(self.MINIMUM, test_seg_range.minimum) self.assertEqual(self.MAXIMUM, test_seg_range.maximum) def test_list(self): ids = [ o.id for o in self.operator_cloud.network.network_segment_ranges( name=None ) ] self.assertIn(self.NETWORK_SEGMENT_RANGE_ID, ids) def test_update(self): update_seg_range = self.operator_cloud.network.update_segment( self.NETWORK_SEGMENT_RANGE_ID, name="update_test_name" ) self.assertEqual("update_test_name", update_seg_range.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_port.py0000664000175000017500000000657600000000000026155 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network from openstack.network.v2 import port from openstack.network.v2 import subnet from openstack.tests.functional import base class TestPort(base.BaseFunctionalTest): IPV4 = 4 CIDR = "10.100.0.0/24" NET_ID = None SUB_ID = None PORT_ID = None def setUp(self): super().setUp() self.NET_NAME = self.getUniqueString() self.SUB_NAME = self.getUniqueString() self.PORT_NAME = self.getUniqueString() self.UPDATE_NAME = self.getUniqueString() net = self.user_cloud.network.create_network(name=self.NET_NAME) assert isinstance(net, network.Network) self.assertEqual(self.NET_NAME, net.name) self.NET_ID = net.id sub = self.user_cloud.network.create_subnet( name=self.SUB_NAME, ip_version=self.IPV4, network_id=self.NET_ID, cidr=self.CIDR, ) assert isinstance(sub, subnet.Subnet) self.assertEqual(self.SUB_NAME, sub.name) self.SUB_ID = sub.id prt = self.user_cloud.network.create_port( name=self.PORT_NAME, network_id=self.NET_ID ) assert isinstance(prt, port.Port) self.assertEqual(self.PORT_NAME, prt.name) self.PORT_ID = prt.id def tearDown(self): sot = self.user_cloud.network.delete_port( self.PORT_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_subnet( self.SUB_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network( self.NET_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_port(self.PORT_NAME) self.assertEqual(self.PORT_ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_port(self.PORT_ID) self.assertEqual(self.PORT_ID, sot.id) self.assertEqual(self.PORT_NAME, sot.name) self.assertEqual(self.NET_ID, sot.network_id) def test_list(self): ids = [o.id for o in self.user_cloud.network.ports()] self.assertIn(self.PORT_ID, ids) def test_update(self): sot = self.user_cloud.network.update_port( self.PORT_ID, name=self.UPDATE_NAME ) self.assertEqual(self.UPDATE_NAME, sot.name) def 
test_set_tags(self): sot = self.user_cloud.network.get_port(self.PORT_ID) self.assertEqual([], sot.tags) self.user_cloud.network.set_tags(sot, ["blue"]) sot = self.user_cloud.network.get_port(self.PORT_ID) self.assertEqual(["blue"], sot.tags) self.user_cloud.network.set_tags(sot, []) sot = self.user_cloud.network.get_port(self.PORT_ID) self.assertEqual([], sot.tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_port_forwarding.py0000664000175000017500000001723400000000000030370 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import floating_ip from openstack.network.v2 import network from openstack.network.v2 import port from openstack.network.v2 import port_forwarding as _port_forwarding from openstack.network.v2 import router from openstack.network.v2 import subnet from openstack.tests.functional import base class TestPortForwarding(base.BaseFunctionalTest): IPV4 = 4 FIP_ID = None EXT_CIDR = "10.100.0.0/24" INT_CIDR = "10.101.0.0/24" EXT_NET_ID = None INT_NET_ID = None EXT_SUB_ID = None INT_SUB_ID = None ROT_ID = None INTERNAL_PORT_ID = None INTERNAL_IP_ADDRESS = None INTERNAL_PORT = 8080 EXTERNAL_PORT = 80 PROTOCOL = "tcp" DESCRIPTION = "description" def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("external-net"): self.skipTest( "Neutron external-net extension is required for this test" ) if not self.user_cloud.network.find_extension( "floating-ip-port-forwarding" ): self.skipTest("Floating IP Port Forwarding extension disabled") self.ROT_NAME = self.getUniqueString() self.INT_NET_NAME = self.getUniqueString() self.INT_SUB_NAME = self.getUniqueString() self.EXT_NET_ID = None self.EXT_SUB_ID = None # Find External Network for net in self.user_cloud.network.networks(is_router_external=True): self.EXT_NET_ID = net.id # Find subnet of the chosen external net for sub in self.user_cloud.network.subnets(network_id=self.EXT_NET_ID): self.EXT_SUB_ID = sub.id if not self.EXT_NET_ID and self.operator_cloud: # There is no existing external net, but operator # credentials available # WARNING: this external net is not dropped # Create External Network net = self._create_network( self.EXT_NET_NAME, **{"router:external": True} ) self.EXT_NET_ID = net.id sub = self._create_subnet( self.EXT_SUB_NAME, self.EXT_NET_ID, self.EXT_CIDR ) self.EXT_SUB_ID = sub.id # Create Internal Network net = self._create_network(self.INT_NET_NAME) self.INT_NET_ID = net.id sub = self._create_subnet( self.INT_SUB_NAME, self.INT_NET_ID, self.INT_CIDR ) 
self.INT_SUB_ID = sub.id # Create Router sot = self.user_cloud.network.create_router( name=self.ROT_NAME, **{"external_gateway_info": {"network_id": self.EXT_NET_ID}} ) assert isinstance(sot, router.Router) self.assertEqual(self.ROT_NAME, sot.name) self.ROT_ID = sot.id self.ROT = sot # Add Router's Interface to Internal Network sot = self.ROT.add_interface( self.user_cloud.network, subnet_id=self.INT_SUB_ID ) self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) # Create Port in Internal Network prt = self.user_cloud.network.create_port(network_id=self.INT_NET_ID) assert isinstance(prt, port.Port) self.INTERNAL_PORT_ID = prt.id self.INTERNAL_IP_ADDRESS = prt.fixed_ips[0]["ip_address"] # Create Floating IP. fip = self.user_cloud.network.create_ip( floating_network_id=self.EXT_NET_ID ) assert isinstance(fip, floating_ip.FloatingIP) self.FIP_ID = fip.id # Create Port Forwarding pf = self.user_cloud.network.create_port_forwarding( floatingip_id=self.FIP_ID, internal_port_id=self.INTERNAL_PORT_ID, internal_ip_address=self.INTERNAL_IP_ADDRESS, internal_port=self.INTERNAL_PORT, external_port=self.EXTERNAL_PORT, protocol=self.PROTOCOL, description=self.DESCRIPTION, ) assert isinstance(pf, _port_forwarding.PortForwarding) self.PF = pf def tearDown(self): sot = self.user_cloud.network.delete_port_forwarding( self.PF, self.FIP_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_ip( self.FIP_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_port( self.INTERNAL_PORT_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.ROT.remove_interface( self.user_cloud.network, subnet_id=self.INT_SUB_ID ) self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) sot = self.user_cloud.network.delete_router( self.ROT_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_subnet( self.INT_SUB_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network( 
self.INT_NET_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def _create_network(self, name, **args): self.name = name net = self.user_cloud.network.create_network(name=name, **args) assert isinstance(net, network.Network) self.assertEqual(self.name, net.name) return net def _create_subnet(self, name, net_id, cidr): self.name = name self.net_id = net_id self.cidr = cidr sub = self.user_cloud.network.create_subnet( name=self.name, ip_version=self.IPV4, network_id=self.net_id, cidr=self.cidr, ) assert isinstance(sub, subnet.Subnet) self.assertEqual(self.name, sub.name) return sub def test_find(self): sot = self.user_cloud.network.find_port_forwarding( self.PF.id, self.FIP_ID ) self.assertEqual(self.INTERNAL_PORT_ID, sot.internal_port_id) self.assertEqual(self.INTERNAL_IP_ADDRESS, sot.internal_ip_address) self.assertEqual(self.INTERNAL_PORT, sot.internal_port) self.assertEqual(self.EXTERNAL_PORT, sot.external_port) self.assertEqual(self.PROTOCOL, sot.protocol) self.assertEqual(self.DESCRIPTION, sot.description) def test_get(self): sot = self.user_cloud.network.get_port_forwarding(self.PF, self.FIP_ID) self.assertEqual(self.INTERNAL_PORT_ID, sot.internal_port_id) self.assertEqual(self.INTERNAL_IP_ADDRESS, sot.internal_ip_address) self.assertEqual(self.INTERNAL_PORT, sot.internal_port) self.assertEqual(self.EXTERNAL_PORT, sot.external_port) self.assertEqual(self.PROTOCOL, sot.protocol) self.assertEqual(self.DESCRIPTION, sot.description) def test_list(self): pf_ids = [ o.id for o in self.user_cloud.network.port_forwardings(self.FIP_ID) ] self.assertIn(self.PF.id, pf_ids) def test_update(self): NEW_EXTERNAL_PORT = 90 sot = self.user_cloud.network.update_port_forwarding( self.PF.id, self.FIP_ID, external_port=NEW_EXTERNAL_PORT ) self.assertEqual(NEW_EXTERNAL_PORT, sot.external_port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_qos_bandwidth_limit_rule.py0000664000175000017500000001044500000000000032232 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import ( qos_bandwidth_limit_rule as _qos_bandwidth_limit_rule, ) from openstack.tests.functional import base class TestQoSBandwidthLimitRule(base.BaseFunctionalTest): QOS_POLICY_ID = None QOS_IS_SHARED = False QOS_POLICY_DESCRIPTION = "QoS policy description" RULE_MAX_KBPS = 1500 RULE_MAX_KBPS_NEW = 1800 RULE_MAX_BURST_KBPS = 1100 RULE_MAX_BURST_KBPS_NEW = 1300 RULE_DIRECTION = "egress" RULE_DIRECTION_NEW = "ingress" def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud required for this test") # Skip the tests if qos-bw-limit-direction extension is not enabled. 
if not self.operator_cloud.network.find_extension( "qos-bw-limit-direction" ): self.skipTest("Network qos-bw-limit-direction extension disabled") self.QOS_POLICY_NAME = self.getUniqueString() self.RULE_ID = self.getUniqueString() qos_policy = self.operator_cloud.network.create_qos_policy( description=self.QOS_POLICY_DESCRIPTION, name=self.QOS_POLICY_NAME, shared=self.QOS_IS_SHARED, ) self.QOS_POLICY_ID = qos_policy.id qos_rule = self.operator_cloud.network.create_qos_bandwidth_limit_rule( self.QOS_POLICY_ID, max_kbps=self.RULE_MAX_KBPS, max_burst_kbps=self.RULE_MAX_BURST_KBPS, direction=self.RULE_DIRECTION, ) assert isinstance( qos_rule, _qos_bandwidth_limit_rule.QoSBandwidthLimitRule ) self.assertEqual(self.RULE_MAX_KBPS, qos_rule.max_kbps) self.assertEqual(self.RULE_MAX_BURST_KBPS, qos_rule.max_burst_kbps) self.assertEqual(self.RULE_DIRECTION, qos_rule.direction) self.RULE_ID = qos_rule.id def tearDown(self): rule = self.operator_cloud.network.delete_qos_minimum_bandwidth_rule( self.RULE_ID, self.QOS_POLICY_ID ) qos_policy = self.operator_cloud.network.delete_qos_policy( self.QOS_POLICY_ID ) self.assertIsNone(rule) self.assertIsNone(qos_policy) super().tearDown() def test_find(self): sot = self.operator_cloud.network.find_qos_bandwidth_limit_rule( self.RULE_ID, self.QOS_POLICY_ID ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.RULE_MAX_KBPS, sot.max_kbps) self.assertEqual(self.RULE_MAX_BURST_KBPS, sot.max_burst_kbps) self.assertEqual(self.RULE_DIRECTION, sot.direction) def test_get(self): sot = self.operator_cloud.network.get_qos_bandwidth_limit_rule( self.RULE_ID, self.QOS_POLICY_ID ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.QOS_POLICY_ID, sot.qos_policy_id) self.assertEqual(self.RULE_MAX_KBPS, sot.max_kbps) self.assertEqual(self.RULE_MAX_BURST_KBPS, sot.max_burst_kbps) self.assertEqual(self.RULE_DIRECTION, sot.direction) def test_list(self): rule_ids = [ o.id for o in self.operator_cloud.network.qos_bandwidth_limit_rules( 
self.QOS_POLICY_ID ) ] self.assertIn(self.RULE_ID, rule_ids) def test_update(self): sot = self.operator_cloud.network.update_qos_bandwidth_limit_rule( self.RULE_ID, self.QOS_POLICY_ID, max_kbps=self.RULE_MAX_KBPS_NEW, max_burst_kbps=self.RULE_MAX_BURST_KBPS_NEW, direction=self.RULE_DIRECTION_NEW, ) self.assertEqual(self.RULE_MAX_KBPS_NEW, sot.max_kbps) self.assertEqual(self.RULE_MAX_BURST_KBPS_NEW, sot.max_burst_kbps) self.assertEqual(self.RULE_DIRECTION_NEW, sot.direction) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_qos_dscp_marking_rule.py0000664000175000017500000000627100000000000031533 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import ( qos_dscp_marking_rule as _qos_dscp_marking_rule, ) from openstack.tests.functional import base class TestQoSDSCPMarkingRule(base.BaseFunctionalTest): QOS_POLICY_ID = None QOS_IS_SHARED = False QOS_POLICY_DESCRIPTION = "QoS policy description" RULE_DSCP_MARK = 36 RULE_DSCP_MARK_NEW = 40 def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") # Skip the tests if qos extension is not enabled. 
if not self.conn.network.find_extension("qos"): self.skipTest("Network qos extension disabled") self.QOS_POLICY_NAME = self.getUniqueString() self.RULE_ID = self.getUniqueString() qos_policy = self.conn.network.create_qos_policy( description=self.QOS_POLICY_DESCRIPTION, name=self.QOS_POLICY_NAME, shared=self.QOS_IS_SHARED, ) self.QOS_POLICY_ID = qos_policy.id qos_rule = self.conn.network.create_qos_dscp_marking_rule( self.QOS_POLICY_ID, dscp_mark=self.RULE_DSCP_MARK, ) assert isinstance(qos_rule, _qos_dscp_marking_rule.QoSDSCPMarkingRule) self.assertEqual(self.RULE_DSCP_MARK, qos_rule.dscp_mark) self.RULE_ID = qos_rule.id def tearDown(self): rule = self.conn.network.delete_qos_minimum_bandwidth_rule( self.RULE_ID, self.QOS_POLICY_ID ) qos_policy = self.conn.network.delete_qos_policy(self.QOS_POLICY_ID) self.assertIsNone(rule) self.assertIsNone(qos_policy) super().tearDown() def test_find(self): sot = self.conn.network.find_qos_dscp_marking_rule( self.RULE_ID, self.QOS_POLICY_ID ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.RULE_DSCP_MARK, sot.dscp_mark) def test_get(self): sot = self.conn.network.get_qos_dscp_marking_rule( self.RULE_ID, self.QOS_POLICY_ID ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.QOS_POLICY_ID, sot.qos_policy_id) self.assertEqual(self.RULE_DSCP_MARK, sot.dscp_mark) def test_list(self): rule_ids = [ o.id for o in self.conn.network.qos_dscp_marking_rules( self.QOS_POLICY_ID ) ] self.assertIn(self.RULE_ID, rule_ids) def test_update(self): sot = self.conn.network.update_qos_dscp_marking_rule( self.RULE_ID, self.QOS_POLICY_ID, dscp_mark=self.RULE_DSCP_MARK_NEW ) self.assertEqual(self.RULE_DSCP_MARK_NEW, sot.dscp_mark) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_qos_minimum_bandwidth_rule.py0000664000175000017500000000737000000000000032572 0ustar00zuulzuul00000000000000# Licensed under the 
Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import ( qos_minimum_bandwidth_rule as _qos_minimum_bandwidth_rule, ) from openstack.tests.functional import base class TestQoSMinimumBandwidthRule(base.BaseFunctionalTest): QOS_POLICY_ID = None QOS_IS_SHARED = False QOS_POLICY_DESCRIPTION = "QoS policy description" RULE_ID = None RULE_MIN_KBPS = 1200 RULE_MIN_KBPS_NEW = 1800 RULE_DIRECTION = "egress" def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") # Skip the tests if qos-bw-limit-direction extension is not enabled. 
if not self.operator_cloud.network.find_extension( "qos-bw-limit-direction" ): self.skipTest("Network qos-bw-limit-direction extension disabled") self.QOS_POLICY_NAME = self.getUniqueString() qos_policy = self.operator_cloud.network.create_qos_policy( description=self.QOS_POLICY_DESCRIPTION, name=self.QOS_POLICY_NAME, shared=self.QOS_IS_SHARED, ) self.QOS_POLICY_ID = qos_policy.id qos_min_bw_rule = ( self.operator_cloud.network.create_qos_minimum_bandwidth_rule( self.QOS_POLICY_ID, direction=self.RULE_DIRECTION, min_kbps=self.RULE_MIN_KBPS, ) ) assert isinstance( qos_min_bw_rule, _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, ) self.assertEqual(self.RULE_MIN_KBPS, qos_min_bw_rule.min_kbps) self.assertEqual(self.RULE_DIRECTION, qos_min_bw_rule.direction) self.RULE_ID = qos_min_bw_rule.id def tearDown(self): rule = self.operator_cloud.network.delete_qos_minimum_bandwidth_rule( self.RULE_ID, self.QOS_POLICY_ID ) qos_policy = self.operator_cloud.network.delete_qos_policy( self.QOS_POLICY_ID ) self.assertIsNone(rule) self.assertIsNone(qos_policy) super().tearDown() def test_find(self): sot = self.operator_cloud.network.find_qos_minimum_bandwidth_rule( self.RULE_ID, self.QOS_POLICY_ID ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.RULE_DIRECTION, sot.direction) self.assertEqual(self.RULE_MIN_KBPS, sot.min_kbps) def test_get(self): sot = self.operator_cloud.network.get_qos_minimum_bandwidth_rule( self.RULE_ID, self.QOS_POLICY_ID ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.QOS_POLICY_ID, sot.qos_policy_id) self.assertEqual(self.RULE_DIRECTION, sot.direction) self.assertEqual(self.RULE_MIN_KBPS, sot.min_kbps) def test_list(self): rule_ids = [ o.id for o in self.operator_cloud.network.qos_minimum_bandwidth_rules( self.QOS_POLICY_ID ) ] self.assertIn(self.RULE_ID, rule_ids) def test_update(self): sot = self.operator_cloud.network.update_qos_minimum_bandwidth_rule( self.RULE_ID, self.QOS_POLICY_ID, min_kbps=self.RULE_MIN_KBPS_NEW ) 
self.assertEqual(self.RULE_MIN_KBPS_NEW, sot.min_kbps) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_qos_minimum_packet_rate_rule.py0000664000175000017500000000762200000000000033110 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import ( qos_minimum_packet_rate_rule as _qos_minimum_packet_rate_rule, ) from openstack.tests.functional import base class TestQoSMinimumPacketRateRule(base.BaseFunctionalTest): QOS_POLICY_ID = None QOS_IS_SHARED = False QOS_POLICY_DESCRIPTION = "QoS policy description" RULE_ID = None RULE_MIN_KPPS = 1200 RULE_MIN_KPPS_NEW = 1800 RULE_DIRECTION = "egress" RULE_DIRECTION_NEW = "ingress" def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") # Skip the tests if qos-pps-minimum extension is not enabled. 
if not self.operator_cloud.network.find_extension("qos-pps-minimum"): self.skipTest("Network qos-pps-minimum extension disabled") self.QOS_POLICY_NAME = self.getUniqueString() qos_policy = self.operator_cloud.network.create_qos_policy( description=self.QOS_POLICY_DESCRIPTION, name=self.QOS_POLICY_NAME, shared=self.QOS_IS_SHARED, ) self.QOS_POLICY_ID = qos_policy.id qos_min_pps_rule = ( self.operator_cloud.network.create_qos_minimum_packet_rate_rule( self.QOS_POLICY_ID, direction=self.RULE_DIRECTION, min_kpps=self.RULE_MIN_KPPS, ) ) assert isinstance( qos_min_pps_rule, _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, ) self.assertEqual(self.RULE_MIN_KPPS, qos_min_pps_rule.min_kpps) self.assertEqual(self.RULE_DIRECTION, qos_min_pps_rule.direction) self.RULE_ID = qos_min_pps_rule.id def tearDown(self): rule = self.operator_cloud.network.delete_qos_minimum_packet_rate_rule( self.RULE_ID, self.QOS_POLICY_ID ) qos_policy = self.operator_cloud.network.delete_qos_policy( self.QOS_POLICY_ID ) self.assertIsNone(rule) self.assertIsNone(qos_policy) super().tearDown() def test_find(self): sot = self.operator_cloud.network.find_qos_minimum_packet_rate_rule( self.RULE_ID, self.QOS_POLICY_ID ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.RULE_DIRECTION, sot.direction) self.assertEqual(self.RULE_MIN_KPPS, sot.min_kpps) def test_get(self): sot = self.operator_cloud.network.get_qos_minimum_packet_rate_rule( self.RULE_ID, self.QOS_POLICY_ID ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.QOS_POLICY_ID, sot.qos_policy_id) self.assertEqual(self.RULE_DIRECTION, sot.direction) self.assertEqual(self.RULE_MIN_KPPS, sot.min_kpps) def test_list(self): rule_ids = [ o.id for o in self.operator_cloud.network.qos_minimum_packet_rate_rules( self.QOS_POLICY_ID ) ] self.assertIn(self.RULE_ID, rule_ids) def test_update(self): sot = self.operator_cloud.network.update_qos_minimum_packet_rate_rule( self.RULE_ID, self.QOS_POLICY_ID, min_kpps=self.RULE_MIN_KPPS_NEW, 
direction=self.RULE_DIRECTION_NEW, ) self.assertEqual(self.RULE_MIN_KPPS_NEW, sot.min_kpps) self.assertEqual(self.RULE_DIRECTION_NEW, sot.direction) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_qos_policy.py0000664000175000017500000000635500000000000027345 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import typing as ty from openstack.network.v2 import qos_policy as _qos_policy from openstack.tests.functional import base class TestQoSPolicy(base.BaseFunctionalTest): QOS_POLICY_ID = None IS_SHARED = False IS_DEFAULT = False RULES: ty.List[str] = [] QOS_POLICY_DESCRIPTION = "QoS policy description" def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") # Skip the tests if qos extension is not enabled. 
if not self.operator_cloud.network.find_extension("qos"): self.skipTest("Network qos extension disabled") self.QOS_POLICY_NAME = self.getUniqueString() self.QOS_POLICY_NAME_UPDATED = self.getUniqueString() qos = self.operator_cloud.network.create_qos_policy( description=self.QOS_POLICY_DESCRIPTION, name=self.QOS_POLICY_NAME, shared=self.IS_SHARED, is_default=self.IS_DEFAULT, ) assert isinstance(qos, _qos_policy.QoSPolicy) self.assertEqual(self.QOS_POLICY_NAME, qos.name) self.QOS_POLICY_ID = qos.id def tearDown(self): sot = self.operator_cloud.network.delete_qos_policy(self.QOS_POLICY_ID) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.operator_cloud.network.find_qos_policy(self.QOS_POLICY_NAME) self.assertEqual(self.QOS_POLICY_ID, sot.id) def test_get(self): sot = self.operator_cloud.network.get_qos_policy(self.QOS_POLICY_ID) self.assertEqual(self.QOS_POLICY_NAME, sot.name) self.assertEqual(self.IS_SHARED, sot.is_shared) self.assertEqual(self.RULES, sot.rules) self.assertEqual(self.QOS_POLICY_DESCRIPTION, sot.description) self.assertEqual(self.IS_DEFAULT, sot.is_default) def test_list(self): names = [o.name for o in self.operator_cloud.network.qos_policies()] self.assertIn(self.QOS_POLICY_NAME, names) def test_update(self): sot = self.operator_cloud.network.update_qos_policy( self.QOS_POLICY_ID, name=self.QOS_POLICY_NAME_UPDATED ) self.assertEqual(self.QOS_POLICY_NAME_UPDATED, sot.name) def test_set_tags(self): sot = self.operator_cloud.network.get_qos_policy(self.QOS_POLICY_ID) self.assertEqual([], sot.tags) self.operator_cloud.network.set_tags(sot, ["blue"]) sot = self.operator_cloud.network.get_qos_policy(self.QOS_POLICY_ID) self.assertEqual(["blue"], sot.tags) self.operator_cloud.network.set_tags(sot, []) sot = self.operator_cloud.network.get_qos_policy(self.QOS_POLICY_ID) self.assertEqual([], sot.tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_qos_rule_type.py0000664000175000017500000000335400000000000030052 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestQoSRuleType(base.BaseFunctionalTest): QOS_RULE_TYPE = "bandwidth_limit" def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud is required for this test") # Skip the tests if qos-rule-type-details extension is not enabled. 
if not self.operator_cloud.network.find_extension( "qos-rule-type-details" ): self.skipTest("Network qos-rule-type-details extension disabled") def test_find(self): sot = self.operator_cloud.network.find_qos_rule_type( self.QOS_RULE_TYPE ) self.assertEqual(self.QOS_RULE_TYPE, sot.type) self.assertIsInstance(sot.drivers, list) def test_get(self): sot = self.operator_cloud.network.get_qos_rule_type(self.QOS_RULE_TYPE) self.assertEqual(self.QOS_RULE_TYPE, sot.type) self.assertIsInstance(sot.drivers, list) def test_list(self): rule_types = list(self.operator_cloud.network.qos_rule_types()) self.assertGreater(len(rule_types), 0) for rule_type in rule_types: self.assertIsInstance(rule_type.type, str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_quota.py0000664000175000017500000000337400000000000026313 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional import base class TestQuota(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud: self.skipTest("Operator cloud required for this test") def test_list(self): for qot in self.operator_cloud.network.quotas(): self.assertIsNotNone(qot.project_id) self.assertIsNotNone(qot.networks) def test_list_details(self): expected_keys = ["limit", "used", "reserved"] project_id = self.operator_cloud.session.get_project_id() quota_details = self.operator_cloud.network.get_quota( project_id, details=True ) for details in quota_details._body.attributes.values(): for expected_key in expected_keys: self.assertTrue(expected_key in details.keys()) def test_set(self): attrs = {"networks": 123456789} for project_quota in self.operator_cloud.network.quotas(): self.operator_cloud.network.update_quota(project_quota, **attrs) new_quota = self.operator_cloud.network.get_quota( project_quota.project_id ) self.assertEqual(123456789, new_quota.networks) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_rbac_policy.py0000664000175000017500000000645200000000000027450 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network from openstack.network.v2 import rbac_policy from openstack.tests.functional import base class TestRBACPolicy(base.BaseFunctionalTest): ACTION = "access_as_shared" OBJ_TYPE = "network" TARGET_TENANT_ID = "*" NET_ID = None ID = None def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("rbac-policies"): self.skipTest( "Neutron rbac-policies extension is required for this test" ) self.NET_NAME = self.getUniqueString("net") self.UPDATE_NAME = self.getUniqueString() net = self.user_cloud.network.create_network(name=self.NET_NAME) assert isinstance(net, network.Network) self.NET_ID = net.id if self.operator_cloud: sot = self.operator_cloud.network.create_rbac_policy( action=self.ACTION, object_type=self.OBJ_TYPE, target_tenant=self.TARGET_TENANT_ID, object_id=self.NET_ID, ) assert isinstance(sot, rbac_policy.RBACPolicy) self.ID = sot.id else: sot = self.user_cloud.network.create_rbac_policy( action=self.ACTION, object_type=self.OBJ_TYPE, target_tenant=self.user_cloud.current_project_id, object_id=self.NET_ID, ) assert isinstance(sot, rbac_policy.RBACPolicy) self.ID = sot.id def tearDown(self): if self.operator_cloud: sot = self.operator_cloud.network.delete_rbac_policy( self.ID, ignore_missing=False ) else: sot = self.user_cloud.network.delete_rbac_policy( self.ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network( self.NET_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_find(self): if self.operator_cloud: sot = self.operator_cloud.network.find_rbac_policy(self.ID) else: sot = self.user_cloud.network.find_rbac_policy(self.ID) self.assertEqual(self.ID, sot.id) def test_get(self): if self.operator_cloud: sot = self.operator_cloud.network.get_rbac_policy(self.ID) else: sot = self.user_cloud.network.get_rbac_policy(self.ID) self.assertEqual(self.ID, sot.id) def test_list(self): if self.operator_cloud: ids = [o.id for o in 
self.operator_cloud.network.rbac_policies()] else: ids = [o.id for o in self.user_cloud.network.rbac_policies()] if self.ID: self.assertIn(self.ID, ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_router.py0000664000175000017500000000474500000000000026505 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import router from openstack.tests.functional import base class TestRouter(base.BaseFunctionalTest): ID = None def setUp(self): super().setUp() self.NAME = self.getUniqueString() self.UPDATE_NAME = self.getUniqueString() sot = self.user_cloud.network.create_router(name=self.NAME) assert isinstance(sot, router.Router) self.assertEqual(self.NAME, sot.name) self.ID = sot.id def tearDown(self): sot = self.user_cloud.network.delete_router( self.ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_router(self.NAME) self.assertEqual(self.ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_router(self.ID) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.ID, sot.id) if not self.user_cloud._has_neutron_extension("l3-ha"): self.assertFalse(sot.is_ha) def test_list(self): names = [o.name for o in self.user_cloud.network.routers()] self.assertIn(self.NAME, names) if not self.user_cloud._has_neutron_extension("l3-ha"): ha = 
[o.is_ha for o in self.user_cloud.network.routers()] self.assertIn(False, ha) def test_update(self): sot = self.user_cloud.network.update_router( self.ID, name=self.UPDATE_NAME ) self.assertEqual(self.UPDATE_NAME, sot.name) def test_set_tags(self): sot = self.user_cloud.network.get_router(self.ID) self.assertEqual([], sot.tags) self.user_cloud.network.set_tags(sot, ["blue"]) sot = self.user_cloud.network.get_router(self.ID) self.assertEqual(["blue"], sot.tags) self.user_cloud.network.set_tags(sot, []) sot = self.user_cloud.network.get_router(self.ID) self.assertEqual([], sot.tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_router_add_remove_interface.py0000664000175000017500000000532500000000000032705 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network from openstack.network.v2 import router from openstack.network.v2 import subnet from openstack.tests.functional import base class TestRouterInterface(base.BaseFunctionalTest): CIDR = "10.100.0.0/16" IPV4 = 4 ROUTER_ID: str NET_ID: str SUB_ID: str ROT: router.Router def setUp(self): super().setUp() self.ROUTER_NAME = self.getUniqueString() self.NET_NAME = self.getUniqueString() self.SUB_NAME = self.getUniqueString() sot = self.user_cloud.network.create_router(name=self.ROUTER_NAME) assert isinstance(sot, router.Router) self.assertEqual(self.ROUTER_NAME, sot.name) net = self.user_cloud.network.create_network(name=self.NET_NAME) assert isinstance(net, network.Network) self.assertEqual(self.NET_NAME, net.name) sub = self.user_cloud.network.create_subnet( name=self.SUB_NAME, ip_version=self.IPV4, network_id=net.id, cidr=self.CIDR, ) assert isinstance(sub, subnet.Subnet) self.assertEqual(self.SUB_NAME, sub.name) self.ROUTER_ID = sot.id self.ROT = sot self.NET_ID = net.id self.SUB_ID = sub.id def tearDown(self): sot = self.user_cloud.network.delete_router( self.ROUTER_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_subnet( self.SUB_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network( self.NET_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_router_add_remove_interface(self): iface = self.ROT.add_interface( self.user_cloud.network, subnet_id=self.SUB_ID ) self._verification(iface) iface = self.ROT.remove_interface( self.user_cloud.network, subnet_id=self.SUB_ID ) self._verification(iface) def _verification(self, interface): self.assertEqual(interface["subnet_id"], self.SUB_ID) self.assertIn("port_id", interface) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_security_group.py0000664000175000017500000000437500000000000030247 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import security_group from openstack.tests.functional import base class TestSecurityGroup(base.BaseFunctionalTest): ID = None def setUp(self): super().setUp() self.NAME = self.getUniqueString() sot = self.user_cloud.network.create_security_group(name=self.NAME) assert isinstance(sot, security_group.SecurityGroup) self.assertEqual(self.NAME, sot.name) self.ID = sot.id def tearDown(self): sot = self.user_cloud.network.delete_security_group( self.ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_security_group(self.NAME) self.assertEqual(self.ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_security_group(self.ID) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.ID, sot.id) def test_list(self): names = [o.name for o in self.user_cloud.network.security_groups()] self.assertIn(self.NAME, names) def test_list_query_list_of_ids(self): ids = [ o.id for o in self.user_cloud.network.security_groups(id=[self.ID]) ] self.assertIn(self.ID, ids) def test_set_tags(self): sot = self.user_cloud.network.get_security_group(self.ID) self.assertEqual([], sot.tags) self.user_cloud.network.set_tags(sot, ["blue"]) sot = self.user_cloud.network.get_security_group(self.ID) 
self.assertEqual(["blue"], sot.tags) self.user_cloud.network.set_tags(sot, []) sot = self.user_cloud.network.get_security_group(self.ID) self.assertEqual([], sot.tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_security_group_rule.py0000664000175000017500000000514000000000000031265 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import security_group from openstack.network.v2 import security_group_rule from openstack.tests.functional import base class TestSecurityGroupRule(base.BaseFunctionalTest): IPV4 = "IPv4" PROTO = "tcp" PORT = 22 DIR = "ingress" ID = None RULE_ID = None def setUp(self): super().setUp() self.NAME = self.getUniqueString() sot = self.user_cloud.network.create_security_group(name=self.NAME) assert isinstance(sot, security_group.SecurityGroup) self.assertEqual(self.NAME, sot.name) self.ID = sot.id rul = self.user_cloud.network.create_security_group_rule( direction=self.DIR, ethertype=self.IPV4, port_range_max=self.PORT, port_range_min=self.PORT, protocol=self.PROTO, security_group_id=self.ID, ) assert isinstance(rul, security_group_rule.SecurityGroupRule) self.assertEqual(self.ID, rul.security_group_id) self.RULE_ID = rul.id def tearDown(self): sot = self.user_cloud.network.delete_security_group_rule( self.RULE_ID, ignore_missing=False ) self.assertIsNone(sot) sot = 
self.user_cloud.network.delete_security_group( self.ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_security_group_rule(self.RULE_ID) self.assertEqual(self.RULE_ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_security_group_rule(self.RULE_ID) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.DIR, sot.direction) self.assertEqual(self.PROTO, sot.protocol) self.assertEqual(self.PORT, sot.port_range_min) self.assertEqual(self.PORT, sot.port_range_max) self.assertEqual(self.ID, sot.security_group_id) def test_list(self): ids = [o.id for o in self.user_cloud.network.security_group_rules()] self.assertIn(self.RULE_ID, ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_segment.py0000664000175000017500000000765600000000000026633 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network from openstack.network.v2 import segment from openstack.tests.functional import base class TestSegment(base.BaseFunctionalTest): NETWORK_TYPE = None PHYSICAL_NETWORK = None SEGMENTATION_ID = None NETWORK_ID = None SEGMENT_ID = None SEGMENT_EXTENSION = None def setUp(self): super().setUp() self.NETWORK_NAME = self.getUniqueString() if not self.operator_cloud: self.skipTest("Operator cloud required for this test") # NOTE(rtheis): The segment extension is not yet enabled by default. # Skip the tests if not enabled. if not self.operator_cloud.network.find_extension("segment"): self.skipTest("Segment extension disabled") # Create a network to hold the segment. net = self.operator_cloud.network.create_network( name=self.NETWORK_NAME ) assert isinstance(net, network.Network) self.assertEqual(self.NETWORK_NAME, net.name) self.NETWORK_ID = net.id if self.SEGMENT_EXTENSION: # Get the segment for the network. for seg in self.operator_cloud.network.segments(): assert isinstance(seg, segment.Segment) if self.NETWORK_ID == seg.network_id: self.NETWORK_TYPE = seg.network_type self.PHYSICAL_NETWORK = seg.physical_network self.SEGMENTATION_ID = seg.segmentation_id self.SEGMENT_ID = seg.id break def tearDown(self): sot = self.operator_cloud.network.delete_network( self.NETWORK_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_create_delete(self): sot = self.operator_cloud.network.create_segment( description="test description", name="test name", network_id=self.NETWORK_ID, network_type="geneve", segmentation_id=2055, ) self.assertIsInstance(sot, segment.Segment) del_sot = self.operator_cloud.network.delete_segment(sot.id) self.assertEqual("test description", sot.description) self.assertEqual("test name", sot.name) self.assertEqual(self.NETWORK_ID, sot.network_id) self.assertEqual("geneve", sot.network_type) self.assertIsNone(sot.physical_network) self.assertEqual(2055, sot.segmentation_id) self.assertIsNone(del_sot) 
def test_find(self): sot = self.operator_cloud.network.find_segment(self.SEGMENT_ID) self.assertEqual(self.SEGMENT_ID, sot.id) def test_get(self): sot = self.operator_cloud.network.get_segment(self.SEGMENT_ID) self.assertEqual(self.SEGMENT_ID, sot.id) self.assertIsNone(sot.name) self.assertEqual(self.NETWORK_ID, sot.network_id) self.assertEqual(self.NETWORK_TYPE, sot.network_type) self.assertEqual(self.PHYSICAL_NETWORK, sot.physical_network) self.assertEqual(self.SEGMENTATION_ID, sot.segmentation_id) def test_list(self): ids = [o.id for o in self.operator_cloud.network.segments(name=None)] self.assertIn(self.SEGMENT_ID, ids) def test_update(self): sot = self.operator_cloud.network.update_segment( self.SEGMENT_ID, description="update" ) self.assertEqual("update", sot.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_service_profile.py0000664000175000017500000000663400000000000030344 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import service_profile as _service_profile from openstack.tests.functional import base class TestServiceProfile(base.BaseFunctionalTest): SERVICE_PROFILE_DESCRIPTION = "DESCRIPTION" UPDATE_DESCRIPTION = "UPDATED-DESCRIPTION" METAINFO = "FlAVOR_PROFILE_METAINFO" ID = None def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("flavors"): self.skipTest("Neutron flavor extension is required for this test") if self.operator_cloud: service_profiles = ( self.operator_cloud.network.create_service_profile( description=self.SERVICE_PROFILE_DESCRIPTION, metainfo=self.METAINFO, ) ) assert isinstance( service_profiles, _service_profile.ServiceProfile ) self.assertEqual( self.SERVICE_PROFILE_DESCRIPTION, service_profiles.description ) self.assertEqual(self.METAINFO, service_profiles.meta_info) self.ID = service_profiles.id def tearDown(self): if self.ID: service_profiles = ( self.operator_cloud.network.delete_service_profile( self.ID, ignore_missing=True ) ) self.assertIsNone(service_profiles) super().tearDown() def test_find(self): self.user_cloud.network.find_service_profile( name_or_id="not_existing", ignore_missing=True ) if self.operator_cloud and self.ID: service_profiles = ( self.operator_cloud.network.find_service_profile(self.ID) ) self.assertEqual(self.METAINFO, service_profiles.meta_info) def test_get(self): if not self.ID: self.skipTest("ServiceProfile was not created") service_profiles = self.operator_cloud.network.get_service_profile( self.ID ) self.assertEqual(self.METAINFO, service_profiles.meta_info) self.assertEqual( self.SERVICE_PROFILE_DESCRIPTION, service_profiles.description ) def test_update(self): if not self.ID: self.skipTest("ServiceProfile was not created") service_profiles = self.operator_cloud.network.update_service_profile( self.ID, description=self.UPDATE_DESCRIPTION ) self.assertEqual(self.UPDATE_DESCRIPTION, service_profiles.description) def test_list(self): # Test in user scope 
self.user_cloud.network.service_profiles() # Test as operator if self.operator_cloud: metainfos = [ f.meta_info for f in self.operator_cloud.network.service_profiles() ] if self.ID: self.assertIn(self.METAINFO, metainfos) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_service_provider.py0000664000175000017500000000175000000000000030530 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.functional import base class TestServiceProvider(base.BaseFunctionalTest): def test_list(self): providers = list(self.user_cloud.network.service_providers()) names = [o.name for o in providers] service_types = [o.service_type for o in providers] if self.user_cloud._has_neutron_extension("l3-ha"): self.assertIn("ha", names) self.assertIn("L3_ROUTER_NAT", service_types) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_sfc.py0000664000175000017500000001241200000000000025726 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import network as _network from openstack.network.v2 import port as _port from openstack.network.v2 import sfc_flow_classifier as _flow_classifier from openstack.network.v2 import subnet as _subnet from openstack.tests.functional import base class TestSFCFlowClassifier(base.BaseFunctionalTest): FC_ID = None def setUp(self): super().setUp() if not self.user_cloud.network.find_extension("sfc"): self.skipTest("Neutron SFC Extension disabled") self.FLOW_CLASSIFIER_NAME = 'my_classifier' + self.getUniqueString() self.UPDATE_NAME = 'updated' + self.getUniqueString() self.NET_NAME = 'network1' + self.getUniqueString() self.SUBNET_NAME = 'subnet1' + self.getUniqueString() self.PORT1_NAME = 'port1' + self.getUniqueString() self.PORT2_NAME = 'port2' + self.getUniqueString() self.ETHERTYPE = 'IPv4' self.PROTOCOL = 'tcp' self.S_PORT_RANGE_MIN = 80 self.S_PORT_RANGE_MAX = 80 self.D_PORT_RANGE_MIN = 180 self.D_PORT_RANGE_MAX = 180 self.CIDR = "10.101.0.0/24" self.SOURCE_IP = '10.101.1.12/32' self.DESTINATION_IP = '10.102.2.12/32' self.PORT_CHAIN_NAME = 'my_chain' + self.getUniqueString() self.PORT_PAIR_NAME = 'my_port_pair' + self.getUniqueString() self.PORT_PAIR_GROUP_NAME = ( 'my_port_pair_group' + self.getUniqueString() ) self.SERVICE_GRAPH_NAME = 'my_service_graph' + self.getUniqueString() self.op_net_client = self.operator_cloud.network net = self.op_net_client.create_network(name=self.NET_NAME) self.assertIsInstance(net, _network.Network) self.NETWORK = net subnet = self.operator_cloud.network.create_subnet( name=self.SUBNET_NAME, ip_version=4, 
network_id=self.NETWORK.id, cidr=self.CIDR, ) self.assertIsInstance(subnet, _subnet.Subnet) self.SUBNET = subnet self.PORT1 = self._create_port( network=self.NETWORK, port_name=self.PORT1_NAME ) self.PORT2 = self._create_port( network=self.NETWORK, port_name=self.PORT2_NAME ) flow_cls = self.op_net_client.create_sfc_flow_classifier( name=self.FLOW_CLASSIFIER_NAME, ethertype=self.ETHERTYPE, protocol=self.PROTOCOL, source_port_range_min=self.S_PORT_RANGE_MIN, source_port_range_max=self.S_PORT_RANGE_MAX, destination_port_range_min=self.D_PORT_RANGE_MIN, destination_port_range_max=self.D_PORT_RANGE_MAX, source_ip_prefix=self.SOURCE_IP, destination_ip_prefix=self.DESTINATION_IP, logical_source_port=self.PORT1.id, logical_destination_port=self.PORT2.id, ) self.assertIsInstance(flow_cls, _flow_classifier.SfcFlowClassifier) self.FLOW_CLASSIFIER = flow_cls self.FC_ID = flow_cls.id def _create_port(self, network, port_name): port = self.op_net_client.create_port( name=port_name, network_id=network.id, ) self.assertIsInstance(port, _port.Port) return port def tearDown(self): sot = self.operator_cloud.network.delete_sfc_flow_classifier( self.FLOW_CLASSIFIER.id, ignore_missing=True ) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_port(self.PORT1.id) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_port(self.PORT2.id) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_subnet(self.SUBNET.id) self.assertIsNone(sot) sot = self.operator_cloud.network.delete_network(self.NETWORK.id) self.assertIsNone(sot) super().tearDown() def test_sfc_flow_classifier(self): sot = self.operator_cloud.network.find_sfc_flow_classifier( self.FLOW_CLASSIFIER.name ) self.assertEqual(self.ETHERTYPE, sot.ethertype) self.assertEqual(self.SOURCE_IP, sot.source_ip_prefix) self.assertEqual(self.PROTOCOL, sot.protocol) classifiers = [ fc.name for fc in self.operator_cloud.network.sfc_flow_classifiers() ] self.assertIn(self.FLOW_CLASSIFIER_NAME, classifiers) 
classifier = self.operator_cloud.network.get_sfc_flow_classifier( self.FC_ID ) self.assertEqual(self.FLOW_CLASSIFIER_NAME, classifier.name) self.assertEqual(self.FC_ID, classifier.id) classifier = self.operator_cloud.network.update_sfc_flow_classifier( self.FC_ID, name=self.UPDATE_NAME ) self.assertEqual(self.UPDATE_NAME, classifier.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_subnet.py0000664000175000017500000000675000000000000026463 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network from openstack.network.v2 import subnet from openstack.tests.functional import base class TestSubnet(base.BaseFunctionalTest): IPV4 = 4 CIDR = "10.100.0.0/24" DNS_SERVERS = ["8.8.4.4", "8.8.8.8"] POOL = [{"start": "10.100.0.2", "end": "10.100.0.253"}] ROUTES = [{"destination": "10.101.0.0/24", "nexthop": "10.100.0.254"}] NET_ID = None SUB_ID = None def setUp(self): super().setUp() self.NET_NAME = self.getUniqueString() self.SUB_NAME = self.getUniqueString() self.UPDATE_NAME = self.getUniqueString() net = self.user_cloud.network.create_network(name=self.NET_NAME) assert isinstance(net, network.Network) self.assertEqual(self.NET_NAME, net.name) self.NET_ID = net.id sub = self.user_cloud.network.create_subnet( name=self.SUB_NAME, ip_version=self.IPV4, network_id=self.NET_ID, cidr=self.CIDR, dns_nameservers=self.DNS_SERVERS, allocation_pools=self.POOL, host_routes=self.ROUTES, ) assert isinstance(sub, subnet.Subnet) self.assertEqual(self.SUB_NAME, sub.name) self.SUB_ID = sub.id def tearDown(self): sot = self.user_cloud.network.delete_subnet(self.SUB_ID) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network( self.NET_ID, ignore_missing=False ) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_subnet(self.SUB_NAME) self.assertEqual(self.SUB_ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_subnet(self.SUB_ID) self.assertEqual(self.SUB_NAME, sot.name) self.assertEqual(self.SUB_ID, sot.id) self.assertEqual(self.DNS_SERVERS, sot.dns_nameservers) self.assertEqual(self.CIDR, sot.cidr) self.assertEqual(self.POOL, sot.allocation_pools) self.assertEqual(self.IPV4, sot.ip_version) self.assertEqual(self.ROUTES, sot.host_routes) self.assertEqual("10.100.0.1", sot.gateway_ip) self.assertTrue(sot.is_dhcp_enabled) def test_list(self): names = [o.name for o in self.user_cloud.network.subnets()] self.assertIn(self.SUB_NAME, names) def test_update(self): sot = 
self.user_cloud.network.update_subnet( self.SUB_ID, name=self.UPDATE_NAME ) self.assertEqual(self.UPDATE_NAME, sot.name) def test_set_tags(self): sot = self.user_cloud.network.get_subnet(self.SUB_ID) self.assertEqual([], sot.tags) self.user_cloud.network.set_tags(sot, ["blue"]) sot = self.user_cloud.network.get_subnet(self.SUB_ID) self.assertEqual(["blue"], sot.tags) self.user_cloud.network.set_tags(sot, []) sot = self.user_cloud.network.get_subnet(self.SUB_ID) self.assertEqual([], sot.tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_subnet_from_subnet_pool.py0000664000175000017500000000601400000000000032110 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network from openstack.network.v2 import subnet from openstack.network.v2 import subnet_pool from openstack.tests.functional import base class TestSubnetFromSubnetPool(base.BaseFunctionalTest): IPV4 = 4 CIDR = "10.100.0.0/28" MINIMUM_PREFIX_LENGTH = 8 DEFAULT_PREFIX_LENGTH = 24 MAXIMUM_PREFIX_LENGTH = 32 SUBNET_PREFIX_LENGTH = 28 IP_VERSION = 4 PREFIXES = ["10.100.0.0/24"] NET_ID = None SUB_ID = None SUB_POOL_ID = None def setUp(self): super().setUp() self.NET_NAME = self.getUniqueString() self.SUB_NAME = self.getUniqueString() self.SUB_POOL_NAME = self.getUniqueString() sub_pool = self.user_cloud.network.create_subnet_pool( name=self.SUB_POOL_NAME, min_prefixlen=self.MINIMUM_PREFIX_LENGTH, default_prefixlen=self.DEFAULT_PREFIX_LENGTH, max_prefixlen=self.MAXIMUM_PREFIX_LENGTH, prefixes=self.PREFIXES, ) self.assertIsInstance(sub_pool, subnet_pool.SubnetPool) self.assertEqual(self.SUB_POOL_NAME, sub_pool.name) self.SUB_POOL_ID = sub_pool.id net = self.user_cloud.network.create_network(name=self.NET_NAME) self.assertIsInstance(net, network.Network) self.assertEqual(self.NET_NAME, net.name) self.NET_ID = net.id sub = self.user_cloud.network.create_subnet( name=self.SUB_NAME, ip_version=self.IPV4, network_id=self.NET_ID, prefixlen=self.SUBNET_PREFIX_LENGTH, subnetpool_id=self.SUB_POOL_ID, ) self.assertIsInstance(sub, subnet.Subnet) self.assertEqual(self.SUB_NAME, sub.name) self.SUB_ID = sub.id def tearDown(self): sot = self.user_cloud.network.delete_subnet(self.SUB_ID) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network( self.NET_ID, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_subnet_pool(self.SUB_POOL_ID) self.assertIsNone(sot) super().tearDown() def test_get(self): sot = self.user_cloud.network.get_subnet(self.SUB_ID) self.assertEqual(self.SUB_NAME, sot.name) self.assertEqual(self.SUB_ID, sot.id) self.assertEqual(self.CIDR, sot.cidr) self.assertEqual(self.IPV4, sot.ip_version) 
self.assertEqual("10.100.0.1", sot.gateway_ip) self.assertTrue(sot.is_dhcp_enabled) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_subnet_pool.py0000664000175000017500000000663700000000000027520 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import subnet_pool as _subnet_pool from openstack.tests.functional import base class TestSubnetPool(base.BaseFunctionalTest): SUBNET_POOL_ID = None MINIMUM_PREFIX_LENGTH = 8 DEFAULT_PREFIX_LENGTH = 24 MAXIMUM_PREFIX_LENGTH = 32 DEFAULT_QUOTA = 24 IS_SHARED = False IP_VERSION = 4 PREFIXES = ["10.100.0.0/24", "10.101.0.0/24"] def setUp(self): super().setUp() self.SUBNET_POOL_NAME = self.getUniqueString() self.SUBNET_POOL_NAME_UPDATED = self.getUniqueString() subnet_pool = self.user_cloud.network.create_subnet_pool( name=self.SUBNET_POOL_NAME, min_prefixlen=self.MINIMUM_PREFIX_LENGTH, default_prefixlen=self.DEFAULT_PREFIX_LENGTH, max_prefixlen=self.MAXIMUM_PREFIX_LENGTH, default_quota=self.DEFAULT_QUOTA, shared=self.IS_SHARED, prefixes=self.PREFIXES, ) assert isinstance(subnet_pool, _subnet_pool.SubnetPool) self.assertEqual(self.SUBNET_POOL_NAME, subnet_pool.name) self.SUBNET_POOL_ID = subnet_pool.id def tearDown(self): sot = self.user_cloud.network.delete_subnet_pool(self.SUBNET_POOL_ID) self.assertIsNone(sot) super().tearDown() def test_find(self): sot = 
self.user_cloud.network.find_subnet_pool(self.SUBNET_POOL_NAME) self.assertEqual(self.SUBNET_POOL_ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_subnet_pool(self.SUBNET_POOL_ID) self.assertEqual(self.SUBNET_POOL_NAME, sot.name) self.assertEqual(self.MINIMUM_PREFIX_LENGTH, sot.minimum_prefix_length) self.assertEqual(self.DEFAULT_PREFIX_LENGTH, sot.default_prefix_length) self.assertEqual(self.MAXIMUM_PREFIX_LENGTH, sot.maximum_prefix_length) self.assertEqual(self.DEFAULT_QUOTA, sot.default_quota) self.assertEqual(self.IS_SHARED, sot.is_shared) self.assertEqual(self.IP_VERSION, sot.ip_version) self.assertEqual(self.PREFIXES, sot.prefixes) def test_list(self): names = [o.name for o in self.user_cloud.network.subnet_pools()] self.assertIn(self.SUBNET_POOL_NAME, names) def test_update(self): sot = self.user_cloud.network.update_subnet_pool( self.SUBNET_POOL_ID, name=self.SUBNET_POOL_NAME_UPDATED ) self.assertEqual(self.SUBNET_POOL_NAME_UPDATED, sot.name) def test_set_tags(self): sot = self.user_cloud.network.get_subnet_pool(self.SUBNET_POOL_ID) self.assertEqual([], sot.tags) self.user_cloud.network.set_tags(sot, ["blue"]) sot = self.user_cloud.network.get_subnet_pool(self.SUBNET_POOL_ID) self.assertEqual(["blue"], sot.tags) self.user_cloud.network.set_tags(sot, []) sot = self.user_cloud.network.get_subnet_pool(self.SUBNET_POOL_ID) self.assertEqual([], sot.tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_taas.py0000664000175000017500000001176000000000000026110 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import network as _network from openstack.network.v2 import port as _port from openstack.network.v2 import tap_flow as _tap_flow from openstack.network.v2 import tap_service as _tap_service from openstack.tests.functional import base class TestTapService(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.user_cloud.network.find_extension("taas"): self.skipTest("Neutron Tap-as-a-service Extension disabled") self.TAP_S_NAME = 'my_service' + self.getUniqueString() self.TAP_F_NAME = 'my_flow' + self.getUniqueString() net = self.user_cloud.network.create_network() assert isinstance(net, _network.Network) self.SERVICE_NET_ID = net.id net = self.user_cloud.network.create_network() assert isinstance(net, _network.Network) self.FLOW_NET_ID = net.id port = self.user_cloud.network.create_port( network_id=self.SERVICE_NET_ID ) assert isinstance(port, _port.Port) self.SERVICE_PORT_ID = port.id port = self.user_cloud.network.create_port(network_id=self.FLOW_NET_ID) assert isinstance(port, _port.Port) self.FLOW_PORT_ID = port.id tap_service = self.user_cloud.network.create_tap_service( name=self.TAP_S_NAME, port_id=self.SERVICE_PORT_ID ) assert isinstance(tap_service, _tap_service.TapService) self.TAP_SERVICE = tap_service tap_flow = self.user_cloud.network.create_tap_flow( name=self.TAP_F_NAME, tap_service_id=self.TAP_SERVICE.id, source_port=self.FLOW_PORT_ID, direction='BOTH', ) assert isinstance(tap_flow, _tap_flow.TapFlow) self.TAP_FLOW = tap_flow def tearDown(self): sot = self.user_cloud.network.delete_tap_flow( self.TAP_FLOW.id, 
ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_tap_service( self.TAP_SERVICE.id, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_port(self.SERVICE_PORT_ID) self.assertIsNone(sot) sot = self.user_cloud.network.delete_port(self.FLOW_PORT_ID) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network(self.SERVICE_NET_ID) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network(self.FLOW_NET_ID) self.assertIsNone(sot) super().tearDown() def test_find_tap_service(self): sot = self.user_cloud.network.find_tap_service(self.TAP_SERVICE.name) self.assertEqual(self.SERVICE_PORT_ID, sot.port_id) self.assertEqual(self.TAP_S_NAME, sot.name) def test_get_tap_service(self): sot = self.user_cloud.network.get_tap_service(self.TAP_SERVICE.id) self.assertEqual(self.SERVICE_PORT_ID, sot.port_id) self.assertEqual(self.TAP_S_NAME, sot.name) def test_list_tap_services(self): tap_service_ids = [ ts.id for ts in self.user_cloud.network.tap_services() ] self.assertIn(self.TAP_SERVICE.id, tap_service_ids) def test_update_tap_service(self): description = 'My tap service' sot = self.user_cloud.network.update_tap_service( self.TAP_SERVICE.id, description=description ) self.assertEqual(description, sot.description) def test_find_tap_flow(self): sot = self.user_cloud.network.find_tap_flow(self.TAP_FLOW.name) self.assertEqual(self.FLOW_PORT_ID, sot.source_port) self.assertEqual(self.TAP_SERVICE.id, sot.tap_service_id) self.assertEqual('BOTH', sot.direction) self.assertEqual(self.TAP_F_NAME, sot.name) def test_get_tap_flow(self): sot = self.user_cloud.network.get_tap_flow(self.TAP_FLOW.id) self.assertEqual(self.FLOW_PORT_ID, sot.source_port) self.assertEqual(self.TAP_F_NAME, sot.name) self.assertEqual(self.TAP_SERVICE.id, sot.tap_service_id) self.assertEqual('BOTH', sot.direction) def test_list_tap_flows(self): tap_flow_ids = [tf.id for tf in self.user_cloud.network.tap_flows()] 
self.assertIn(self.TAP_FLOW.id, tap_flow_ids) def test_update_tap_flow(self): description = 'My tap flow' sot = self.user_cloud.network.update_tap_flow( self.TAP_FLOW.id, description=description ) self.assertEqual(description, sot.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_tap_mirror.py0000664000175000017500000000606300000000000027336 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network as _network from openstack.network.v2 import port as _port from openstack.network.v2 import tap_mirror as _tap_mirror from openstack.tests.functional import base class TestTapMirror(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.user_cloud.network.find_extension("tap-mirror"): self.skipTest("Neutron Tap Mirror Extension disabled") self.TAP_M_NAME = 'my_tap_mirror' + self.getUniqueString() net = self.user_cloud.network.create_network() assert isinstance(net, _network.Network) self.MIRROR_NET_ID = net.id port = self.user_cloud.network.create_port( network_id=self.MIRROR_NET_ID ) assert isinstance(port, _port.Port) self.MIRROR_PORT_ID = port.id self.REMOTE_IP = '193.10.10.2' self.MIRROR_TYPE = 'erspanv1' tap_mirror = self.user_cloud.network.create_tap_mirror( name=self.TAP_M_NAME, port_id=self.MIRROR_PORT_ID, remote_ip=self.REMOTE_IP, mirror_type=self.MIRROR_TYPE, directions={'IN': 99}, ) assert isinstance(tap_mirror, _tap_mirror.TapMirror) self.TAP_MIRROR = tap_mirror def tearDown(self): sot = self.user_cloud.network.delete_tap_mirror( self.TAP_MIRROR.id, ignore_missing=False ) self.assertIsNone(sot) sot = self.user_cloud.network.delete_port(self.MIRROR_PORT_ID) self.assertIsNone(sot) sot = self.user_cloud.network.delete_network(self.MIRROR_NET_ID) self.assertIsNone(sot) super().tearDown() def test_find_tap_mirror(self): sot = self.user_cloud.network.find_tap_mirror(self.TAP_MIRROR.name) self.assertEqual(self.MIRROR_PORT_ID, sot.port_id) self.assertEqual(self.TAP_M_NAME, sot.name) def test_get_tap_mirror(self): sot = self.user_cloud.network.get_tap_mirror(self.TAP_MIRROR.id) self.assertEqual(self.MIRROR_PORT_ID, sot.port_id) self.assertEqual(self.TAP_M_NAME, sot.name) def test_list_tap_mirrors(self): tap_mirror_ids = [ tm.id for tm in self.user_cloud.network.tap_mirrors() ] self.assertIn(self.TAP_MIRROR.id, tap_mirror_ids) def test_update_tap_mirror(self): description = 'My Tap Mirror' sot = 
self.user_cloud.network.update_tap_mirror( self.TAP_MIRROR.id, description=description ) self.assertEqual(description, sot.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_trunk.py0000664000175000017500000000712400000000000026322 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import network from openstack.network.v2 import port from openstack.network.v2 import trunk as _trunk from openstack.tests.functional import base class TestTrunk(base.BaseFunctionalTest): TIMEOUT_SCALING_FACTOR = 2.0 def setUp(self): super().setUp() # Skip the tests if trunk extension is not enabled. 
if not self.user_cloud.network.find_extension("trunk"): self.skipTest("Network trunk extension disabled") self.TRUNK_NAME = self.getUniqueString() self.TRUNK_NAME_UPDATED = self.getUniqueString() net = self.user_cloud.network.create_network() assert isinstance(net, network.Network) self.NET_ID = net.id prt = self.user_cloud.network.create_port(network_id=self.NET_ID) assert isinstance(prt, port.Port) self.PORT_ID = prt.id self.ports_to_clean = [self.PORT_ID] trunk = self.user_cloud.network.create_trunk( name=self.TRUNK_NAME, port_id=self.PORT_ID ) assert isinstance(trunk, _trunk.Trunk) self.TRUNK_ID = trunk.id def tearDown(self): self.user_cloud.network.delete_trunk( self.TRUNK_ID, ignore_missing=False ) for port_id in self.ports_to_clean: self.user_cloud.network.delete_port(port_id, ignore_missing=False) self.user_cloud.network.delete_network( self.NET_ID, ignore_missing=False ) super().tearDown() def test_find(self): sot = self.user_cloud.network.find_trunk(self.TRUNK_NAME) self.assertEqual(self.TRUNK_ID, sot.id) def test_get(self): sot = self.user_cloud.network.get_trunk(self.TRUNK_ID) self.assertEqual(self.TRUNK_ID, sot.id) self.assertEqual(self.TRUNK_NAME, sot.name) def test_list(self): ids = [o.id for o in self.user_cloud.network.trunks()] self.assertIn(self.TRUNK_ID, ids) def test_update(self): sot = self.user_cloud.network.update_trunk( self.TRUNK_ID, name=self.TRUNK_NAME_UPDATED ) self.assertEqual(self.TRUNK_NAME_UPDATED, sot.name) def test_subports(self): port_for_subport = self.user_cloud.network.create_port( network_id=self.NET_ID ) self.ports_to_clean.append(port_for_subport.id) subports = [ { "port_id": port_for_subport.id, "segmentation_type": "vlan", "segmentation_id": 111, } ] sot = self.user_cloud.network.get_trunk_subports(self.TRUNK_ID) self.assertEqual({"sub_ports": []}, sot) self.user_cloud.network.add_trunk_subports(self.TRUNK_ID, subports) sot = self.user_cloud.network.get_trunk_subports(self.TRUNK_ID) self.assertEqual({"sub_ports": 
subports}, sot) self.user_cloud.network.delete_trunk_subports( self.TRUNK_ID, [{"port_id": port_for_subport.id}] ) sot = self.user_cloud.network.get_trunk_subports(self.TRUNK_ID) self.assertEqual({"sub_ports": []}, sot) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/network/v2/test_vpnaas.py0000664000175000017500000000424700000000000026452 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import vpn_ike_policy from openstack.tests.functional import base class TestVpnIkePolicy(base.BaseFunctionalTest): ID = None def setUp(self): super().setUp() if not self.user_cloud._has_neutron_extension("vpnaas"): self.skipTest("vpnaas service not supported by cloud") self.IKEPOLICY_NAME = self.getUniqueString("ikepolicy") self.UPDATE_NAME = self.getUniqueString("ikepolicy-updated") policy = self.user_cloud.network.create_vpn_ike_policy( name=self.IKEPOLICY_NAME ) assert isinstance(policy, vpn_ike_policy.VpnIkePolicy) self.assertEqual(self.IKEPOLICY_NAME, policy.name) self.ID = policy.id def tearDown(self): ikepolicy = self.user_cloud.network.delete_vpn_ike_policy( self.ID, ignore_missing=True ) self.assertIsNone(ikepolicy) super().tearDown() def test_list(self): policies = [f.name for f in self.user_cloud.network.vpn_ike_policies()] self.assertIn(self.IKEPOLICY_NAME, policies) def test_find(self): policy = self.user_cloud.network.find_vpn_ike_policy( self.IKEPOLICY_NAME ) self.assertEqual(self.ID, policy.id) def test_get(self): policy = self.user_cloud.network.get_vpn_ike_policy(self.ID) self.assertEqual(self.IKEPOLICY_NAME, policy.name) self.assertEqual(self.ID, policy.id) def test_update(self): policy = self.user_cloud.network.update_vpn_ike_policy( self.ID, name=self.UPDATE_NAME ) self.assertEqual(self.UPDATE_NAME, policy.name) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.377377 openstacksdk-4.0.0/openstack/tests/functional/object_store/0000775000175000017500000000000000000000000024204 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/object_store/__init__.py0000664000175000017500000000000000000000000026303 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 
mtime=1725296385.377377 openstacksdk-4.0.0/openstack/tests/functional/object_store/v1/0000775000175000017500000000000000000000000024532 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/object_store/v1/__init__.py0000664000175000017500000000000000000000000026631 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/object_store/v1/test_account.py0000664000175000017500000000652100000000000027603 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional import base class TestAccount(base.BaseFunctionalTest): def setUp(self): super().setUp() self.require_service('object-store') def tearDown(self): account = self.conn.object_store.get_account_metadata() self.conn.object_store.delete_account_metadata(account.metadata.keys()) super().tearDown() def test_system_metadata(self): account = self.conn.object_store.get_account_metadata() self.assertGreaterEqual(account.account_bytes_used, 0) self.assertGreaterEqual(account.account_container_count, 0) self.assertGreaterEqual(account.account_object_count, 0) def test_custom_metadata(self): # get custom metadata account = self.conn.object_store.get_account_metadata() self.assertFalse(account.metadata) # set no custom metadata self.conn.object_store.set_account_metadata() account = self.conn.object_store.get_account_metadata() self.assertFalse(account.metadata) # set empty custom metadata self.conn.object_store.set_account_metadata(k0='') account = self.conn.object_store.get_account_metadata() self.assertFalse(account.metadata) # set custom metadata self.conn.object_store.set_account_metadata(k1='v1') account = self.conn.object_store.get_account_metadata() self.assertTrue(account.metadata) self.assertEqual(1, len(account.metadata)) self.assertIn('k1', account.metadata) self.assertEqual('v1', account.metadata['k1']) # set more custom metadata self.conn.object_store.set_account_metadata(k2='v2') account = self.conn.object_store.get_account_metadata() self.assertTrue(account.metadata) self.assertEqual(2, len(account.metadata)) self.assertIn('k1', account.metadata) self.assertEqual('v1', account.metadata['k1']) self.assertIn('k2', account.metadata) self.assertEqual('v2', account.metadata['k2']) # update custom metadata self.conn.object_store.set_account_metadata(k1='v1.1') account = self.conn.object_store.get_account_metadata() self.assertTrue(account.metadata) self.assertEqual(2, len(account.metadata)) self.assertIn('k1', account.metadata) 
self.assertEqual('v1.1', account.metadata['k1']) self.assertIn('k2', account.metadata) self.assertEqual('v2', account.metadata['k2']) # unset custom metadata self.conn.object_store.delete_account_metadata(['k1']) account = self.conn.object_store.get_account_metadata() self.assertTrue(account.metadata) self.assertEqual(1, len(account.metadata)) self.assertIn('k2', account.metadata) self.assertEqual('v2', account.metadata['k2']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/object_store/v1/test_container.py0000664000175000017500000001340600000000000030131 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.object_store.v1 import container as _container from openstack.tests.functional import base class TestContainer(base.BaseFunctionalTest): def setUp(self): super().setUp() self.require_service('object-store') self.NAME = self.getUniqueString() container = self.conn.object_store.create_container(name=self.NAME) self.addEmptyCleanup( self.conn.object_store.delete_container, self.NAME, ignore_missing=False, ) assert isinstance(container, _container.Container) self.assertEqual(self.NAME, container.name) def test_list(self): names = [o.name for o in self.conn.object_store.containers()] self.assertIn(self.NAME, names) def test_system_metadata(self): # get system metadata container = self.conn.object_store.get_container_metadata(self.NAME) self.assertEqual(0, container.object_count) self.assertEqual(0, container.bytes_used) # set system metadata container = self.conn.object_store.get_container_metadata(self.NAME) self.assertIsNone(container.read_ACL) self.assertIsNone(container.write_ACL) self.conn.object_store.set_container_metadata( container, read_ACL='.r:*', write_ACL='demo:demo' ) container = self.conn.object_store.get_container_metadata(self.NAME) self.assertEqual('.r:*', container.read_ACL) self.assertEqual('demo:demo', container.write_ACL) # update system metadata self.conn.object_store.set_container_metadata( container, read_ACL='.r:demo' ) container = self.conn.object_store.get_container_metadata(self.NAME) self.assertEqual('.r:demo', container.read_ACL) self.assertEqual('demo:demo', container.write_ACL) # set system metadata and custom metadata self.conn.object_store.set_container_metadata( container, k0='v0', sync_key='1234' ) container = self.conn.object_store.get_container_metadata(self.NAME) self.assertTrue(container.metadata) self.assertIn('k0', container.metadata) self.assertEqual('v0', container.metadata['k0']) self.assertEqual('.r:demo', container.read_ACL) self.assertEqual('demo:demo', container.write_ACL) self.assertEqual('1234', 
container.sync_key) # unset system metadata self.conn.object_store.delete_container_metadata( container, ['sync_key'] ) container = self.conn.object_store.get_container_metadata(self.NAME) self.assertTrue(container.metadata) self.assertIn('k0', container.metadata) self.assertEqual('v0', container.metadata['k0']) self.assertEqual('.r:demo', container.read_ACL) self.assertEqual('demo:demo', container.write_ACL) self.assertIsNone(container.sync_key) def test_custom_metadata(self): # get custom metadata container = self.conn.object_store.get_container_metadata(self.NAME) self.assertFalse(container.metadata) # set no custom metadata self.conn.object_store.set_container_metadata(container) container = self.conn.object_store.get_container_metadata(container) self.assertFalse(container.metadata) # set empty custom metadata self.conn.object_store.set_container_metadata(container, k0='') container = self.conn.object_store.get_container_metadata(container) self.assertFalse(container.metadata) # set custom metadata self.conn.object_store.set_container_metadata(container, k1='v1') container = self.conn.object_store.get_container_metadata(container) self.assertTrue(container.metadata) self.assertEqual(1, len(container.metadata)) self.assertIn('k1', container.metadata) self.assertEqual('v1', container.metadata['k1']) # set more custom metadata by named container self.conn.object_store.set_container_metadata(self.NAME, k2='v2') container = self.conn.object_store.get_container_metadata(container) self.assertTrue(container.metadata) self.assertEqual(2, len(container.metadata)) self.assertIn('k1', container.metadata) self.assertEqual('v1', container.metadata['k1']) self.assertIn('k2', container.metadata) self.assertEqual('v2', container.metadata['k2']) # update metadata self.conn.object_store.set_container_metadata(container, k1='v1.1') container = self.conn.object_store.get_container_metadata(self.NAME) self.assertTrue(container.metadata) self.assertEqual(2, len(container.metadata)) 
self.assertIn('k1', container.metadata) self.assertEqual('v1.1', container.metadata['k1']) self.assertIn('k2', container.metadata) self.assertEqual('v2', container.metadata['k2']) # delete metadata self.conn.object_store.delete_container_metadata(container, ['k1']) container = self.conn.object_store.get_container_metadata(self.NAME) self.assertTrue(container.metadata) self.assertEqual(1, len(container.metadata)) self.assertIn('k2', container.metadata) self.assertEqual('v2', container.metadata['k2']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/object_store/v1/test_obj.py0000664000175000017500000001362400000000000026723 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional import base class TestObject(base.BaseFunctionalTest): DATA = b'abc' def setUp(self): super().setUp() self.require_service('object-store') self.FOLDER = self.getUniqueString() self.FILE = self.getUniqueString() self.conn.object_store.create_container(name=self.FOLDER) self.addCleanup(self.conn.object_store.delete_container, self.FOLDER) self.sot = self.conn.object_store.upload_object( container=self.FOLDER, name=self.FILE, data=self.DATA ) self.addEmptyCleanup( self.conn.object_store.delete_object, self.sot, ignore_missing=False, ) def test_list(self): names = [ o.name for o in self.conn.object_store.objects(container=self.FOLDER) ] self.assertIn(self.FILE, names) def test_download_object(self): result = self.conn.object_store.download_object( self.FILE, container=self.FOLDER ) self.assertEqual(self.DATA, result) result = self.conn.object_store.download_object(self.sot) self.assertEqual(self.DATA, result) def test_system_metadata(self): # get system metadata obj = self.conn.object_store.get_object_metadata( self.FILE, container=self.FOLDER ) # TODO(shade) obj.bytes is coming up None on python3 but not python2 # self.assertGreaterEqual(0, obj.bytes) self.assertIsNotNone(obj.etag) # set system metadata obj = self.conn.object_store.get_object_metadata( self.FILE, container=self.FOLDER ) self.assertIsNone(obj.content_disposition) self.assertIsNone(obj.content_encoding) self.conn.object_store.set_object_metadata( obj, content_disposition='attachment', content_encoding='gzip' ) obj = self.conn.object_store.get_object_metadata(obj) self.assertEqual('attachment', obj.content_disposition) self.assertEqual('gzip', obj.content_encoding) # update system metadata self.conn.object_store.set_object_metadata( obj, content_encoding='deflate' ) obj = self.conn.object_store.get_object_metadata(obj) self.assertEqual('attachment', obj.content_disposition) self.assertEqual('deflate', obj.content_encoding) # set custom metadata 
self.conn.object_store.set_object_metadata(obj, k0='v0') obj = self.conn.object_store.get_object_metadata(obj) self.assertIn('k0', obj.metadata) self.assertEqual('v0', obj.metadata['k0']) self.assertEqual('attachment', obj.content_disposition) self.assertEqual('deflate', obj.content_encoding) # unset more system metadata self.conn.object_store.delete_object_metadata( obj, keys=['content_disposition'] ) obj = self.conn.object_store.get_object_metadata(obj) self.assertIn('k0', obj.metadata) self.assertEqual('v0', obj.metadata['k0']) self.assertIsNone(obj.content_disposition) self.assertEqual('deflate', obj.content_encoding) self.assertIsNone(obj.delete_at) def test_custom_metadata(self): # get custom metadata obj = self.conn.object_store.get_object_metadata( self.FILE, container=self.FOLDER ) self.assertFalse(obj.metadata) # set no custom metadata self.conn.object_store.set_object_metadata(obj) obj = self.conn.object_store.get_object_metadata(obj) self.assertFalse(obj.metadata) # set empty custom metadata self.conn.object_store.set_object_metadata(obj, k0='') obj = self.conn.object_store.get_object_metadata(obj) self.assertFalse(obj.metadata) # set custom metadata self.conn.object_store.set_object_metadata(obj, k1='v1') obj = self.conn.object_store.get_object_metadata(obj) self.assertTrue(obj.metadata) self.assertEqual(1, len(obj.metadata)) self.assertIn('k1', obj.metadata) self.assertEqual('v1', obj.metadata['k1']) # set more custom metadata by named object and container self.conn.object_store.set_object_metadata( self.FILE, self.FOLDER, k2='v2' ) obj = self.conn.object_store.get_object_metadata(obj) self.assertTrue(obj.metadata) self.assertEqual(2, len(obj.metadata)) self.assertIn('k1', obj.metadata) self.assertEqual('v1', obj.metadata['k1']) self.assertIn('k2', obj.metadata) self.assertEqual('v2', obj.metadata['k2']) # update custom metadata self.conn.object_store.set_object_metadata(obj, k1='v1.1') obj = self.conn.object_store.get_object_metadata(obj) 
self.assertTrue(obj.metadata) self.assertEqual(2, len(obj.metadata)) self.assertIn('k1', obj.metadata) self.assertEqual('v1.1', obj.metadata['k1']) self.assertIn('k2', obj.metadata) self.assertEqual('v2', obj.metadata['k2']) # unset custom metadata self.conn.object_store.delete_object_metadata(obj, keys=['k1']) obj = self.conn.object_store.get_object_metadata(obj) self.assertTrue(obj.metadata) self.assertEqual(1, len(obj.metadata)) self.assertIn('k2', obj.metadata) self.assertEqual('v2', obj.metadata['k2']) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.377377 openstacksdk-4.0.0/openstack/tests/functional/orchestration/0000775000175000017500000000000000000000000024406 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/orchestration/__init__.py0000664000175000017500000000000000000000000026505 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.381379 openstacksdk-4.0.0/openstack/tests/functional/orchestration/v1/0000775000175000017500000000000000000000000024734 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/orchestration/v1/__init__.py0000664000175000017500000000000000000000000027033 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/orchestration/v1/hello_world.yaml0000664000175000017500000000201600000000000030131 0ustar00zuulzuul00000000000000# # Minimal HOT template defining a single compute server. 
# heat_template_version: 2013-05-23 description: > Minimal HOT template for stack parameters: key_name: type: string description: Name of an existing key pair to use for the server constraints: - custom_constraint: nova.keypair flavor: type: string description: Flavor for the server to be created default: m1.small constraints: - custom_constraint: nova.flavor image: type: string description: Image ID or image name to use for the server constraints: - custom_constraint: glance.image network: type: string description: Network used by the server resources: server: type: OS::Nova::Server properties: key_name: { get_param: key_name } image: { get_param: image } flavor: { get_param: flavor } networks: [{network: {get_param: network} }] outputs: server_networks: description: The networks of the deployed server value: { get_attr: [server, networks] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/orchestration/v1/test_stack.py0000664000175000017500000000714100000000000027455 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import yaml from openstack import exceptions from openstack.orchestration.v1 import stack from openstack.tests.functional import base from openstack.tests.functional.network.v2 import test_network class TestStack(base.BaseFunctionalTest): NAME = 'test_stack' stack = None network = None subnet = None cidr = '10.99.99.0/16' _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_ORCHESTRATION' def setUp(self): super().setUp() self.require_service('orchestration') if self.conn.compute.find_keypair(self.NAME) is None: self.conn.compute.create_keypair(name=self.NAME) image = next(self.conn.image.images()) tname = "openstack/tests/functional/orchestration/v1/hello_world.yaml" with open(tname) as f: template = yaml.safe_load(f) # TODO(mordred) Fix the need for this. We have better support in # the shade layer. template['heat_template_version'] = '2013-05-23' self.network, self.subnet = test_network.create_network( self.conn, self.NAME, self.cidr ) parameters = { 'image': image.id, 'key_name': self.NAME, 'network': self.network.id, } sot = self.conn.orchestration.create_stack( name=self.NAME, parameters=parameters, template=template, ) assert isinstance(sot, stack.Stack) self.assertEqual(True, (sot.id is not None)) self.stack = sot self.assertEqual(self.NAME, sot.name) self.conn.orchestration.wait_for_status( sot, status='CREATE_COMPLETE', failures=['CREATE_FAILED'], wait=self._wait_for_timeout, ) def tearDown(self): self.conn.orchestration.delete_stack(self.stack, ignore_missing=False) self.conn.compute.delete_keypair(self.NAME) # Need to wait for the stack to go away before network delete try: self.conn.orchestration.wait_for_status( self.stack, 'DELETE_COMPLETE', wait=self._wait_for_timeout ) except exceptions.NotFoundException: pass test_network.delete_network(self.conn, self.network, self.subnet) super().tearDown() def test_list(self): names = [o.name for o in self.conn.orchestration.stacks()] self.assertIn(self.NAME, names) def test_suspend_resume(self): # given 
suspend_status = "SUSPEND_COMPLETE" resume_status = "RESUME_COMPLETE" # when self.conn.orchestration.suspend_stack(self.stack) sot = self.conn.orchestration.wait_for_status( self.stack, suspend_status, wait=self._wait_for_timeout ) # then self.assertEqual(suspend_status, sot.status) # when self.conn.orchestration.resume_stack(self.stack) sot = self.conn.orchestration.wait_for_status( self.stack, resume_status, wait=self._wait_for_timeout ) # then self.assertEqual(resume_status, sot.status) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.381379 openstacksdk-4.0.0/openstack/tests/functional/placement/0000775000175000017500000000000000000000000023472 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/placement/__init__.py0000664000175000017500000000000000000000000025571 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.381379 openstacksdk-4.0.0/openstack/tests/functional/placement/v1/0000775000175000017500000000000000000000000024020 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/placement/v1/__init__.py0000664000175000017500000000000000000000000026117 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/placement/v1/test_resource_provider.py0000664000175000017500000000756700000000000031211 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack.placement.v1 import resource_provider as _resource_provider from openstack.tests.functional import base class TestResourceProvider(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud.has_service('placement'): self.skipTest('placement service not supported by cloud') self.resource_provider_name = self.getUniqueString() resource_provider = ( self.operator_cloud.placement.create_resource_provider( name=self.resource_provider_name, ) ) self.assertIsInstance( resource_provider, _resource_provider.ResourceProvider ) self.assertEqual(self.resource_provider_name, resource_provider.name) self.resource_provider = resource_provider def tearDown(self): result = self.conn.placement.delete_resource_provider( self.resource_provider, ) self.assertIsNone(result) super().tearDown() def test_resource_provider(self): # list all resource providers resource_providers = list( self.operator_cloud.placement.resource_providers() ) self.assertIsInstance( resource_providers[0], _resource_provider.ResourceProvider, ) self.assertIn( self.resource_provider_name, {x.name for x in resource_providers}, ) # retrieve details of the resource provider by name resource_provider = ( self.operator_cloud.placement.find_resource_provider( self.resource_provider.name, ) ) self.assertEqual(self.resource_provider_name, resource_provider.name) # retrieve details of the resource provider by ID resource_provider = ( self.operator_cloud.placement.get_resource_provider( self.resource_provider.id, ) ) self.assertEqual(self.resource_provider_name, 
resource_provider.name) # update the resource provider new_resource_provider_name = self.getUniqueString() resource_provider = ( self.operator_cloud.placement.update_resource_provider( self.resource_provider, name=new_resource_provider_name, generation=self.resource_provider.generation, ) ) self.assertIsInstance( resource_provider, _resource_provider.ResourceProvider, ) self.assertEqual( new_resource_provider_name, resource_provider.name, ) def test_resource_provider_aggregates(self): aggregates = [uuid.uuid4().hex, uuid.uuid4().hex] # update the resource provider aggregates resource_provider = ( self.operator_cloud.placement.set_resource_provider_aggregates( self.resource_provider, *aggregates, ) ) self.assertCountEqual(aggregates, resource_provider.aggregates) # retrieve details of resource provider aggregates resource_provider = ( self.operator_cloud.placement.get_resource_provider_aggregates( self.resource_provider, ) ) self.assertCountEqual(aggregates, resource_provider.aggregates) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/placement/v1/test_resource_provider_inventory.py0000664000175000017500000001333400000000000033313 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.placement.v1 import resource_class as _resource_class from openstack.placement.v1 import resource_provider as _resource_provider from openstack.placement.v1 import ( resource_provider_inventory as _resource_provider_inventory, ) from openstack.tests.functional import base class TestResourceProviderInventory(base.BaseFunctionalTest): def setUp(self): super().setUp() if not self.operator_cloud.has_service('placement'): self.skipTest('placement service not supported by cloud') self.resource_provider_name = self.getUniqueString() self.resource_class_name = f'CUSTOM_{uuid.uuid4().hex.upper()}' resource_class = self.operator_cloud.placement.create_resource_class( name=self.resource_class_name, ) self.assertIsInstance(resource_class, _resource_class.ResourceClass) self.assertEqual(self.resource_class_name, resource_class.name) resource_provider = ( self.operator_cloud.placement.create_resource_provider( name=self.resource_provider_name, ) ) self.assertIsInstance( resource_provider, _resource_provider.ResourceProvider, ) self.assertEqual(self.resource_provider_name, resource_provider.name) self.resource_provider = resource_provider self.resource_class = resource_class def tearDown(self): self.operator_cloud.placement.delete_resource_provider( self.resource_provider, ) self.operator_cloud.placement.delete_resource_class( self.resource_class, ) super().tearDown() def test_resource_provider_inventory(self): # create the resource provider inventory resource_provider_inventory = ( self.operator_cloud.placement.create_resource_provider_inventory( self.resource_provider, resource_class=self.resource_class, total=10, step_size=1, ) ) self.assertIsInstance( resource_provider_inventory, _resource_provider_inventory.ResourceProviderInventory, ) self.assertEqual( self.resource_class.name, resource_provider_inventory.resource_class, ) self.assertEqual(10, resource_provider_inventory.total) # list all resource provider inventories (there should only be one) 
resource_provider_inventories = list( self.operator_cloud.placement.resource_provider_inventories( self.resource_provider ) ) self.assertIsInstance( resource_provider_inventories[0], _resource_provider_inventory.ResourceProviderInventory, ) self.assertIn( self.resource_class.name, {rpi.id for rpi in resource_provider_inventories}, ) # update the resource provider inventory resource_provider_inventory = self.operator_cloud.placement.update_resource_provider_inventory( resource_provider_inventory, total=20, resource_provider_generation=resource_provider_inventory.resource_provider_generation, ) self.assertIsInstance( resource_provider_inventory, _resource_provider_inventory.ResourceProviderInventory, ) self.assertEqual( self.resource_class.name, resource_provider_inventory.id, ) self.assertEqual(20, resource_provider_inventory.total) # retrieve details of the (updated) resource provider inventory resource_provider_inventory = ( self.operator_cloud.placement.get_resource_provider_inventory( resource_provider_inventory, ) ) self.assertIsInstance( resource_provider_inventory, _resource_provider_inventory.ResourceProviderInventory, ) self.assertEqual( self.resource_class.name, resource_provider_inventory.id, ) self.assertEqual(20, resource_provider_inventory.total) # retrieve details of the resource provider inventory using IDs # (requires us to provide the resource provider also) resource_provider_inventory = ( self.operator_cloud.placement.get_resource_provider_inventory( resource_provider_inventory.id, self.resource_provider, ) ) self.assertIsInstance( resource_provider_inventory, _resource_provider_inventory.ResourceProviderInventory, ) self.assertEqual( self.resource_class.name, resource_provider_inventory.id, ) self.assertEqual(20, resource_provider_inventory.total) # (no find_resource_provider_inventory method) # delete the resource provider inventory result = ( self.operator_cloud.placement.delete_resource_provider_inventory( resource_provider_inventory, 
self.resource_provider, ignore_missing=False, ) ) self.assertIsNone(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/placement/v1/test_trait.py0000664000175000017500000000433500000000000026561 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack.placement.v1 import trait as _trait from openstack.tests.functional import base class TestTrait(base.BaseFunctionalTest): def setUp(self): super().setUp() self.skipTest( "This test intermittently fails on DevStack deployments. " "See https://bugs.launchpad.net/placement/+bug/2029520 for more " "information." 
) if not self.operator_cloud.has_service('placement'): self.skipTest('placement service not supported by cloud') self.trait_name = f'CUSTOM_{uuid.uuid4().hex.upper()}' trait = self.operator_cloud.placement.create_trait( name=self.trait_name, ) self.assertIsInstance(trait, _trait.Trait) self.assertEqual(self.trait_name, trait.name) self.trait = trait def tearDown(self): self.operator_cloud.placement.delete_trait(self.trait) super().tearDown() def test_resource_provider_inventory(self): # list all traits traits = list(self.operator_cloud.placement.traits()) self.assertIsInstance(traits[0], _trait.Trait) self.assertIn(self.trait.name, {x.id for x in traits}) # (no update_trait method) # retrieve details of the trait trait = self.operator_cloud.placement.get_trait(self.trait) self.assertIsInstance(trait, _trait.Trait) self.assertEqual(self.trait_name, trait.id) # retrieve details of the trait using IDs trait = self.operator_cloud.placement.get_trait(self.trait_name) self.assertIsInstance(trait, _trait.Trait) self.assertEqual(self.trait_name, trait.id) # (no find_trait method) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3853807 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/0000775000175000017500000000000000000000000025373 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/__init__.py0000664000175000017500000000000000000000000027472 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/base.py0000664000175000017500000000574200000000000026667 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import typing as ty from openstack import resource from openstack.tests.functional import base class BaseSharedFileSystemTest(base.BaseFunctionalTest): min_microversion: ty.Optional[str] = None def setUp(self): super().setUp() self.require_service( 'shared-file-system', min_microversion=self.min_microversion ) self._set_operator_cloud(shared_file_system_api_version='2.82') self._set_user_cloud(shared_file_system_api_version='2.82') def create_share(self, **kwargs): share = self.user_cloud.share.create_share(**kwargs) self.addCleanup( self.user_cloud.share.delete_share, share.id, ignore_missing=True ) self.user_cloud.share.wait_for_status( share, status='available', failures=['error'], interval=5, wait=self._wait_for_timeout, ) self.assertIsNotNone(share.id) return share def create_share_snapshot(self, share_id, **kwargs): share_snapshot = self.user_cloud.share.create_share_snapshot( share_id=share_id, force=True ) self.addCleanup( resource.wait_for_delete, self.user_cloud.share, share_snapshot, wait=self._wait_for_timeout, interval=2, ) self.addCleanup( self.user_cloud.share.delete_share_snapshot, share_snapshot.id, ignore_missing=False, ) self.user_cloud.share.wait_for_status( share_snapshot, status='available', failures=['error'], interval=5, wait=self._wait_for_timeout, ) self.assertIsNotNone(share_snapshot.id) return share_snapshot def create_share_group(self, **kwargs): share_group = self.user_cloud.share.create_share_group(**kwargs) self.addCleanup( self.conn.share.delete_share_group, share_group.id, ignore_missing=True, ) 
self.assertIsNotNone(share_group.id) return share_group def create_resource_lock(self, **kwargs): resource_lock = self.user_cloud.share.create_resource_lock(**kwargs) self.addCleanup( self.user_cloud.share.delete_resource_lock, resource_lock.id, ignore_missing=True, ) self.assertIsNotNone(resource_lock.id) return resource_lock ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/test_availability_zone.py0000664000175000017500000000206400000000000032513 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional.shared_file_system import base class AvailabilityZoneTest(base.BaseSharedFileSystemTest): min_microversion = '2.7' def test_availability_zones(self): azs = self.user_cloud.shared_file_system.availability_zones() self.assertGreater(len(list(azs)), 0) for az in azs: for attribute in ('id', 'name', 'created_at', 'updated_at'): self.assertTrue(hasattr(az, attribute)) self.assertIsInstance(getattr(az, attribute), 'str') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/test_export_locations.py0000664000175000017500000000331500000000000032402 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional.shared_file_system import base class TestExportLocation(base.BaseSharedFileSystemTest): min_microversion = '2.9' def setUp(self): super().setUp() self.SHARE_NAME = self.getUniqueString() my_share = self.create_share( name=self.SHARE_NAME, size=2, share_type="dhss_false", share_protocol='NFS', description=None, ) self.SHARE_ID = my_share.id def test_export_locations(self): exs = self.user_cloud.shared_file_system.export_locations( self.SHARE_ID ) self.assertGreater(len(list(exs)), 0) for ex in exs: for attribute in ( 'id', 'path', 'share_instance_id', 'updated_at', 'created_at', ): self.assertTrue(hasattr(ex, attribute)) self.assertIsInstance(getattr(ex, attribute), 'str') for attribute in ('is_preferred', 'is_admin'): self.assertTrue(hasattr(ex, attribute)) self.assertIsInstance(getattr(ex, attribute), 'bool') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/test_limit.py0000664000175000017500000000275300000000000030131 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional.shared_file_system import base class LimitTest(base.BaseSharedFileSystemTest): def test_limits(self): limits = self.user_cloud.shared_file_system.limits() self.assertGreater(len(list(limits)), 0) for limit in limits: for attribute in ( "maxTotalReplicaGigabytes", "maxTotalShares", "maxTotalShareGigabytes", "maxTotalShareNetworks", "maxTotalShareSnapshots", "maxTotalShareReplicas", "maxTotalSnapshotGigabytes", "totalReplicaGigabytesUsed", "totalShareGigabytesUsed", "totalSharesUsed", "totalShareNetworksUsed", "totalShareSnapshotsUsed", "totalSnapshotGigabytesUsed", "totalShareReplicasUsed", ): self.assertTrue(hasattr(limit, attribute)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/test_quota_class_set.py0000664000175000017500000000303400000000000032175 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional.shared_file_system import base class QuotaClassSetTest(base.BaseSharedFileSystemTest): def test_quota_class_set(self): project_id = self.operator_cloud.current_project_id initial_quota_class_set = ( self.operator_cloud.share.get_quota_class_set(project_id) ) self.assertIn('shares', initial_quota_class_set) initial_backups_value = initial_quota_class_set['backups'] updated_quota_class_set = ( self.operator_cloud.share.update_quota_class_set( project_id, **{ "backups": initial_backups_value + 1, } ) ) self.assertEqual( updated_quota_class_set['backups'], initial_backups_value + 1 ) reverted = self.operator_cloud.share.update_quota_class_set( project_id, **{"backups": initial_backups_value} ) self.assertEqual(initial_quota_class_set, reverted) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/test_resource_lock.py0000664000175000017500000000665600000000000031660 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class ResourceLocksTest(base.BaseSharedFileSystemTest):
    """Functional tests for resource locks on shares and access rules."""

    def setUp(self):
        """Create a share and an access rule, then lock one of each.

        Stores the IDs of both locks so the tests below can fetch and
        list them.
        """
        super().setUp()
        self.SHARE_NAME = self.getUniqueString()
        share = self.user_cloud.shared_file_system.create_share(
            name=self.SHARE_NAME,
            size=2,
            share_type="dhss_false",
            share_protocol='NFS',
            description=None,
        )
        self.SHARE_ID = share.id
        self.user_cloud.shared_file_system.wait_for_status(
            share,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        access_rule = self.user_cloud.share.create_access_rule(
            self.SHARE_ID,
            access_level="rw",
            access_type="ip",
            access_to="0.0.0.0/0",
        )
        # Access rules report readiness via 'state', not 'status'.
        self.user_cloud.shared_file_system.wait_for_status(
            access_rule,
            status='active',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
            status_attr_name='state',
        )
        self.assertIsNotNone(share)
        self.assertIsNotNone(share.id)
        self.ACCESS_ID = access_rule.id
        # Lock the share against deletion and the rule against display.
        share_lock = self.create_resource_lock(
            resource_action='delete',
            resource_type='share',
            resource_id=self.SHARE_ID,
            lock_reason='openstacksdk testing',
        )
        access_lock = self.create_resource_lock(
            resource_action='show',
            resource_type='access_rule',
            resource_id=self.ACCESS_ID,
            lock_reason='openstacksdk testing',
        )
        self.SHARE_LOCK_ID = share_lock.id
        self.ACCESS_LOCK_ID = access_lock.id

    def test_get(self):
        """Fetch both locks by ID and verify their type and key fields."""
        share_lock = self.user_cloud.shared_file_system.get_resource_lock(
            self.SHARE_LOCK_ID
        )
        access_lock = self.user_cloud.shared_file_system.get_resource_lock(
            self.ACCESS_LOCK_ID
        )
        assert isinstance(share_lock, _resource_locks.ResourceLock)
        assert isinstance(access_lock, _resource_locks.ResourceLock)
        self.assertEqual(self.SHARE_LOCK_ID, share_lock.id)
        self.assertEqual(self.ACCESS_LOCK_ID, access_lock.id)
        self.assertEqual('show', access_lock.resource_action)

    def test_list(self):
        """List resource locks and check the expected attributes exist."""
        resource_locks = self.user_cloud.share.resource_locks()
        self.assertGreater(len(list(resource_locks)), 0)
        lock_attrs = (
            'id',
            'lock_reason',
            'resource_type',
            'resource_action',
            'lock_context',
            'created_at',
            'updated_at',
        )
        # NOTE(review): resource_locks() appears to be a generator; the
        # len(list(...)) above may exhaust it, leaving this loop a
        # no-op. Confirm and materialize with list() once if so.
        for lock in resource_locks:
            for attribute in lock_attrs:
                self.assertTrue(hasattr(lock, attribute))
class ShareTest(base.BaseSharedFileSystemTest):
    """Functional tests for share get/find/list/update/revert/resize."""

    def setUp(self):
        """Create a 2 GiB NFS share plus a snapshot of it."""
        super().setUp()
        self.SHARE_NAME = self.getUniqueString()
        my_share = self.create_share(
            name=self.SHARE_NAME,
            size=2,
            share_type="dhss_false",
            share_protocol='NFS',
            description=None,
        )
        self.SHARE_ID = my_share.id
        self.SHARE_SIZE = my_share.size
        my_share_snapshot = self.create_share_snapshot(share_id=self.SHARE_ID)
        self.SHARE_SNAPSHOT_ID = my_share_snapshot.id

    def test_get(self):
        """get_share by ID returns a Share with the expected ID."""
        sot = self.user_cloud.share.get_share(self.SHARE_ID)
        assert isinstance(sot, _share.Share)
        self.assertEqual(self.SHARE_ID, sot.id)

    def test_find(self):
        """find_share resolves the share name to the right share."""
        sot = self.user_cloud.share.find_share(name_or_id=self.SHARE_NAME)
        assert isinstance(sot, _share.Share)
        self.assertEqual(self.SHARE_ID, sot.id)

    def test_list_share(self):
        """Listing shares yields objects with the basic attributes."""
        # Materialize once: the proxy returns a generator, and counting
        # it with len(list(...)) would exhaust it before the attribute
        # loop ran, silently skipping those assertions.
        shares = list(self.user_cloud.share.shares(details=False))
        self.assertGreater(len(shares), 0)
        for share in shares:
            for attribute in ('id', 'name', 'created_at', 'updated_at'):
                self.assertTrue(hasattr(share, attribute))

    def test_update(self):
        """update_share changes the share description."""
        updated_share = self.user_cloud.share.update_share(
            self.SHARE_ID, display_description='updated share'
        )
        get_updated_share = self.user_cloud.share.get_share(updated_share.id)
        self.assertEqual('updated share', get_updated_share.description)

    def test_revert_share_to_snapshot(self):
        """Reverting to a snapshot leaves the share available."""
        self.user_cloud.share.revert_share_to_snapshot(
            self.SHARE_ID, self.SHARE_SNAPSHOT_ID
        )
        get_reverted_share = self.user_cloud.share.get_share(self.SHARE_ID)
        self.user_cloud.share.wait_for_status(
            get_reverted_share,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        self.assertIsNotNone(get_reverted_share.id)

    def test_resize_share_larger(self):
        """Extending the share grows it to the requested size."""
        # Resize to 3 GiB.
        larger_size = 3
        self.user_cloud.share.resize_share(self.SHARE_ID, larger_size)
        get_resized_share = self.user_cloud.share.get_share(self.SHARE_ID)
        self.user_cloud.share.wait_for_status(
            get_resized_share,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        self.assertEqual(larger_size, get_resized_share.size)

    def test_resize_share_smaller(self):
        """Shrinking the share reduces it to the requested size."""
        # Resize to 1 GiB (the previous comment wrongly said 3 GiB).
        smaller_size = 1
        self.user_cloud.share.resize_share(self.SHARE_ID, smaller_size)
        get_resized_share = self.user_cloud.share.get_share(self.SHARE_ID)
        self.user_cloud.share.wait_for_status(
            get_resized_share,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        self.assertEqual(smaller_size, get_resized_share.size)

    def test_resize_share_larger_no_extend(self):
        """With no_extend=True a grow request must be a no-op."""
        larger_size = 3
        self.user_cloud.share.resize_share(
            self.SHARE_ID, larger_size, no_extend=True
        )
        get_resized_share = self.user_cloud.share.get_share(self.SHARE_ID)
        self.user_cloud.share.wait_for_status(
            get_resized_share,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        # Assert that no change was made.
        self.assertEqual(self.SHARE_SIZE, get_resized_share.size)

    def test_resize_share_smaller_no_shrink(self):
        """With no_shrink=True a shrink request must be a no-op."""
        smaller_size = 1
        self.user_cloud.share.resize_share(
            self.SHARE_ID, smaller_size, no_shrink=True
        )
        get_resized_share = self.user_cloud.share.get_share(self.SHARE_ID)
        self.user_cloud.share.wait_for_status(
            get_resized_share,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        # Assert that no change was made.
        self.assertEqual(self.SHARE_SIZE, get_resized_share.size)

    def test_resize_share_with_force(self):
        """Test that extend with force works as expected."""
        # Resize to 3 GiB; force requires admin (operator) privileges.
        larger_size = 3
        self.operator_cloud.share.resize_share(
            self.SHARE_ID, larger_size, force=True
        )
        get_resized_share = self.user_cloud.share.get_share(self.SHARE_ID)
        self.user_cloud.share.wait_for_status(
            get_resized_share,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        self.assertEqual(larger_size, get_resized_share.size)


class ManageUnmanageShareTest(base.BaseSharedFileSystemTest):
    """Functional tests for unmanaging and re-managing a share."""

    def setUp(self):
        """Create a share and record its export path and backend host."""
        super().setUp()
        self.NEW_SHARE = self.create_share(
            share_proto="NFS",
            name="accounting_p8787",
            size=2,
        )
        self.SHARE_ID = self.NEW_SHARE.id
        self.export_locations = self.operator_cloud.share.export_locations(
            self.SHARE_ID
        )
        export_paths = [export['path'] for export in self.export_locations]
        self.export_path = export_paths[0]
        self.share_host = self.operator_cloud.share.get_share(self.SHARE_ID)[
            'host'
        ]

    def test_manage_and_unmanage_share(self):
        """Unmanage the share, confirm it is gone, then manage it back."""
        self.operator_cloud.share.unmanage_share(self.SHARE_ID)
        self.operator_cloud.shared_file_system.wait_for_delete(
            self.NEW_SHARE, interval=2, wait=self._wait_for_timeout
        )
        # The previous try/except silently swallowed a successful
        # lookup, which would mean the share was never unmanaged.
        # Assert the NotFound explicitly instead.
        self.assertRaises(
            exceptions.NotFoundException,
            self.operator_cloud.share.get_share,
            self.SHARE_ID,
        )
        managed_share = self.operator_cloud.share.manage_share(
            self.NEW_SHARE.share_protocol, self.export_path, self.share_host
        )
        self.operator_cloud.share.wait_for_status(
            managed_share,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        self.assertEqual(
            self.NEW_SHARE.share_protocol, managed_share.share_protocol
        )
        managed_host = self.operator_cloud.share.get_share(managed_share.id)[
            'host'
        ]
        self.assertEqual(self.share_host, managed_host)
class ShareAccessRuleTest(base.BaseSharedFileSystemTest):
    """Functional tests for share access rules, including lock options."""

    def setUp(self):
        """Create an available share and an rw IP access rule on it."""
        super().setUp()
        self.SHARE_NAME = self.getUniqueString()
        mys = self.create_share(
            name=self.SHARE_NAME,
            size=2,
            share_type="dhss_false",
            share_protocol='NFS',
            description=None,
        )
        self.user_cloud.shared_file_system.wait_for_status(
            mys,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        self.assertIsNotNone(mys)
        self.assertIsNotNone(mys.id)
        self.SHARE_ID = mys.id
        self.SHARE = mys
        access_rule = self.user_cloud.share.create_access_rule(
            self.SHARE_ID,
            access_level="rw",
            access_type="ip",
            access_to="0.0.0.0/0",
        )
        self.ACCESS_ID = access_rule.id
        self.RESOURCE_KEY = access_rule.resource_key

    def tearDown(self):
        """Best-effort removal of the access rule created in setUp."""
        self.user_cloud.share.delete_access_rule(
            self.ACCESS_ID, self.SHARE_ID, ignore_missing=True
        )
        super().tearDown()

    def test_get_access_rule(self):
        """Fetching the rule by ID returns the same rule."""
        sot = self.user_cloud.shared_file_system.get_access_rule(
            self.ACCESS_ID
        )
        self.assertEqual(self.ACCESS_ID, sot.id)

    def test_list_access_rules(self):
        """Listing rules for the share exposes the expected attributes."""
        rules = self.user_cloud.shared_file_system.access_rules(
            self.SHARE, details=True
        )
        self.assertGreater(len(list(rules)), 0)
        # NOTE(review): access_rules() looks like a generator; the
        # len(list(...)) above may exhaust it so this loop could be a
        # no-op. Confirm and materialize with list() once if so.
        for rule in rules:
            for attribute in (
                'id',
                'created_at',
                'updated_at',
                'access_level',
                'access_type',
                'access_to',
                'share_id',
                'access_key',
                'metadata',
            ):
                self.assertTrue(hasattr(rule, attribute))

    def test_create_delete_access_rule_with_locks(self):
        """A deletion-locked rule can still be deleted with unrestrict."""
        access_rule = self.user_cloud.share.create_access_rule(
            self.SHARE_ID,
            access_level="rw",
            access_type="ip",
            access_to="203.0.113.10",
            lock_deletion=True,
            lock_visibility=True,
        )
        # unrestrict=True lifts the deletion lock placed above.
        self.user_cloud.share.delete_access_rule(
            access_rule['id'], self.SHARE_ID, unrestrict=True
        )
class ShareGroupTest(base.BaseSharedFileSystemTest):
    """Functional tests for share group CRUD operations."""

    def setUp(self):
        """Create a share group and wait for it to become available."""
        super().setUp()
        self.SHARE_GROUP_NAME = self.getUniqueString()
        share_grp = self.user_cloud.shared_file_system.create_share_group(
            name=self.SHARE_GROUP_NAME
        )
        self.user_cloud.shared_file_system.wait_for_status(
            share_grp,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        self.assertIsNotNone(share_grp)
        self.assertIsNotNone(share_grp.id)
        self.SHARE_GROUP_ID = share_grp.id

    def test_get(self):
        """get_share_group by ID returns a ShareGroup."""
        sot = self.user_cloud.shared_file_system.get_share_group(
            self.SHARE_GROUP_ID
        )
        assert isinstance(sot, _share_group.ShareGroup)
        self.assertEqual(self.SHARE_GROUP_ID, sot.id)

    def test_find(self):
        """find_share_group resolves the name to the right group."""
        sot = self.user_cloud.shared_file_system.find_share_group(
            self.SHARE_GROUP_NAME
        )
        assert isinstance(sot, _share_group.ShareGroup)
        self.assertEqual(self.SHARE_GROUP_NAME, sot.name)
        self.assertEqual(self.SHARE_GROUP_ID, sot.id)

    def test_list_delete_share_group(self):
        """List share groups, check attributes, then delete each one."""
        # Materialize once: the proxy returns a generator, and counting
        # it with len(list(...)) would exhaust it before the loop ran.
        s_grps = list(self.user_cloud.shared_file_system.share_groups())
        self.assertGreater(len(s_grps), 0)
        for s_grp in s_grps:
            for attribute in ('id', 'name', 'created_at'):
                self.assertTrue(hasattr(s_grp, attribute))
            # Use user_cloud like the rest of the class; the earlier
            # 'self.conn' reference was inconsistent with it.
            sot = self.user_cloud.shared_file_system.delete_share_group(s_grp)
            self.assertIsNone(sot)

    def test_update(self):
        """update_share_group changes the description."""
        u_gp = self.user_cloud.shared_file_system.update_share_group(
            self.SHARE_GROUP_ID, description='updated share group'
        )
        get_u_gp = self.user_cloud.shared_file_system.get_share_group(u_gp.id)
        self.assertEqual('updated share group', get_u_gp.description)
"License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource from openstack.shared_file_system.v2 import ( share_group_snapshot as _share_group_snapshot, ) from openstack.tests.functional.shared_file_system import base class ShareGroupSnapshotTest(base.BaseSharedFileSystemTest): min_microversion = '2.55' def setUp(self): super().setUp() self.SHARE_GROUP_NAME = self.getUniqueString() share_grp = self.user_cloud.shared_file_system.create_share_group( name=self.SHARE_GROUP_NAME ) self.user_cloud.shared_file_system.wait_for_status( share_grp, status='available', failures=['error'], interval=5, wait=self._wait_for_timeout, ) self.assertIsNotNone(share_grp) self.assertIsNotNone(share_grp.id) self.SHARE_GROUP_ID = share_grp.id self.SHARE_GROUP_SNAPSHOT_NAME = self.getUniqueString() grp_ss = ( self.user_cloud.shared_file_system.create_share_group_snapshot( self.SHARE_GROUP_ID, name=self.SHARE_GROUP_SNAPSHOT_NAME ) ) self.user_cloud.shared_file_system.wait_for_status( grp_ss, status='available', failures=['error'], interval=5, wait=self._wait_for_timeout, ) self.assertIsNotNone(grp_ss) self.assertIsNotNone(grp_ss.id) self.SHARE_GROUP_SNAPSHOT_ID = grp_ss.id def tearDown(self): sot = self.user_cloud.shared_file_system.get_share_group_snapshot( self.SHARE_GROUP_SNAPSHOT_ID ) self.user_cloud.shared_file_system.delete_share_group_snapshot( self.SHARE_GROUP_SNAPSHOT_ID, ignore_missing=False ) resource.wait_for_delete( self.user_cloud.share, sot, wait=self._wait_for_timeout, interval=2 ) 
self.user_cloud.shared_file_system.delete_share_group( self.SHARE_GROUP_ID, ignore_missing=False ) super().tearDown() def test_get(self): sot = self.user_cloud.shared_file_system.get_share_group_snapshot( self.SHARE_GROUP_SNAPSHOT_ID ) assert isinstance(sot, _share_group_snapshot.ShareGroupSnapshot) self.assertEqual(self.SHARE_GROUP_SNAPSHOT_ID, sot.id) def test_list(self): snapshots = self.user_cloud.shared_file_system.share_group_snapshots() self.assertGreater(len(list(snapshots)), 0) for snapshot in snapshots: for attribute in ('id', 'name', 'created_at'): self.assertTrue(hasattr(snapshot, attribute)) def test_update(self): u_ss = self.user_cloud.shared_file_system.update_share_group_snapshot( self.SHARE_GROUP_SNAPSHOT_ID, description='updated share group snapshot', ) get_u_ss = self.user_cloud.shared_file_system.get_share_group_snapshot( u_ss.id ) self.assertEqual('updated share group snapshot', get_u_ss.description) def test_reset(self): res = self.operator_cloud.shared_file_system.reset_share_group_snapshot_status( self.SHARE_GROUP_SNAPSHOT_ID, 'error' ) self.assertIsNone(res) sot = self.user_cloud.shared_file_system.get_share_group_snapshot( self.SHARE_GROUP_SNAPSHOT_ID ) self.assertEqual('error', sot.status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/test_share_instance.py0000664000175000017500000000547200000000000032002 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
class ShareInstanceTest(base.BaseSharedFileSystemTest):
    """Admin-facing functional tests for share instances."""

    # Share instance APIs require manila microversion 2.7+.
    min_microversion = '2.7'

    def setUp(self):
        """Create a share and look up the ID of its backing instance."""
        super().setUp()
        self.SHARE_NAME = self.getUniqueString()
        my_share = self.create_share(
            name=self.SHARE_NAME,
            size=2,
            share_type="dhss_false",
            share_protocol='NFS',
            description=None,
        )
        self.SHARE_ID = my_share.id
        instances_list = self.operator_cloud.share.share_instances()
        self.SHARE_INSTANCE_ID = None
        # NOTE(review): if several instances back this share the last
        # match wins, and if none match the ID stays None and every
        # test below fails on it — confirm a single-instance setup.
        for i in instances_list:
            if i.share_id == self.SHARE_ID:
                self.SHARE_INSTANCE_ID = i.id

    def test_get(self):
        """get_share_instance by ID returns a ShareInstance."""
        sot = self.operator_cloud.share.get_share_instance(
            self.SHARE_INSTANCE_ID
        )
        assert isinstance(sot, _share_instance.ShareInstance)
        self.assertEqual(self.SHARE_INSTANCE_ID, sot.id)

    def test_list_share_instances(self):
        """Listing instances yields entries with expected attributes."""
        share_instances = self.operator_cloud.share.share_instances()
        self.assertGreater(len(list(share_instances)), 0)
        # NOTE(review): share_instances() looks like a generator; the
        # len(list(...)) above may exhaust it so this loop could be a
        # no-op. Confirm and materialize with list() once if so.
        for share_instance in share_instances:
            for attribute in (
                'id',
                'name',
                'created_at',
                'access_rules_status',
                'availability_zone',
            ):
                self.assertTrue(hasattr(share_instance, attribute))

    def test_reset(self):
        """Admin-reset the instance status to 'error' and verify it."""
        res = self.operator_cloud.share.reset_share_instance_status(
            self.SHARE_INSTANCE_ID, 'error'
        )
        self.assertIsNone(res)
        sot = self.operator_cloud.share.get_share_instance(
            self.SHARE_INSTANCE_ID
        )
        self.assertEqual('error', sot.status)

    def test_delete(self):
        """Delete the instance and wait until it is gone."""
        sot = self.operator_cloud.share.get_share_instance(
            self.SHARE_INSTANCE_ID
        )
        fdel = self.operator_cloud.share.delete_share_instance(
            self.SHARE_INSTANCE_ID
        )
        resource.wait_for_delete(
            self.operator_cloud.share,
            sot,
            wait=self._wait_for_timeout,
            interval=2,
        )
        self.assertIsNone(fdel)
class ShareMetadataTest(base.BaseSharedFileSystemTest):
    """Functional tests for share metadata CRUD operations."""

    def setUp(self):
        """Create a share to attach metadata to."""
        super().setUp()
        self.SHARE_NAME = self.getUniqueString()
        my_share = self.create_share(
            name=self.SHARE_NAME,
            size=2,
            share_type="dhss_false",
            share_protocol='NFS',
            description=None,
        )
        self.SHARE_ID = my_share.id
        self.assertIsNotNone(my_share)
        self.assertIsNotNone(my_share.id)

    def test_create(self):
        """Setting metadata returns a Share carrying that metadata."""
        meta = {"foo": "bar"}
        created_share = (
            self.user_cloud.shared_file_system.create_share_metadata(
                self.SHARE_ID, **meta
            )
        )
        assert isinstance(created_share, _share.Share)
        self.assertEqual(created_share['metadata'], meta)

    def test_get_item(self):
        """A single metadata key can be fetched by name."""
        meta = {"foo": "bar"}
        created_share = (
            self.user_cloud.shared_file_system.create_share_metadata(
                self.SHARE_ID, **meta
            )
        )
        returned_share = (
            self.user_cloud.shared_file_system.get_share_metadata_item(
                self.SHARE_ID, "foo"
            )
        )
        self.assertEqual(
            created_share['metadata']['foo'], returned_share['metadata']['foo']
        )

    def test_get(self):
        """All metadata for a share can be fetched at once."""
        meta = {"foo": "bar"}
        created_share = (
            self.user_cloud.shared_file_system.create_share_metadata(
                self.SHARE_ID, **meta
            )
        )
        returned_share = self.user_cloud.shared_file_system.get_share_metadata(
            self.SHARE_ID
        )
        self.assertEqual(
            created_share['metadata']['foo'], returned_share['metadata']['foo']
        )

    def test_update(self):
        """update returns only the new keys but merges server-side.

        The assertions show: the update call's result carries just
        new_meta, while a fresh get returns the merged mapping, and an
        empty update yields an empty metadata payload in the response.
        """
        meta = {"foo": "bar"}
        created_share = (
            self.user_cloud.shared_file_system.create_share_metadata(
                self.SHARE_ID, **meta
            )
        )

        new_meta = {"newFoo": "newBar"}
        full_meta = {"foo": "bar", "newFoo": "newBar"}
        empty_meta: ty.Dict[str, str] = {}

        updated_share = (
            self.user_cloud.shared_file_system.update_share_metadata(
                created_share, new_meta
            )
        )
        self.assertEqual(updated_share['metadata'], new_meta)

        full_metadata = self.user_cloud.shared_file_system.get_share_metadata(
            created_share
        )['metadata']
        self.assertEqual(full_metadata, full_meta)

        share_with_deleted_metadata = (
            self.user_cloud.shared_file_system.update_share_metadata(
                updated_share, empty_meta
            )
        )
        self.assertEqual(share_with_deleted_metadata['metadata'], empty_meta)

    def test_delete(self):
        """delete removes the named keys; unknown keys are tolerated."""
        meta = {"foo": "bar", "newFoo": "newBar"}
        created_share = (
            self.user_cloud.shared_file_system.create_share_metadata(
                self.SHARE_ID, **meta
            )
        )
        # "invalidKey" does not exist; the call must not fail on it.
        self.user_cloud.shared_file_system.delete_share_metadata(
            created_share, ["foo", "invalidKey"]
        )
        deleted_share = self.user_cloud.shared_file_system.get_share_metadata(
            self.SHARE_ID
        )
        self.assertEqual(deleted_share['metadata'], {"newFoo": "newBar"})
class ShareNetworkTest(base.BaseSharedFileSystemTest):
    """Functional tests for share networks on a neutron net/subnet."""

    def setUp(self):
        """Create a neutron network + subnet and a share network on them."""
        super().setUp()
        self.NETWORK_NAME = self.getUniqueString()
        net = self.user_cloud.network.create_network(name=self.NETWORK_NAME)
        self.assertIsNotNone(net)
        self.assertIsNotNone(net.id)
        self.NETWORK_ID = net.id
        self.SUBNET_NAME = self.getUniqueString()
        subnet = self.user_cloud.network.create_subnet(
            name=self.SUBNET_NAME,
            network_id=self.NETWORK_ID,
            ip_version=4,
            cidr='10.0.0.0/24',
        )
        self.SUBNET_ID = subnet.id
        self.SHARE_NETWORK_NAME = self.getUniqueString()
        snt = self.user_cloud.shared_file_system.create_share_network(
            name=self.SHARE_NETWORK_NAME,
            neutron_net_id=self.NETWORK_ID,
            neutron_subnet_id=self.SUBNET_ID,
        )
        self.assertIsNotNone(snt)
        self.assertIsNotNone(snt.id)
        self.SHARE_NETWORK_ID = snt.id

    def tearDown(self):
        """Delete the share network (if present) then the neutron net."""
        sot = self.user_cloud.shared_file_system.delete_share_network(
            self.SHARE_NETWORK_ID, ignore_missing=True
        )
        self.assertIsNone(sot)
        self.user_cloud.network.delete_network(self.NETWORK_ID)
        super().tearDown()

    def test_get(self):
        """The share network reports the neutron net/subnet it was given."""
        sot = self.user_cloud.shared_file_system.get_share_network(
            self.SHARE_NETWORK_ID
        )
        assert isinstance(sot, _share_network.ShareNetwork)
        self.assertEqual(self.SHARE_NETWORK_ID, sot.id)
        self.assertIsNotNone(sot.share_network_subnets)
        self.assertEqual(
            self.NETWORK_ID,
            sot.share_network_subnets[0]['neutron_net_id'],
        )
        self.assertEqual(
            self.SUBNET_ID,
            sot.share_network_subnets[0]['neutron_subnet_id'],
        )

    def test_list_share_network(self):
        """Listing share networks yields entries with basic attributes."""
        share_nets = self.user_cloud.shared_file_system.share_networks(
            details=False
        )
        self.assertGreater(len(list(share_nets)), 0)
        # NOTE(review): share_networks() looks like a generator; the
        # len(list(...)) above may exhaust it so this loop could be a
        # no-op. Confirm and materialize with list() once if so.
        for share_net in share_nets:
            for attribute in ('id', 'name', 'created_at', 'updated_at'):
                self.assertTrue(hasattr(share_net, attribute))

    def test_delete_share_network(self):
        """Deleting the share network returns None on success."""
        sot = self.user_cloud.shared_file_system.delete_share_network(
            self.SHARE_NETWORK_ID
        )
        self.assertIsNone(sot)

    def test_update(self):
        """update_share_network changes the description."""
        unt = self.user_cloud.shared_file_system.update_share_network(
            self.SHARE_NETWORK_ID, description='updated share network'
        )
        get_unt = self.user_cloud.shared_file_system.get_share_network(unt.id)
        self.assertEqual('updated share network', get_unt.description)
class ShareNetworkSubnetTest(base.BaseSharedFileSystemTest):
    """Functional tests for share network subnets."""

    def setUp(self):
        """Create a share network and one subnet in the first AZ."""
        super().setUp()
        zones = self.user_cloud.shared_file_system.availability_zones()
        # next() assumes at least one availability zone exists.
        first_zone = next(zones)

        self.SHARE_NETWORK_NAME = self.getUniqueString()
        snt = self.user_cloud.shared_file_system.create_share_network(
            name=self.SHARE_NETWORK_NAME
        )
        self.assertIsNotNone(snt)
        self.assertIsNotNone(snt.id)
        self.SHARE_NETWORK_ID = snt.id

        snsb = self.user_cloud.shared_file_system.create_share_network_subnet(
            self.SHARE_NETWORK_ID, availability_zone=first_zone.name
        )
        self.assertIsNotNone(snsb)
        self.assertIsNotNone(snsb.id)
        self.SHARE_NETWORK_SUBNET_ID = snsb.id

    def tearDown(self):
        """Delete the subnet, wait for it, then delete the network."""
        subnet = self.user_cloud.shared_file_system.get_share_network_subnet(
            self.SHARE_NETWORK_ID, self.SHARE_NETWORK_SUBNET_ID
        )
        fdel = self.user_cloud.shared_file_system.delete_share_network_subnet(
            self.SHARE_NETWORK_ID,
            self.SHARE_NETWORK_SUBNET_ID,
            ignore_missing=True,
        )
        self.assertIsNone(fdel)
        # The network cannot be removed while it still has subnets.
        self.user_cloud.shared_file_system.wait_for_delete(subnet)
        sot = self.user_cloud.shared_file_system.delete_share_network(
            self.SHARE_NETWORK_ID, ignore_missing=True
        )
        self.assertIsNone(sot)
        super().tearDown()

    def test_get(self):
        """Fetching the subnet returns a ShareNetworkSubnet."""
        sub = self.user_cloud.shared_file_system.get_share_network_subnet(
            self.SHARE_NETWORK_ID, self.SHARE_NETWORK_SUBNET_ID
        )
        assert isinstance(sub, _share_network_subnet.ShareNetworkSubnet)

    def test_list(self):
        """Listing subnets yields entries with the expected attributes."""
        subs = self.user_cloud.shared_file_system.share_network_subnets(
            self.SHARE_NETWORK_ID
        )
        self.assertGreater(len(list(subs)), 0)
        # NOTE(review): share_network_subnets() looks like a generator;
        # len(list(...)) above may exhaust it so this loop could be a
        # no-op. Confirm and materialize with list() once if so.
        for sub in subs:
            for attribute in (
                'id',
                'name',
                'created_at',
                'updated_at',
                'share_network_id',
                'availability_zone',
                'cidr',
                'gateway',
                'ip_version',
                'mtu',
                'network_type',
                'neutron_net_id',
                'neutron_subnet_id',
                'segmentation_id',
                'share_network_name',
            ):
                self.assertTrue(hasattr(sub, attribute))
class ShareSnapshotTest(base.BaseSharedFileSystemTest):
    """Functional tests for share snapshot CRUD operations."""

    def setUp(self):
        """Create an available share and a (forced) snapshot of it."""
        super().setUp()
        self.SHARE_NAME = self.getUniqueString()
        self.SNAPSHOT_NAME = self.getUniqueString()
        my_share = self.operator_cloud.shared_file_system.create_share(
            name=self.SHARE_NAME,
            size=2,
            share_type="dhss_false",
            share_protocol='NFS',
            description=None,
        )
        self.operator_cloud.shared_file_system.wait_for_status(
            my_share,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        self.assertIsNotNone(my_share)
        self.assertIsNotNone(my_share.id)
        self.SHARE_ID = my_share.id
        msp = self.operator_cloud.shared_file_system.create_share_snapshot(
            share_id=self.SHARE_ID, name=self.SNAPSHOT_NAME, force=True
        )
        self.operator_cloud.shared_file_system.wait_for_status(
            msp,
            status='available',
            failures=['error'],
            interval=5,
            wait=self._wait_for_timeout,
        )
        self.assertIsNotNone(msp.id)
        self.SNAPSHOT_ID = msp.id

    def tearDown(self):
        """Delete the snapshot first (blocking), then the share."""
        snpt = self.operator_cloud.shared_file_system.get_share_snapshot(
            self.SNAPSHOT_ID
        )
        sot = self.operator_cloud.shared_file_system.delete_share_snapshot(
            snpt, ignore_missing=False
        )
        # A share cannot be deleted while it still has snapshots.
        self.operator_cloud.shared_file_system.wait_for_delete(
            snpt, interval=2, wait=self._wait_for_timeout
        )
        self.assertIsNone(sot)
        sot = self.operator_cloud.shared_file_system.delete_share(
            self.SHARE_ID, ignore_missing=False
        )
        self.assertIsNone(sot)
        super().tearDown()

    def test_get(self):
        """Fetching the snapshot by ID returns the expected name."""
        sot = self.operator_cloud.shared_file_system.get_share_snapshot(
            self.SNAPSHOT_ID
        )
        self.assertEqual(self.SNAPSHOT_NAME, sot.name)

    def test_list(self):
        """Listing snapshots yields entries with expected attributes."""
        snaps = self.operator_cloud.shared_file_system.share_snapshots(
            details=True
        )
        self.assertGreater(len(list(snaps)), 0)
        # NOTE(review): share_snapshots() looks like a generator; the
        # len(list(...)) above may exhaust it so this loop could be a
        # no-op. Confirm and materialize with list() once if so.
        for snap in snaps:
            for attribute in (
                'id',
                'name',
                'created_at',
                'updated_at',
                'description',
                'share_id',
                'share_proto',
                'share_size',
                'size',
                'status',
                'user_id',
            ):
                self.assertTrue(hasattr(snap, attribute))

    def test_update(self):
        """update_share_snapshot changes the description."""
        u_snap = self.operator_cloud.shared_file_system.update_share_snapshot(
            self.SNAPSHOT_ID, display_description='updated share snapshot'
        )
        get_u_snap = self.operator_cloud.shared_file_system.get_share_snapshot(
            u_snap.id
        )
        self.assertEqual('updated share snapshot', get_u_snap.description)
from openstack.tests.functional.shared_file_system import base class ShareSnapshotInstanceTest(base.BaseSharedFileSystemTest): def setUp(self): super().setUp() self.SHARE_NAME = self.getUniqueString() my_share = self.create_share( name=self.SHARE_NAME, size=2, share_type="dhss_false", share_protocol='NFS', description=None, ) self.SHARE_ID = my_share.id self.create_share_snapshot(share_id=self.SHARE_ID) def test_share_snapshot_instances(self): sots = ( self.operator_cloud.shared_file_system.share_snapshot_instances() ) self.assertGreater(len(list(sots)), 0) for sot in sots: for attribute in ('id', 'name', 'created_at', 'updated_at'): self.assertTrue(hasattr(sot, attribute)) self.assertIsInstance(getattr(sot, attribute), 'str') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/test_storage_pool.py0000664000175000017500000000207000000000000031500 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional.shared_file_system import base class StoragePoolTest(base.BaseSharedFileSystemTest): def test_storage_pools(self): pools = self.operator_cloud.shared_file_system.storage_pools() self.assertGreater(len(list(pools)), 0) for pool in pools: for attribute in ( 'pool', 'name', 'host', 'backend', 'capabilities', ): self.assertTrue(hasattr(pool, attribute)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/functional/shared_file_system/test_user_message.py0000664000175000017500000000306000000000000031465 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.functional.shared_file_system import base class UserMessageTest(base.BaseSharedFileSystemTest): def test_user_messages(self): # TODO(kafilat): We must intentionally cause an asynchronous failure to # ensure that at least one user message exists; u_messages = self.user_cloud.shared_file_system.user_messages() # self.assertGreater(len(list(u_messages)), 0) for u_message in u_messages: for attribute in ( 'id', 'created_at', 'action_id', 'detail_id', 'expires_at', 'message_level', 'project_id', 'request_id', 'resource_id', 'resource_type', 'user_message', ): self.assertTrue(hasattr(u_message, attribute)) self.assertIsInstance(getattr(u_message, attribute), str) self.conn.shared_file_system.delete_user_message(u_message) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3933847 openstacksdk-4.0.0/openstack/tests/unit/0000775000175000017500000000000000000000000020337 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/README.rst0000664000175000017500000000034300000000000022026 0ustar00zuulzuul00000000000000Unit Tests for openstacksdk =========================== For information on how to run and extend these tests, refer to the `contributor guide`__. .. 
__: https://docs.openstack.org/openstacksdk/latest/contributor/testing.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/__init__.py0000664000175000017500000000000000000000000022436 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3933847 openstacksdk-4.0.0/openstack/tests/unit/accelerator/0000775000175000017500000000000000000000000022623 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/accelerator/__init__.py0000664000175000017500000000000000000000000024722 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/accelerator/test_version.py0000664000175000017500000000260700000000000025726 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.accelerator import version from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'status': '3', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3933847 openstacksdk-4.0.0/openstack/tests/unit/accelerator/v2/0000775000175000017500000000000000000000000023152 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/accelerator/v2/__init__.py0000664000175000017500000000000000000000000025251 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/accelerator/v2/test_accelerator_request.py0000664000175000017500000000435400000000000030625 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.accelerator.v2 import accelerator_request as arq from openstack.tests.unit import base FAKE_ID = '0725b527-e51a-41df-ad22-adad5f4546ad' FAKE_RP_UUID = 'f4b7fe6c-8ab4-4914-a113-547af022935b' FAKE_INSTANCE_UUID = '1ce4a597-9836-4e02-bea1-a3a6cbe7b9f9' FAKE_ATTACH_INFO_STR = ( '{"bus": "5e", "device": "00", "domain": "0000", "function": "1"}' ) FAKE = { 'uuid': FAKE_ID, 'device_profile_name': 'fake-devprof', 'device_profile_group_id': 0, 'device_rp_uuid': FAKE_RP_UUID, 'instance_uuid': FAKE_INSTANCE_UUID, 'attach_handle_type': 'PCI', 'attach_handle_info': FAKE_ATTACH_INFO_STR, } class TestAcceleratorRequest(base.TestCase): def test_basic(self): sot = arq.AcceleratorRequest() self.assertEqual('arq', sot.resource_key) self.assertEqual('arqs', sot.resources_key) self.assertEqual('/accelerator_requests', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_patch) def test_make_it(self): sot = arq.AcceleratorRequest(**FAKE) self.assertEqual(FAKE_ID, sot.uuid) self.assertEqual(FAKE['device_profile_name'], sot.device_profile_name) self.assertEqual( FAKE['device_profile_group_id'], sot.device_profile_group_id ) self.assertEqual(FAKE_RP_UUID, sot.device_rp_uuid) self.assertEqual(FAKE_INSTANCE_UUID, sot.instance_uuid) self.assertEqual(FAKE['attach_handle_type'], sot.attach_handle_type) self.assertEqual(FAKE_ATTACH_INFO_STR, sot.attach_handle_info) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/accelerator/v2/test_deployable.py0000664000175000017500000000363600000000000026713 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack.accelerator.v2 import deployable from openstack.tests.unit import base EXAMPLE = { 'uuid': uuid.uuid4(), 'created_at': '2019-08-09T12:14:57.233772', 'updated_at': '2019-08-09T12:15:57.233772', 'parent_id': '1', 'root_id': '1', 'name': 'test_name', 'num_accelerators': '1', 'device_id': '1', } class TestDeployable(base.TestCase): def test_basic(self): sot = deployable.Deployable() self.assertEqual('deployable', sot.resource_key) self.assertEqual('deployables', sot.resources_key) self.assertEqual('/deployables', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = deployable.Deployable(**EXAMPLE) self.assertEqual(EXAMPLE['uuid'], sot.id) self.assertEqual(EXAMPLE['parent_id'], sot.parent_id) self.assertEqual(EXAMPLE['root_id'], sot.root_id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['num_accelerators'], sot.num_accelerators) self.assertEqual(EXAMPLE['device_id'], sot.device_id) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/accelerator/v2/test_device.py0000664000175000017500000000375600000000000026035 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack.accelerator.v2 import device from openstack.tests.unit import base EXAMPLE = { 'id': '1', 'uuid': uuid.uuid4(), 'created_at': '2019-08-09T12:14:57.233772', 'updated_at': '2019-08-09T12:15:57.233772', 'type': 'test_type', 'vendor': '0x8086', 'model': 'test_model', 'std_board_info': '{"product_id": "0x09c4"}', 'vendor_board_info': 'test_vb_info', } class TestDevice(base.TestCase): def test_basic(self): sot = device.Device() self.assertEqual('device', sot.resource_key) self.assertEqual('devices', sot.resources_key) self.assertEqual('/devices', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = device.Device(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['uuid'], sot.uuid) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['vendor'], sot.vendor) self.assertEqual(EXAMPLE['model'], sot.model) self.assertEqual(EXAMPLE['std_board_info'], sot.std_board_info) self.assertEqual(EXAMPLE['vendor_board_info'], sot.vendor_board_info) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/accelerator/v2/test_device_profile.py0000664000175000017500000000400600000000000027542 0ustar00zuulzuul00000000000000# 
Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.accelerator.v2 import device_profile from openstack.tests.unit import base FAKE = { "id": 1, "uuid": "a95e10ae-b3e3-4eab-a513-1afae6f17c51", "name": 'afaas_example_1', "groups": [ { "resources:ACCELERATOR_FPGA": "1", "trait:CUSTOM_FPGA_INTEL_PAC_ARRIA10": "required", "trait:CUSTOM_FUNCTION_ID_3AFB": "required", }, { "resources:CUSTOM_ACCELERATOR_FOO": "2", "resources:CUSTOM_MEMORY": "200", "trait:CUSTOM_TRAIT_ALWAYS": "required", }, ], 'description': 'description_test', } class TestDeviceProfile(base.TestCase): def test_basic(self): sot = device_profile.DeviceProfile() self.assertEqual('device_profile', sot.resource_key) self.assertEqual('device_profiles', sot.resources_key) self.assertEqual('/device_profiles', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_patch) def test_make_it(self): sot = device_profile.DeviceProfile(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['uuid'], sot.uuid) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['groups'], sot.groups) self.assertEqual(FAKE['description'], sot.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/accelerator/v2/test_proxy.py0000664000175000017500000000574100000000000025753 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.accelerator.v2 import _proxy from openstack.accelerator.v2 import accelerator_request from openstack.accelerator.v2 import deployable from openstack.accelerator.v2 import device_profile from openstack.tests.unit import test_proxy_base as test_proxy_base class TestAcceleratorProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestAcceleratorDeployable(TestAcceleratorProxy): def test_list_deployables(self): self.verify_list(self.proxy.deployables, deployable.Deployable) class TestAcceleratorDevice(TestAcceleratorProxy): def test_list_device_profile(self): self.verify_list( self.proxy.device_profiles, device_profile.DeviceProfile ) def test_create_device_profile(self): self.verify_create( self.proxy.create_device_profile, device_profile.DeviceProfile ) def test_delete_device_profile(self): self.verify_delete( self.proxy.delete_device_profile, device_profile.DeviceProfile, False, ) def test_delete_device_profile_ignore(self): self.verify_delete( self.proxy.delete_device_profile, device_profile.DeviceProfile, True, ) def test_get_device_profile(self): self.verify_get( self.proxy.get_device_profile, device_profile.DeviceProfile ) class TestAcceleratorRequest(TestAcceleratorProxy): def 
test_list_accelerator_request(self): self.verify_list( self.proxy.accelerator_requests, accelerator_request.AcceleratorRequest, ) def test_create_accelerator_request(self): self.verify_create( self.proxy.create_accelerator_request, accelerator_request.AcceleratorRequest, ) def test_delete_accelerator_request(self): self.verify_delete( self.proxy.delete_accelerator_request, accelerator_request.AcceleratorRequest, False, ) def test_delete_accelerator_request_ignore(self): self.verify_delete( self.proxy.delete_accelerator_request, accelerator_request.AcceleratorRequest, True, ) def test_get_accelerator_request(self): self.verify_get( self.proxy.get_accelerator_request, accelerator_request.AcceleratorRequest, ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3933847 openstacksdk-4.0.0/openstack/tests/unit/baremetal/0000775000175000017500000000000000000000000022273 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/__init__.py0000664000175000017500000000000000000000000024372 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/test_configdrive.py0000664000175000017500000000757400000000000026220 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import json import os from unittest import mock import testtools from openstack.baremetal import configdrive class TestPopulateDirectory(testtools.TestCase): def _check( self, metadata, user_data=None, network_data=None, vendor_data=None ): with configdrive.populate_directory( metadata, user_data=user_data, network_data=network_data, vendor_data=vendor_data, ) as d: for version in ('2012-08-10', 'latest'): with open( os.path.join(d, 'openstack', version, 'meta_data.json') ) as fp: actual_metadata = json.load(fp) self.assertEqual(metadata, actual_metadata) network_data_file = os.path.join( d, 'openstack', version, 'network_data.json' ) user_data_file = os.path.join( d, 'openstack', version, 'user_data' ) vendor_data_file = os.path.join( d, 'openstack', version, 'vendor_data2.json' ) if network_data is None: self.assertFalse(os.path.exists(network_data_file)) else: with open(network_data_file) as fp: self.assertEqual(network_data, json.load(fp)) if vendor_data is None: self.assertFalse(os.path.exists(vendor_data_file)) else: with open(vendor_data_file) as fp: self.assertEqual(vendor_data, json.load(fp)) if user_data is None: self.assertFalse(os.path.exists(user_data_file)) else: if isinstance(user_data, str): user_data = user_data.encode() with open(user_data_file, 'rb') as fp: self.assertEqual(user_data, fp.read()) # Clean up in __exit__ self.assertFalse(os.path.exists(d)) def test_without_user_data(self): self._check({'foo': 42}) def test_with_user_data(self): self._check({'foo': 42}, b'I am user data') def test_with_user_data_as_string(self): self._check({'foo': 42}, 'I am user data') def test_with_network_data(self): self._check({'foo': 42}, network_data={'networks': {}}) def test_with_vendor_data(self): self._check({'foo': 42}, vendor_data={'foo': 'bar'}) @mock.patch('subprocess.Popen', autospec=True) class TestPack(testtools.TestCase): def 
test_no_genisoimage(self, mock_popen): mock_popen.side_effect = OSError self.assertRaisesRegex( RuntimeError, "genisoimage", configdrive.pack, "/fake" ) def test_genisoimage_fails(self, mock_popen): mock_popen.return_value.communicate.return_value = b"", b"BOOM" mock_popen.return_value.returncode = 1 self.assertRaisesRegex(RuntimeError, "BOOM", configdrive.pack, "/fake") def test_success(self, mock_popen): mock_popen.return_value.communicate.return_value = b"", b"" mock_popen.return_value.returncode = 0 result = configdrive.pack("/fake") # Make sure the result is string on all python versions self.assertIsInstance(result, str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/test_version.py0000664000175000017500000000314300000000000025372 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.baremetal import version from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'status': '3', 'updated': '4', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_head) self.assertEqual('PUT', sot.commit_method) self.assertEqual('POST', sot.create_method) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['updated'], sot.updated) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3973866 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/0000775000175000017500000000000000000000000022621 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/__init__.py0000664000175000017500000000000000000000000024720 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_allocation.py0000664000175000017500000001225200000000000026361 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.baremetal.v1 import allocation from openstack import exceptions from openstack.tests.unit import base FAKE = { "candidate_nodes": [], "created_at": "2016-08-18T22:28:48.165105+00:00", "extra": {}, "last_error": None, "links": [ { "href": "http://127.0.0.1:6385/v1/allocations/", "rel": "self", }, { "href": "http://127.0.0.1:6385/allocations/", "rel": "bookmark", }, ], "name": "test_allocation", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "owner": "demo", "resource_class": "baremetal", "state": "active", "traits": [], "updated_at": None, "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", } class TestAllocation(base.TestCase): def test_basic(self): sot = allocation.Allocation() self.assertIsNone(sot.resource_key) self.assertEqual('allocations', sot.resources_key) self.assertEqual('/allocations', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_instantiate(self): sot = allocation.Allocation(**FAKE) self.assertEqual(FAKE['candidate_nodes'], sot.candidate_nodes) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['extra'], sot.extra) self.assertEqual(FAKE['last_error'], sot.last_error) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['node_uuid'], sot.node_id) self.assertEqual(FAKE['owner'], sot.owner) self.assertEqual(FAKE['resource_class'], sot.resource_class) 
self.assertEqual(FAKE['state'], sot.state) self.assertEqual(FAKE['traits'], sot.traits) self.assertEqual(FAKE['updated_at'], sot.updated_at) self.assertEqual(FAKE['uuid'], sot.id) @mock.patch('time.sleep', lambda _t: None) @mock.patch.object(allocation.Allocation, 'fetch', autospec=True) class TestWaitForAllocation(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock(spec=adapter.Adapter) self.session.default_microversion = '1.52' self.session.log = mock.Mock() self.fake = dict(FAKE, state='allocating', node_uuid=None) self.allocation = allocation.Allocation(**self.fake) def test_already_active(self, mock_fetch): self.allocation.state = 'active' allocation = self.allocation.wait(None) self.assertIs(allocation, self.allocation) self.assertFalse(mock_fetch.called) def test_wait(self, mock_fetch): marker = [False] # mutable object to modify in the closure def _side_effect(allocation, session): if marker[0]: self.allocation.state = 'active' self.allocation.node_id = FAKE['node_uuid'] else: marker[0] = True mock_fetch.side_effect = _side_effect allocation = self.allocation.wait(self.session) self.assertIs(allocation, self.allocation) self.assertEqual(2, mock_fetch.call_count) def test_failure(self, mock_fetch): marker = [False] # mutable object to modify in the closure def _side_effect(allocation, session): if marker[0]: self.allocation.state = 'error' self.allocation.last_error = 'boom!' else: marker[0] = True mock_fetch.side_effect = _side_effect self.assertRaises( exceptions.ResourceFailure, self.allocation.wait, self.session ) self.assertEqual(2, mock_fetch.call_count) def test_failure_ignored(self, mock_fetch): marker = [False] # mutable object to modify in the closure def _side_effect(allocation, session): if marker[0]: self.allocation.state = 'error' self.allocation.last_error = 'boom!' 
else: marker[0] = True mock_fetch.side_effect = _side_effect allocation = self.allocation.wait(self.session, ignore_error=True) self.assertIs(allocation, self.allocation) self.assertEqual(2, mock_fetch.call_count) def test_timeout(self, mock_fetch): self.assertRaises( exceptions.ResourceTimeout, self.allocation.wait, self.session, timeout=0.001, ) mock_fetch.assert_called_with(self.allocation, self.session) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_chassis.py0000664000175000017500000000412300000000000025667 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.baremetal.v1 import chassis from openstack.tests.unit import base FAKE = { "created_at": "2016-08-18T22:28:48.165105+00:00", "description": "Sample chassis", "extra": {}, "links": [ {"href": "http://127.0.0.1:6385/v1/chassis/ID", "rel": "self"}, {"href": "http://127.0.0.1:6385/chassis/ID", "rel": "bookmark"}, ], "nodes": [ {"href": "http://127.0.0.1:6385/v1/chassis/ID/nodes", "rel": "self"}, {"href": "http://127.0.0.1:6385/chassis/ID/nodes", "rel": "bookmark"}, ], "updated_at": None, "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1", } class TestChassis(base.TestCase): def test_basic(self): sot = chassis.Chassis() self.assertIsNone(sot.resource_key) self.assertEqual('chassis', sot.resources_key) self.assertEqual('/chassis', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) def test_instantiate(self): sot = chassis.Chassis(**FAKE) self.assertEqual(FAKE['uuid'], sot.id) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['description'], sot.description) self.assertEqual(FAKE['extra'], sot.extra) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['nodes'], sot.nodes) self.assertEqual(FAKE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_conductor.py0000664000175000017500000000410500000000000026232 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.baremetal.v1 import conductor from openstack.tests.unit import base FAKE = { "links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", "rel": "self", }, { "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", "rel": "bookmark", }, ], "created_at": "2018-12-05T07:03:19+00:00", "hostname": "compute2.localdomain", "conductor_group": "", "updated_at": "2018-12-05T07:03:21+00:00", "alive": True, "drivers": ["ipmi"], } class TestContainer(base.TestCase): def test_basic(self): sot = conductor.Conductor() self.assertIsNone(sot.resource_key) self.assertEqual('conductors', sot.resources_key) self.assertEqual('/conductors', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_patch) def test_instantiate(self): sot = conductor.Conductor(**FAKE) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['updated_at'], sot.updated_at) self.assertEqual(FAKE['hostname'], sot.hostname) self.assertEqual(FAKE['conductor_group'], sot.conductor_group) self.assertEqual(FAKE['alive'], sot.alive) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['drivers'], sot.drivers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_deploy_templates.py0000664000175000017500000000465300000000000027614 0ustar00zuulzuul00000000000000# Licensed under the 
Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.baremetal.v1 import deploy_templates from openstack.tests.unit import base FAKE = { "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": """http://10.60.253.180:6385/v1/deploy_templates /bbb45f41-d4bc-4307-8d1d-32f95ce1e920""", "rel": "self", }, { "href": """http://10.60.253.180:6385/deploy_templates /bbb45f41-d4bc-4307-8d1d-32f95ce1e920""", "rel": "bookmark", }, ], "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "args": { "settings": [{"name": "LogicalProc", "value": "Enabled"}] }, "interface": "bios", "priority": 150, "step": "apply_configuration", } ], "updated_at": None, "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920", } class DeployTemplates(base.TestCase): def test_basic(self): sot = deploy_templates.DeployTemplate() self.assertIsNone(sot.resource_key) self.assertEqual('deploy_templates', sot.resources_key) self.assertEqual('/deploy_templates', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) def test_instantiate(self): sot = deploy_templates.DeployTemplate(**FAKE) self.assertEqual(FAKE['steps'], sot.steps) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['extra'], sot.extra) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['updated_at'], 
sot.updated_at) self.assertEqual(FAKE['uuid'], sot.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_driver.py0000664000175000017500000001212300000000000025524 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.baremetal.v1 import _common from openstack.baremetal.v1 import driver from openstack import exceptions from openstack.tests.unit import base FAKE = { "hosts": ["897ab1dad809"], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool", "rel": "self", }, { "href": "http://127.0.0.1:6385/drivers/agent_ipmitool", "rel": "bookmark", }, ], "name": "agent_ipmitool", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool/properties", # noqa: E501 "rel": "self", }, { "href": "http://127.0.0.1:6385/drivers/agent_ipmitool/properties", "rel": "bookmark", }, ], } class TestDriver(base.TestCase): def test_basic(self): sot = driver.Driver() self.assertIsNone(sot.resource_key) self.assertEqual('drivers', sot.resources_key) self.assertEqual('/drivers', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_instantiate(self): sot = driver.Driver(**FAKE) self.assertEqual(FAKE['name'], sot.id) 
self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['hosts'], sot.hosts) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['properties'], sot.properties) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) def test_list_vendor_passthru(self): self.session = mock.Mock(spec=adapter.Adapter) sot = driver.Driver(**FAKE) fake_vendor_passthru_info = { 'fake_vendor_method': { 'async': True, 'attach': False, 'description': "Fake function that does nothing in background", 'http_methods': ['GET', 'PUT', 'POST', 'DELETE'], } } self.session.get.return_value.json.return_value = ( fake_vendor_passthru_info ) result = sot.list_vendor_passthru(self.session) self.session.get.assert_called_once_with( 'drivers/{driver_name}/vendor_passthru/methods'.format( driver_name=FAKE["name"] ), headers=mock.ANY, ) self.assertEqual(result, fake_vendor_passthru_info) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) def test_call_vendor_passthru(self): self.session = mock.Mock(spec=adapter.Adapter) sot = driver.Driver(**FAKE) # GET sot.call_vendor_passthru(self.session, 'GET', 'fake_vendor_method') self.session.get.assert_called_once_with( 'drivers/{}/vendor_passthru?method={}'.format( FAKE["name"], 'fake_vendor_method' ), json=None, headers=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) # PUT sot.call_vendor_passthru( self.session, 'PUT', 'fake_vendor_method', body={"fake_param_key": "fake_param_value"}, ) self.session.put.assert_called_once_with( 'drivers/{}/vendor_passthru?method={}'.format( FAKE["name"], 'fake_vendor_method' ), json={"fake_param_key": "fake_param_value"}, headers=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) # POST sot.call_vendor_passthru( self.session, 'POST', 'fake_vendor_method', body={"fake_param_key": "fake_param_value"}, ) self.session.post.assert_called_once_with( 'drivers/{}/vendor_passthru?method={}'.format( FAKE["name"], 'fake_vendor_method' ), json={"fake_param_key": 
"fake_param_value"}, headers=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) # DELETE sot.call_vendor_passthru(self.session, 'DELETE', 'fake_vendor_method') self.session.delete.assert_called_once_with( 'drivers/{}/vendor_passthru?method={}'.format( FAKE["name"], 'fake_vendor_method' ), json=None, headers=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_node.py0000664000175000017500000014337200000000000025171 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 from unittest import mock from keystoneauth1 import adapter from openstack.baremetal.v1 import _common from openstack.baremetal.v1 import node from openstack import exceptions from openstack import resource from openstack.tests.unit import base from openstack import utils # NOTE: Sample data from api-ref doc FAKE = { "automated_clean": False, "boot_mode": "uefi", "chassis_uuid": "1", # NOTE: missed in api-ref sample "clean_step": {}, "conductor_group": None, "console_enabled": False, "created_at": "2016-08-18T22:28:48.643434+00:00", "description": "A node.", "driver": "agent_ipmitool", "driver_info": {"ipmi_password": "******", "ipmi_username": "ADMIN"}, "driver_internal_info": {}, "extra": {}, "firmware_interface": None, "inspection_finished_at": None, "inspection_started_at": None, "instance_info": {}, "instance_uuid": None, "last_error": None, "lessee": None, "links": [ {"href": "http://127.0.0.1:6385/v1/nodes/", "rel": "self"}, {"href": "http://127.0.0.1:6385/nodes/", "rel": "bookmark"}, ], "maintenance": False, "maintenance_reason": None, "name": "test_node", "network_interface": "flat", "owner": "4b7ed919-e4a6-4017-a081-43205c5b0b73", "parent_node": None, "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes//portgroups", "rel": "self", }, { "href": "http://127.0.0.1:6385/nodes//portgroups", "rel": "bookmark", }, ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes//ports", "rel": "self", }, { "href": "http://127.0.0.1:6385/nodes//ports", "rel": "bookmark", }, ], "power_state": None, "properties": {}, "provision_state": "enroll", "provision_updated_at": None, "raid_config": {}, "reservation": None, "resource_class": None, "service_step": {}, "secure_boot": True, "shard": "TestShard", "states": [ { "href": "http://127.0.0.1:6385/v1/nodes//states", "rel": "self", }, { "href": "http://127.0.0.1:6385/nodes//states", "rel": "bookmark", }, ], "target_power_state": None, "target_provision_state": None, "target_raid_config": {}, "updated_at": 
None, "uuid": "6d85703a-565d-469a-96ce-30b6de53079d", } def _fake_assert(self, session, action, expected, error_message=None): return expected class TestNode(base.TestCase): def test_basic(self): sot = node.Node() self.assertIsNone(sot.resource_key) self.assertEqual('nodes', sot.resources_key) self.assertEqual('/nodes', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) def test_instantiate(self): sot = node.Node(**FAKE) self.assertEqual(FAKE['uuid'], sot.id) self.assertEqual(FAKE['name'], sot.name) self.assertEqual( FAKE['automated_clean'], sot.is_automated_clean_enabled ) self.assertEqual(FAKE['boot_mode'], sot.boot_mode) self.assertEqual(FAKE['chassis_uuid'], sot.chassis_id) self.assertEqual(FAKE['clean_step'], sot.clean_step) self.assertEqual(FAKE['conductor_group'], sot.conductor_group) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['description'], sot.description) self.assertEqual(FAKE['driver'], sot.driver) self.assertEqual(FAKE['driver_info'], sot.driver_info) self.assertEqual( FAKE['driver_internal_info'], sot.driver_internal_info ) self.assertEqual(FAKE['extra'], sot.extra) self.assertEqual(FAKE['firmware_interface'], sot.firmware_interface) self.assertEqual(FAKE['instance_info'], sot.instance_info) self.assertEqual(FAKE['instance_uuid'], sot.instance_id) self.assertEqual(FAKE['console_enabled'], sot.is_console_enabled) self.assertEqual(FAKE['maintenance'], sot.is_maintenance) self.assertEqual(FAKE['last_error'], sot.last_error) self.assertEqual(FAKE['lessee'], sot.lessee) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['maintenance_reason'], sot.maintenance_reason) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['network_interface'], sot.network_interface) self.assertEqual(FAKE['owner'], sot.owner) 
self.assertEqual(FAKE['parent_node'], sot.parent_node) self.assertEqual(FAKE['ports'], sot.ports) self.assertEqual(FAKE['portgroups'], sot.port_groups) self.assertEqual(FAKE['power_state'], sot.power_state) self.assertEqual(FAKE['properties'], sot.properties) self.assertEqual(FAKE['provision_state'], sot.provision_state) self.assertEqual(FAKE['raid_config'], sot.raid_config) self.assertEqual(FAKE['reservation'], sot.reservation) self.assertEqual(FAKE['resource_class'], sot.resource_class) self.assertEqual(FAKE['service_step'], sot.service_step) self.assertEqual(FAKE['secure_boot'], sot.is_secure_boot) self.assertEqual(FAKE['states'], sot.states) self.assertEqual( FAKE['target_provision_state'], sot.target_provision_state ) self.assertEqual(FAKE['target_power_state'], sot.target_power_state) self.assertEqual(FAKE['target_raid_config'], sot.target_raid_config) self.assertEqual(FAKE['updated_at'], sot.updated_at) def test_normalize_provision_state(self): attrs = dict(FAKE, provision_state=None) sot = node.Node(**attrs) self.assertEqual('available', sot.provision_state) @mock.patch.object(node.Node, '_assert_microversion_for', _fake_assert) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) def test_list(self): self.node = node.Node() self.session = mock.Mock( spec=adapter.Adapter, default_microversion=None ) # Set a default, so we don't try and figure out the microversions # with additional requests. 
self.session.default_microversion = float(self.node._max_microversion) self.session.get.return_value.json.return_value = {'nodes': []} result = list( self.node.list( self.session, details=False, shard='meow', allow_unknown_params=True, ) ) self.assertEqual(0, len(result)) self.session.get.assert_called_once_with( '/nodes', headers={'Accept': 'application/json'}, params={'shard': 'meow'}, microversion=float(self.node._max_microversion), ) @mock.patch('time.sleep', lambda _t: None) @mock.patch.object(node.Node, 'fetch', autospec=True) class TestNodeWaitForProvisionState(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock() def test_success(self, mock_fetch): def _get_side_effect(_self, session): self.node.provision_state = 'manageable' self.assertIs(session, self.session) mock_fetch.side_effect = _get_side_effect node = self.node.wait_for_provision_state(self.session, 'manageable') self.assertIs(node, self.node) def test_failure(self, mock_fetch): def _get_side_effect(_self, session): self.node.provision_state = 'deploy failed' self.assertIs(session, self.session) mock_fetch.side_effect = _get_side_effect self.assertRaisesRegex( exceptions.ResourceFailure, 'failure state "deploy failed"', self.node.wait_for_provision_state, self.session, 'manageable', ) def test_failure_error(self, mock_fetch): def _get_side_effect(_self, session): self.node.provision_state = 'error' self.assertIs(session, self.session) mock_fetch.side_effect = _get_side_effect self.assertRaisesRegex( exceptions.ResourceFailure, 'failure state "error"', self.node.wait_for_provision_state, self.session, 'manageable', ) def test_enroll_as_failure(self, mock_fetch): def _get_side_effect(_self, session): self.node.provision_state = 'enroll' self.node.last_error = 'power failure' self.assertIs(session, self.session) mock_fetch.side_effect = _get_side_effect self.assertRaisesRegex( exceptions.ResourceFailure, 'failed to verify management credentials', 
self.node.wait_for_provision_state, self.session, 'manageable', ) def test_timeout(self, mock_fetch): self.assertRaises( exceptions.ResourceTimeout, self.node.wait_for_provision_state, self.session, 'manageable', timeout=0.001, ) def test_not_abort_on_failed_state(self, mock_fetch): def _get_side_effect(_self, session): self.node.provision_state = 'deploy failed' self.assertIs(session, self.session) mock_fetch.side_effect = _get_side_effect self.assertRaises( exceptions.ResourceTimeout, self.node.wait_for_provision_state, self.session, 'manageable', timeout=0.001, abort_on_failed_state=False, ) @mock.patch.object(node.Node, '_assert_microversion_for', _fake_assert) @mock.patch.object(node.Node, 'fetch', lambda self, session: self) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodeSetProvisionState(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion=None ) def test_no_arguments(self): result = self.node.set_provision_state(self.session, 'active') self.assertIs(result, self.node) self.session.put.assert_called_once_with( 'nodes/%s/states/provision' % self.node.id, json={'target': 'active'}, headers=mock.ANY, microversion=None, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_manage(self): result = self.node.set_provision_state(self.session, 'manage') self.assertIs(result, self.node) self.session.put.assert_called_once_with( 'nodes/%s/states/provision' % self.node.id, json={'target': 'manage'}, headers=mock.ANY, microversion='1.4', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_deploy_with_configdrive(self): result = self.node.set_provision_state( self.session, 'active', config_drive='abcd' ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( 'nodes/%s/states/provision' % self.node.id, json={'target': 'active', 'configdrive': 'abcd'}, headers=mock.ANY, microversion=None, 
retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_deploy_with_configdrive_as_bytestring(self): config_drive = base64.b64encode(b'foo') result = self.node.set_provision_state( self.session, 'active', config_drive=config_drive ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( 'nodes/%s/states/provision' % self.node.id, json={'target': 'active', 'configdrive': config_drive.decode()}, headers=mock.ANY, microversion=None, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_rebuild_with_configdrive(self): result = self.node.set_provision_state( self.session, 'rebuild', config_drive='abcd' ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( 'nodes/%s/states/provision' % self.node.id, json={'target': 'rebuild', 'configdrive': 'abcd'}, headers=mock.ANY, microversion='1.35', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_configdrive_as_dict(self): for target in ('rebuild', 'active'): self.session.put.reset_mock() result = self.node.set_provision_state( self.session, target, config_drive={'user_data': 'abcd'} ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( 'nodes/%s/states/provision' % self.node.id, json={'target': target, 'configdrive': {'user_data': 'abcd'}}, headers=mock.ANY, microversion='1.56', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_deploy_with_deploy_steps(self): deploy_steps = [{'interface': 'deploy', 'step': 'upgrade_fw'}] result = self.node.set_provision_state( self.session, 'active', deploy_steps=deploy_steps ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( 'nodes/%s/states/provision' % self.node.id, json={'target': 'active', 'deploy_steps': deploy_steps}, headers=mock.ANY, microversion='1.69', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_rebuild_with_deploy_steps(self): deploy_steps = [{'interface': 'deploy', 'step': 'upgrade_fw'}] result = 
self.node.set_provision_state( self.session, 'rebuild', deploy_steps=deploy_steps ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( 'nodes/%s/states/provision' % self.node.id, json={'target': 'rebuild', 'deploy_steps': deploy_steps}, headers=mock.ANY, microversion='1.69', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_set_provision_state_unhold(self): result = self.node.set_provision_state(self.session, 'unhold') self.assertIs(result, self.node) self.session.put.assert_called_once_with( 'nodes/%s/states/provision' % self.node.id, json={'target': 'unhold'}, headers=mock.ANY, microversion='1.85', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_set_provision_state_service(self): service_steps = [{'interface': 'deploy', 'step': 'hold'}] result = self.node.set_provision_state( self.session, 'service', service_steps=service_steps ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( 'nodes/%s/states/provision' % self.node.id, json={'target': 'service', 'service_steps': service_steps}, headers=mock.ANY, microversion='1.87', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) @mock.patch.object(node.Node, '_translate_response', mock.Mock()) @mock.patch.object(node.Node, '_get_session', lambda self, x: x) @mock.patch.object(node.Node, 'set_provision_state', autospec=True) class TestNodeCreate(base.TestCase): def setUp(self): super().setUp() self.new_state = None self.session = mock.Mock(spec=adapter.Adapter) self.session.default_microversion = '1.1' self.node = node.Node(driver=FAKE['driver']) def _change_state(*args, **kwargs): self.node.provision_state = self.new_state self.session.post.side_effect = _change_state def test_available_old_version(self, mock_prov): self.node.provision_state = 'available' result = self.node.create(self.session) self.assertIs(result, self.node) self.session.post.assert_called_once_with( mock.ANY, json={'driver': FAKE['driver']}, headers=mock.ANY, 
microversion=self.session.default_microversion, params={}, ) self.assertFalse(mock_prov.called) def test_available_new_version(self, mock_prov): self.session.default_microversion = '1.11' self.node.provision_state = 'available' result = self.node.create(self.session) self.assertIs(result, self.node) self.session.post.assert_called_once_with( mock.ANY, json={'driver': FAKE['driver']}, headers=mock.ANY, microversion='1.10', params={}, ) mock_prov.assert_not_called() def test_no_enroll_in_old_version(self, mock_prov): self.node.provision_state = 'enroll' self.assertRaises( exceptions.NotSupported, self.node.create, self.session ) self.assertFalse(self.session.post.called) self.assertFalse(mock_prov.called) def test_enroll_new_version(self, mock_prov): self.session.default_microversion = '1.11' self.node.provision_state = 'enroll' self.new_state = 'enroll' result = self.node.create(self.session) self.assertIs(result, self.node) self.session.post.assert_called_once_with( mock.ANY, json={'driver': FAKE['driver']}, headers=mock.ANY, microversion=self.session.default_microversion, params={}, ) self.assertFalse(mock_prov.called) def test_no_manageable_in_old_version(self, mock_prov): self.node.provision_state = 'manageable' self.assertRaises( exceptions.NotSupported, self.node.create, self.session ) self.assertFalse(self.session.post.called) self.assertFalse(mock_prov.called) def test_manageable_old_version(self, mock_prov): self.session.default_microversion = '1.4' self.node.provision_state = 'manageable' self.new_state = 'available' result = self.node.create(self.session) self.assertIs(result, self.node) self.session.post.assert_called_once_with( mock.ANY, json={'driver': FAKE['driver']}, headers=mock.ANY, microversion=self.session.default_microversion, params={}, ) mock_prov.assert_called_once_with( self.node, self.session, 'manage', wait=True ) def test_manageable_new_version(self, mock_prov): self.session.default_microversion = '1.11' self.node.provision_state = 
'manageable' self.new_state = 'enroll' result = self.node.create(self.session) self.assertIs(result, self.node) self.session.post.assert_called_once_with( mock.ANY, json={'driver': FAKE['driver']}, headers=mock.ANY, microversion=self.session.default_microversion, params={}, ) mock_prov.assert_called_once_with( self.node, self.session, 'manage', wait=True ) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) @mock.patch.object(node.Node, '_get_session', lambda self, x: x) class TestNodeVif(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock(spec=adapter.Adapter) self.session.default_microversion = '1.67' self.session.log = mock.Mock() self.node = node.Node( id='c29db401-b6a7-4530-af8e-20a720dee946', driver=FAKE['driver'] ) self.vif_id = '714bdf6d-2386-4b5e-bd0d-bc036f04b1ef' self.vif_port_uuid = 'port-uuid' self.vif_portgroup_uuid = 'portgroup-uuid' def test_attach_vif(self): self.assertIsNone(self.node.attach_vif(self.session, self.vif_id)) self.session.post.assert_called_once_with( 'nodes/%s/vifs' % self.node.id, json={'id': self.vif_id}, headers=mock.ANY, microversion='1.67', retriable_status_codes=[409, 503], ) def test_attach_vif_no_retries(self): self.assertIsNone( self.node.attach_vif( self.session, self.vif_id, retry_on_conflict=False ) ) self.session.post.assert_called_once_with( 'nodes/%s/vifs' % self.node.id, json={'id': self.vif_id}, headers=mock.ANY, microversion='1.67', retriable_status_codes=[503], ) def test_attach_vif_with_port_uuid(self): self.assertIsNone( self.node.attach_vif( self.session, self.vif_id, port_id=self.vif_port_uuid ) ) self.session.post.assert_called_once_with( 'nodes/%s/vifs' % self.node.id, json={'id': self.vif_id, 'port_uuid': self.vif_port_uuid}, headers=mock.ANY, microversion='1.67', retriable_status_codes=[409, 503], ) def test_attach_vif_with_portgroup_uuid(self): self.assertIsNone( self.node.attach_vif( self.session, self.vif_id, port_group_id=self.vif_portgroup_uuid, ) ) 
self.session.post.assert_called_once_with( 'nodes/%s/vifs' % self.node.id, json={ 'id': self.vif_id, 'portgroup_uuid': self.vif_portgroup_uuid, }, headers=mock.ANY, microversion='1.67', retriable_status_codes=[409, 503], ) def test_attach_vif_with_port_uuid_and_portgroup_uuid(self): self.assertRaises( exceptions.InvalidRequest, self.node.attach_vif, self.session, self.vif_id, port_id=self.vif_port_uuid, port_group_id=self.vif_portgroup_uuid, ) def test_detach_vif_existing(self): self.assertTrue(self.node.detach_vif(self.session, self.vif_id)) self.session.delete.assert_called_once_with( f'nodes/{self.node.id}/vifs/{self.vif_id}', headers=mock.ANY, microversion='1.67', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_detach_vif_missing(self): self.session.delete.return_value.status_code = 400 self.assertFalse(self.node.detach_vif(self.session, self.vif_id)) self.session.delete.assert_called_once_with( f'nodes/{self.node.id}/vifs/{self.vif_id}', headers=mock.ANY, microversion='1.67', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_list_vifs(self): self.session.get.return_value.json.return_value = { 'vifs': [ {'id': '1234'}, {'id': '5678'}, ] } res = self.node.list_vifs(self.session) self.assertEqual(['1234', '5678'], res) self.session.get.assert_called_once_with( 'nodes/%s/vifs' % self.node.id, headers=mock.ANY, microversion='1.67', ) def test_incompatible_microversion(self): self.session.default_microversion = '1.1' self.assertRaises( exceptions.NotSupported, self.node.attach_vif, self.session, self.vif_id, ) self.assertRaises( exceptions.NotSupported, self.node.detach_vif, self.session, self.vif_id, ) self.assertRaises( exceptions.NotSupported, self.node.list_vifs, self.session ) def test_incompatible_microversion_optional_params(self): self.session.default_microversion = '1.28' self.assertRaises( exceptions.NotSupported, self.node.attach_vif, self.session, self.vif_id, port_id=self.vif_port_uuid, ) self.assertRaises( 
exceptions.NotSupported, self.node.attach_vif, self.session, self.vif_id, port_group_id=self.vif_portgroup_uuid, ) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) @mock.patch.object(node.Node, '_get_session', lambda self, x: x) class TestNodeValidate(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock(spec=adapter.Adapter) self.session.default_microversion = '1.28' self.node = node.Node(**FAKE) def test_validate_ok(self): self.session.get.return_value.json.return_value = { 'boot': {'result': True}, 'console': {'result': False, 'reason': 'Not configured'}, 'deploy': {'result': True}, 'inspect': {'result': None, 'reason': 'Not supported'}, 'power': {'result': True}, } result = self.node.validate(self.session) for iface in ('boot', 'deploy', 'power'): self.assertTrue(result[iface].result) self.assertFalse(result[iface].reason) for iface in ('console', 'inspect'): self.assertIsNot(True, result[iface].result) self.assertTrue(result[iface].reason) def test_validate_failed(self): self.session.get.return_value.json.return_value = { 'boot': {'result': False}, 'console': {'result': False, 'reason': 'Not configured'}, 'deploy': {'result': False, 'reason': 'No deploy for you'}, 'inspect': {'result': None, 'reason': 'Not supported'}, 'power': {'result': True}, } self.assertRaisesRegex( exceptions.ValidationException, 'No deploy for you', self.node.validate, self.session, ) def test_validate_no_failure(self): self.session.get.return_value.json.return_value = { 'boot': {'result': False}, 'console': {'result': False, 'reason': 'Not configured'}, 'deploy': {'result': False, 'reason': 'No deploy for you'}, 'inspect': {'result': None, 'reason': 'Not supported'}, 'power': {'result': True}, } result = self.node.validate(self.session, required=None) self.assertTrue(result['power'].result) self.assertFalse(result['power'].reason) for iface in ('deploy', 'console', 'inspect'): self.assertIsNot(True, result[iface].result) 
self.assertTrue(result[iface].reason) # Reason can be empty self.assertFalse(result['boot'].result) self.assertIsNone(result['boot'].reason) @mock.patch('time.sleep', lambda _t: None) @mock.patch.object(node.Node, 'fetch', autospec=True) class TestNodeWaitForReservation(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock(spec=adapter.Adapter) self.session.default_microversion = '1.6' self.session.log = mock.Mock() self.node = node.Node(**FAKE) def test_no_reservation(self, mock_fetch): self.node.reservation = None node = self.node.wait_for_reservation(None) self.assertIs(node, self.node) self.assertFalse(mock_fetch.called) def test_reservation(self, mock_fetch): self.node.reservation = 'example.com' def _side_effect(node, session): if self.node.reservation == 'example.com': self.node.reservation = 'example2.com' else: self.node.reservation = None mock_fetch.side_effect = _side_effect node = self.node.wait_for_reservation(self.session) self.assertIs(node, self.node) self.assertEqual(2, mock_fetch.call_count) def test_timeout(self, mock_fetch): self.node.reservation = 'example.com' self.assertRaises( exceptions.ResourceTimeout, self.node.wait_for_reservation, self.session, timeout=0.001, ) mock_fetch.assert_called_with(self.node, self.session) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodeInjectNMI(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock(spec=adapter.Adapter) self.session.default_microversion = '1.29' self.node = node.Node(**FAKE) def test_inject_nmi(self): self.node.inject_nmi(self.session) self.session.put.assert_called_once_with( 'nodes/%s/management/inject_nmi' % FAKE['uuid'], json={}, headers=mock.ANY, microversion='1.29', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_incompatible_microversion(self): self.session.default_microversion = '1.28' self.assertRaises( exceptions.NotSupported, self.node.inject_nmi, self.session, ) 
@mock.patch.object(node.Node, '_assert_microversion_for', _fake_assert) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodeSetPowerState(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion=None ) def test_power_on(self): self.node.set_power_state(self.session, 'power on') self.session.put.assert_called_once_with( 'nodes/%s/states/power' % FAKE['uuid'], json={'target': 'power on'}, headers=mock.ANY, microversion=None, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_soft_power_on(self): self.node.set_power_state(self.session, 'soft power off') self.session.put.assert_called_once_with( 'nodes/%s/states/power' % FAKE['uuid'], json={'target': 'soft power off'}, headers=mock.ANY, microversion='1.27', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) @mock.patch.object(node.Node, '_translate_response', mock.Mock()) @mock.patch.object(node.Node, '_get_session', lambda self, x: x) class TestNodeMaintenance(base.TestCase): def setUp(self): super().setUp() self.node = node.Node.existing(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion='1.1', retriable_status_codes=None, ) def test_set(self): self.node.set_maintenance(self.session) self.session.put.assert_called_once_with( 'nodes/%s/maintenance' % self.node.id, json={'reason': None}, headers=mock.ANY, microversion=mock.ANY, ) def test_set_with_reason(self): self.node.set_maintenance(self.session, 'No work on Monday') self.session.put.assert_called_once_with( 'nodes/%s/maintenance' % self.node.id, json={'reason': 'No work on Monday'}, headers=mock.ANY, microversion=mock.ANY, ) def test_unset(self): self.node.unset_maintenance(self.session) self.session.delete.assert_called_once_with( 'nodes/%s/maintenance' % self.node.id, json=None, headers=mock.ANY, microversion=mock.ANY, ) def 
test_set_via_update(self): self.node.is_maintenance = True self.node.commit(self.session) self.session.put.assert_called_once_with( 'nodes/%s/maintenance' % self.node.id, json={'reason': None}, headers=mock.ANY, microversion=mock.ANY, ) self.assertFalse(self.session.patch.called) def test_set_with_reason_via_update(self): self.node.is_maintenance = True self.node.maintenance_reason = 'No work on Monday' self.node.commit(self.session) self.session.put.assert_called_once_with( 'nodes/%s/maintenance' % self.node.id, json={'reason': 'No work on Monday'}, headers=mock.ANY, microversion=mock.ANY, ) self.assertFalse(self.session.patch.called) def test_set_with_other_fields(self): self.node.is_maintenance = True self.node.name = 'lazy-3000' self.node.commit(self.session) self.session.put.assert_called_once_with( 'nodes/%s/maintenance' % self.node.id, json={'reason': None}, headers=mock.ANY, microversion=mock.ANY, ) self.session.patch.assert_called_once_with( 'nodes/%s' % self.node.id, json=[{'path': '/name', 'op': 'replace', 'value': 'lazy-3000'}], headers=mock.ANY, microversion=mock.ANY, ) def test_set_with_reason_and_other_fields(self): self.node.is_maintenance = True self.node.maintenance_reason = 'No work on Monday' self.node.name = 'lazy-3000' self.node.commit(self.session) self.session.put.assert_called_once_with( 'nodes/%s/maintenance' % self.node.id, json={'reason': 'No work on Monday'}, headers=mock.ANY, microversion=mock.ANY, ) self.session.patch.assert_called_once_with( 'nodes/%s' % self.node.id, json=[{'path': '/name', 'op': 'replace', 'value': 'lazy-3000'}], headers=mock.ANY, microversion=mock.ANY, ) def test_no_reason_without_maintenance(self): self.node.maintenance_reason = 'Can I?' 
self.assertRaises(ValueError, self.node.commit, self.session) self.assertFalse(self.session.put.called) self.assertFalse(self.session.patch.called) def test_set_unset_maintenance(self): self.node.is_maintenance = True self.node.maintenance_reason = 'No work on Monday' self.node.commit(self.session) self.session.put.assert_called_once_with( 'nodes/%s/maintenance' % self.node.id, json={'reason': 'No work on Monday'}, headers=mock.ANY, microversion=mock.ANY, ) self.node.is_maintenance = False self.node.commit(self.session) self.assertIsNone(self.node.maintenance_reason) self.session.delete.assert_called_once_with( 'nodes/%s/maintenance' % self.node.id, json=None, headers=mock.ANY, microversion=mock.ANY, ) @mock.patch.object(node.Node, 'fetch', lambda self, session: self) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodeBootDevice(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion='1.1' ) def test_get_boot_device(self): self.node.get_boot_device(self.session) self.session.get.assert_called_once_with( 'nodes/%s/management/boot_device' % self.node.id, headers=mock.ANY, microversion=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_set_boot_device(self): self.node.set_boot_device(self.session, 'pxe', persistent=False) self.session.put.assert_called_once_with( 'nodes/%s/management/boot_device' % self.node.id, json={'boot_device': 'pxe', 'persistent': False}, headers=mock.ANY, microversion=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_get_supported_boot_devices(self): self.node.get_supported_boot_devices(self.session) self.session.get.assert_called_once_with( 'nodes/%s/management/boot_device/supported' % self.node.id, headers=mock.ANY, microversion=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) @mock.patch.object(utils, 'pick_microversion', lambda session, v: v) 
@mock.patch.object(node.Node, 'fetch', lambda self, session: self) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodeSetBootMode(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion='1.1' ) def test_node_set_boot_mode(self): self.node.set_boot_mode(self.session, 'uefi') self.session.put.assert_called_once_with( 'nodes/%s/states/boot_mode' % self.node.id, json={'target': 'uefi'}, headers=mock.ANY, microversion=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_node_set_boot_mode_invalid_mode(self): self.assertRaises( ValueError, self.node.set_boot_mode, self.session, 'invalid-efi' ) @mock.patch.object(utils, 'pick_microversion', lambda session, v: v) @mock.patch.object(node.Node, 'fetch', lambda self, session: self) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodeSetSecureBoot(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion='1.1' ) def test_node_set_secure_boot(self): self.node.set_secure_boot(self.session, True) self.session.put.assert_called_once_with( 'nodes/%s/states/secure_boot' % self.node.id, json={'target': True}, headers=mock.ANY, microversion=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_node_set_secure_boot_invalid_none(self): self.assertRaises( ValueError, self.node.set_secure_boot, self.session, None ) @mock.patch.object(utils, 'pick_microversion', lambda session, v: v) @mock.patch.object(node.Node, 'fetch', lambda self, session: self) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodeTraits(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion='1.37' ) self.session.log = mock.Mock() def test_node_add_trait(self): 
self.node.add_trait(self.session, 'CUSTOM_FAKE') self.session.put.assert_called_once_with( 'nodes/{}/traits/{}'.format(self.node.id, 'CUSTOM_FAKE'), json=None, headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_remove_trait(self): self.assertTrue(self.node.remove_trait(self.session, 'CUSTOM_FAKE')) self.session.delete.assert_called_once_with( 'nodes/{}/traits/{}'.format(self.node.id, 'CUSTOM_FAKE'), headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_remove_trait_missing(self): self.session.delete.return_value.status_code = 400 self.assertFalse( self.node.remove_trait(self.session, 'CUSTOM_MISSING') ) self.session.delete.assert_called_once_with( 'nodes/{}/traits/{}'.format(self.node.id, 'CUSTOM_MISSING'), headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_set_traits(self): traits = ['CUSTOM_FAKE', 'CUSTOM_REAL', 'CUSTOM_MISSING'] self.node.set_traits(self.session, traits) self.session.put.assert_called_once_with( 'nodes/%s/traits' % self.node.id, json={'traits': ['CUSTOM_FAKE', 'CUSTOM_REAL', 'CUSTOM_MISSING']}, headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) @mock.patch.object(node.Node, '_assert_microversion_for', _fake_assert) @mock.patch.object(resource.Resource, 'patch', autospec=True) class TestNodePatch(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion=None ) self.session.log = mock.Mock() def test_node_patch(self, mock_patch): patch = {'path': 'test'} self.node.patch(self.session, patch=patch) mock_patch.assert_called_once() kwargs = mock_patch.call_args[1] self.assertEqual(kwargs['patch'], {'path': 'test'}) @mock.patch.object(resource.Resource, '_prepare_request', autospec=True) @mock.patch.object(resource.Resource, '_commit', autospec=True) def 
test_node_patch_reset_interfaces( self, mock__commit, mock_prepreq, mock_patch ): patch = {'path': 'test'} self.node.patch( self.session, patch=patch, retry_on_conflict=True, reset_interfaces=True, ) mock_prepreq.assert_called_once() prepreq_kwargs = mock_prepreq.call_args[1] self.assertEqual( prepreq_kwargs['params'], [('reset_interfaces', True)] ) mock__commit.assert_called_once() commit_args = mock__commit.call_args[0] commit_kwargs = mock__commit.call_args[1] self.assertIn('1.45', commit_args) self.assertEqual(commit_kwargs['retry_on_conflict'], True) mock_patch.assert_not_called() @mock.patch('time.sleep', lambda _t: None) @mock.patch.object(node.Node, 'fetch', autospec=True) class TestNodeWaitForPowerState(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock() def test_success(self, mock_fetch): self.node.power_state = 'power on' def _get_side_effect(_self, session): self.node.power_state = 'power off' self.assertIs(session, self.session) mock_fetch.side_effect = _get_side_effect node = self.node.wait_for_power_state(self.session, 'power off') self.assertIs(node, self.node) def test_timeout(self, mock_fetch): self.node.power_state = 'power on' self.assertRaises( exceptions.ResourceTimeout, self.node.wait_for_power_state, self.session, 'power off', timeout=0.001, ) @mock.patch.object(utils, 'pick_microversion', lambda session, v: v) @mock.patch.object(node.Node, 'fetch', lambda self, session: self) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodePassthru: def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = node.Mock( spec=adapter.Adapter, default_microversion='1.37' ) self.session.log = mock.Mock() def test_get_passthru(self): self.node.call_vendor_passthru(self.session, "GET", "test_method") self.session.get.assert_called_once_with( 'nodes/%s/vendor_passthru?method=test_method' % self.node.id, headers=mock.ANY, microversion='1.37', 
retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_post_passthru(self): self.node.call_vendor_passthru(self.session, "POST", "test_method") self.session.post.assert_called_once_with( 'nodes/%s/vendor_passthru?method=test_method' % self.node.id, headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_put_passthru(self): self.node.call_vendor_passthru(self.session, "PUT", "test_method") self.session.put.assert_called_once_with( 'nodes/%s/vendor_passthru?method=test_method' % self.node.id, headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_delete_passthru(self): self.node.call_vendor_passthru(self.session, "DELETE", "test_method") self.session.delete.assert_called_once_with( 'nodes/%s/vendor_passthru?method=test_method' % self.node.id, headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_list_passthru(self): self.node.list_vendor_passthru(self.session) self.session.get.assert_called_once_with( 'nodes/%s/vendor_passthru/methods' % self.node.id, headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) @mock.patch.object(node.Node, 'fetch', lambda self, session: self) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodeConsole(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion='1.1', ) def test_get_console(self): self.node.get_console(self.session) self.session.get.assert_called_once_with( 'nodes/%s/states/console' % self.node.id, headers=mock.ANY, microversion=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_set_console_mode(self): self.node.set_console_mode(self.session, True) self.session.put.assert_called_once_with( 'nodes/%s/states/console' % self.node.id, json={'enabled': True}, headers=mock.ANY, 
microversion=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) def test_set_console_mode_invalid_enabled(self): self.assertRaises( ValueError, self.node.set_console_mode, self.session, 'true', # not a bool ) @mock.patch.object(node.Node, 'fetch', lambda self, session: self) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodeInventory(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion='1.81', ) def test_get_inventory(self): node_inventory = { 'inventory': { 'memory': {'physical_mb': 3072}, 'cpu': { 'count': 1, 'model_name': 'qemu64', 'architecture': 'x86_64', }, 'disks': [{'name': 'testvm1.qcow2', 'size': 11811160064}], 'interfaces': [{'mac_address': '52:54:00:c7:02:45'}], 'system_vendor': { 'product_name': 'testvm1', 'manufacturer': 'Sushy Emulator', }, 'boot': {'current_boot_mode': 'uefi'}, }, 'plugin_data': {'fake_plugin_data'}, } self.session.get.return_value.json.return_value = node_inventory res = self.node.get_node_inventory(self.session, self.node.id) self.assertEqual(node_inventory, res) self.session.get.assert_called_once_with( 'nodes/%s/inventory' % self.node.id, headers=mock.ANY, microversion='1.81', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) @mock.patch.object(node.Node, 'fetch', lambda self, session: self) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) class TestNodeFirmware(base.TestCase): def setUp(self): super().setUp() self.node = node.Node(**FAKE) self.session = mock.Mock( spec=adapter.Adapter, default_microversion='1.86', ) def test_list_firmware(self): node_firmware = { "firmware": [ { "created_at": "2016-08-18T22:28:49.653974+00:00", "updated_at": "2016-08-18T22:28:49.653974+00:00", "component": "BMC", "initial_version": "v1.0.0", "current_version": "v1.2.0", "last_version_flashed": "v1.2.0", }, { "created_at": "2016-08-18T22:28:49.653974+00:00", "updated_at": 
"2016-08-18T22:28:49.653974+00:00", "component": "BIOS", "initial_version": "v1.0.0", "current_version": "v1.1.5", "last_version_flashed": "v1.1.5", }, ] } self.session.get.return_value.json.return_value = node_firmware res = self.node.list_firmware(self.session) self.assertEqual(node_firmware, res) self.session.get.assert_called_once_with( 'nodes/%s/firmware' % self.node.id, headers=mock.ANY, microversion='1.86', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_port.py0000664000175000017500000000525000000000000025220 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.baremetal.v1 import port from openstack.tests.unit import base FAKE = { "address": "11:11:11:11:11:11", "created_at": "2016-08-18T22:28:49.946416+00:00", "extra": {}, "internal_info": {}, "is_smartnic": True, "links": [ {"href": "http://127.0.0.1:6385/v1/ports/", "rel": "self"}, {"href": "http://127.0.0.1:6385/ports/", "rel": "bookmark"}, ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": "0a:1b:2c:3d:4e:5f", "switch_info": "switch1", }, "name": "port_name", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": True, "updated_at": None, "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1", } class TestPort(base.TestCase): def test_basic(self): sot = port.Port() self.assertIsNone(sot.resource_key) self.assertEqual('ports', sot.resources_key) self.assertEqual('/ports', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) def test_instantiate(self): sot = port.PortDetail(**FAKE) self.assertEqual(FAKE['uuid'], sot.id) self.assertEqual(FAKE['address'], sot.address) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['extra'], sot.extra) self.assertEqual(FAKE['internal_info'], sot.internal_info) self.assertEqual(FAKE['is_smartnic'], sot.is_smartnic) self.assertEqual(FAKE['links'], sot.links) self.assertEqual( FAKE['local_link_connection'], sot.local_link_connection ) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['node_uuid'], sot.node_id) self.assertEqual(FAKE['portgroup_uuid'], sot.port_group_id) self.assertEqual(FAKE['pxe_enabled'], sot.is_pxe_enabled) self.assertEqual(FAKE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_port_group.py0000664000175000017500000000526000000000000026435 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.baremetal.v1 import port_group from openstack.tests.unit import base FAKE = { "address": "11:11:11:11:11:11", "created_at": "2016-08-18T22:28:48.165105+00:00", "extra": {}, "internal_info": {}, "links": [ {"href": "http://127.0.0.1:6385/v1/portgroups/", "rel": "self"}, { "href": "http://127.0.0.1:6385/portgroups/", "rel": "bookmark", }, ], "name": "test_portgroup", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "ports": [ { "href": "http://127.0.0.1:6385/v1/portgroups//ports", "rel": "self", }, { "href": "http://127.0.0.1:6385/portgroups//ports", "rel": "bookmark", }, ], "standalone_ports_supported": True, "updated_at": None, "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", } class TestPortGroup(base.TestCase): def test_basic(self): sot = port_group.PortGroup() self.assertIsNone(sot.resource_key) self.assertEqual('portgroups', sot.resources_key) self.assertEqual('/portgroups', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) def test_instantiate(self): sot = port_group.PortGroup(**FAKE) self.assertEqual(FAKE['uuid'], sot.id) self.assertEqual(FAKE['address'], sot.address) 
self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['extra'], sot.extra) self.assertEqual(FAKE['internal_info'], sot.internal_info) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['node_uuid'], sot.node_id) self.assertEqual(FAKE['ports'], sot.ports) self.assertEqual( FAKE['standalone_ports_supported'], sot.is_standalone_ports_supported, ) self.assertEqual(FAKE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_proxy.py0000664000175000017500000003730400000000000025422 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.baremetal.v1 import _proxy from openstack.baremetal.v1 import allocation from openstack.baremetal.v1 import chassis from openstack.baremetal.v1 import driver from openstack.baremetal.v1 import node from openstack.baremetal.v1 import port from openstack.baremetal.v1 import port_group from openstack.baremetal.v1 import volume_connector from openstack.baremetal.v1 import volume_target from openstack import exceptions from openstack.tests.unit import base from openstack.tests.unit import test_proxy_base _MOCK_METHOD = 'openstack.baremetal.v1._proxy.Proxy._get_with_fields' class TestBaremetalProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestDrivers(TestBaremetalProxy): def test_drivers(self): self.verify_list(self.proxy.drivers, driver.Driver) def test_get_driver(self): self.verify_get(self.proxy.get_driver, driver.Driver) class TestChassis(TestBaremetalProxy): @mock.patch.object(chassis.Chassis, 'list') def test_chassis_detailed(self, mock_list): result = self.proxy.chassis(details=True, query=1) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with(self.proxy, details=True, query=1) @mock.patch.object(chassis.Chassis, 'list') def test_chassis_not_detailed(self, mock_list): result = self.proxy.chassis(query=1) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with(self.proxy, details=False, query=1) def test_create_chassis(self): self.verify_create(self.proxy.create_chassis, chassis.Chassis) def test_find_chassis(self): self.verify_find(self.proxy.find_chassis, chassis.Chassis) def test_get_chassis(self): self.verify_get( self.proxy.get_chassis, chassis.Chassis, mock_method=_MOCK_METHOD, expected_kwargs={'fields': None}, ) def test_update_chassis(self): self.verify_update(self.proxy.update_chassis, chassis.Chassis) def test_delete_chassis(self): self.verify_delete(self.proxy.delete_chassis, chassis.Chassis, 
False) def test_delete_chassis_ignore(self): self.verify_delete(self.proxy.delete_chassis, chassis.Chassis, True) class TestNode(TestBaremetalProxy): @mock.patch.object(node.Node, 'list') def test_nodes_detailed(self, mock_list): result = self.proxy.nodes(details=True, query=1) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with(self.proxy, details=True, query=1) @mock.patch.object(node.Node, 'list') def test_nodes_not_detailed(self, mock_list): result = self.proxy.nodes(query=1) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with(self.proxy, details=False, query=1) @mock.patch.object(node.Node, 'list') def test_nodes_sharded(self, mock_list): kwargs = {"shard": 'meow', "query": 1} result = self.proxy.nodes(fields=("uuid", "instance_uuid"), **kwargs) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with( self.proxy, details=False, fields=('uuid', 'instance_uuid'), shard='meow', query=1, ) def test_create_node(self): self.verify_create(self.proxy.create_node, node.Node) def test_find_node(self): self.verify_find(self.proxy.find_node, node.Node) def test_get_node(self): self.verify_get( self.proxy.get_node, node.Node, mock_method=_MOCK_METHOD, expected_kwargs={'fields': None}, ) @mock.patch.object(node.Node, 'commit', autospec=True) def test_update_node(self, mock_commit): self.proxy.update_node('uuid', instance_id='new value') mock_commit.assert_called_once_with( mock.ANY, self.proxy, retry_on_conflict=True ) self.assertEqual('new value', mock_commit.call_args[0][0].instance_id) @mock.patch.object(node.Node, 'commit', autospec=True) def test_update_node_no_retries(self, mock_commit): self.proxy.update_node( 'uuid', instance_id='new value', retry_on_conflict=False ) mock_commit.assert_called_once_with( mock.ANY, self.proxy, retry_on_conflict=False ) self.assertEqual('new value', mock_commit.call_args[0][0].instance_id) def test_delete_node(self): 
self.verify_delete(self.proxy.delete_node, node.Node, False) def test_delete_node_ignore(self): self.verify_delete(self.proxy.delete_node, node.Node, True) class TestPort(TestBaremetalProxy): @mock.patch.object(port.Port, 'list') def test_ports_detailed(self, mock_list): result = self.proxy.ports(details=True, query=1) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with(self.proxy, details=True, query=1) @mock.patch.object(port.Port, 'list') def test_ports_not_detailed(self, mock_list): result = self.proxy.ports(query=1) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with(self.proxy, details=False, query=1) def test_create_port(self): self.verify_create(self.proxy.create_port, port.Port) def test_find_port(self): self.verify_find(self.proxy.find_port, port.Port) def test_get_port(self): self.verify_get( self.proxy.get_port, port.Port, mock_method=_MOCK_METHOD, expected_kwargs={'fields': None}, ) def test_update_port(self): self.verify_update(self.proxy.update_port, port.Port) def test_delete_port(self): self.verify_delete(self.proxy.delete_port, port.Port, False) def test_delete_port_ignore(self): self.verify_delete(self.proxy.delete_port, port.Port, True) class TestPortGroups(TestBaremetalProxy): @mock.patch.object(port_group.PortGroup, 'list') def test_port_groups_detailed(self, mock_list): result = self.proxy.port_groups(details=True, query=1) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with(self.proxy, details=True, query=1) @mock.patch.object(port_group.PortGroup, 'list') def test_port_groups_not_detailed(self, mock_list): result = self.proxy.port_groups(query=1) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with(self.proxy, details=False, query=1) def test_get_port_group(self): self.verify_get( self.proxy.get_port_group, port_group.PortGroup, mock_method=_MOCK_METHOD, expected_kwargs={'fields': None}, ) class TestAllocation(TestBaremetalProxy): 
def test_create_allocation(self): self.verify_create(self.proxy.create_allocation, allocation.Allocation) def test_get_allocation(self): self.verify_get( self.proxy.get_allocation, allocation.Allocation, mock_method=_MOCK_METHOD, expected_kwargs={'fields': None}, ) def test_delete_allocation(self): self.verify_delete( self.proxy.delete_allocation, allocation.Allocation, False ) def test_delete_allocation_ignore(self): self.verify_delete( self.proxy.delete_allocation, allocation.Allocation, True ) class TestVolumeConnector(TestBaremetalProxy): def test_create_volume_connector(self): self.verify_create( self.proxy.create_volume_connector, volume_connector.VolumeConnector, ) def test_find_volume_connector(self): self.verify_find( self.proxy.find_volume_connector, volume_connector.VolumeConnector ) def test_get_volume_connector(self): self.verify_get( self.proxy.get_volume_connector, volume_connector.VolumeConnector, mock_method=_MOCK_METHOD, expected_kwargs={'fields': None}, ) def test_delete_volume_connector(self): self.verify_delete( self.proxy.delete_volume_connector, volume_connector.VolumeConnector, False, ) def test_delete_volume_connector_ignore(self): self.verify_delete( self.proxy.delete_volume_connector, volume_connector.VolumeConnector, True, ) class TestVolumeTarget(TestBaremetalProxy): @mock.patch.object(volume_target.VolumeTarget, 'list') def test_volume_target_detailed(self, mock_list): result = self.proxy.volume_targets(details=True, query=1) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with(self.proxy, detail=True, query=1) @mock.patch.object(volume_target.VolumeTarget, 'list') def test_volume_target_not_detailed(self, mock_list): result = self.proxy.volume_targets(query=1) self.assertIs(result, mock_list.return_value) mock_list.assert_called_once_with(self.proxy, query=1) def test_create_volume_target(self): self.verify_create( self.proxy.create_volume_target, volume_target.VolumeTarget ) def 
test_find_volume_target(self): self.verify_find( self.proxy.find_volume_target, volume_target.VolumeTarget ) def test_get_volume_target(self): self.verify_get( self.proxy.get_volume_target, volume_target.VolumeTarget, mock_method=_MOCK_METHOD, expected_kwargs={'fields': None}, ) def test_delete_volume_target(self): self.verify_delete( self.proxy.delete_volume_target, volume_target.VolumeTarget, False ) def test_delete_volume_target_ignore(self): self.verify_delete( self.proxy.delete_volume_target, volume_target.VolumeTarget, True ) class TestMisc(TestBaremetalProxy): @mock.patch.object(node.Node, 'fetch', autospec=True) def test__get_with_fields_none(self, mock_fetch): result = self.proxy._get_with_fields(node.Node, 'value') self.assertIs(result, mock_fetch.return_value) mock_fetch.assert_called_once_with( mock.ANY, self.proxy, error_message=mock.ANY ) @mock.patch.object(node.Node, 'fetch', autospec=True) def test__get_with_fields_node(self, mock_fetch): result = self.proxy._get_with_fields( # Mix of server-side and client-side fields node.Node, 'value', fields=['maintenance', 'id', 'instance_id'], ) self.assertIs(result, mock_fetch.return_value) mock_fetch.assert_called_once_with( mock.ANY, self.proxy, error_message=mock.ANY, # instance_id converted to server-side instance_uuid fields='maintenance,uuid,instance_uuid', ) @mock.patch.object(port.Port, 'fetch', autospec=True) def test__get_with_fields_port(self, mock_fetch): result = self.proxy._get_with_fields( port.Port, 'value', fields=['address', 'id', 'node_id'] ) self.assertIs(result, mock_fetch.return_value) mock_fetch.assert_called_once_with( mock.ANY, self.proxy, error_message=mock.ANY, # node_id converted to server-side node_uuid fields='address,uuid,node_uuid', ) @mock.patch('time.sleep', lambda _sec: None) @mock.patch.object(_proxy.Proxy, 'get_node', autospec=True) class TestWaitForNodesProvisionState(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock() self.proxy = 
_proxy.Proxy(self.session) def test_success(self, mock_get): # two attempts, one node succeeds after the 1st nodes = [mock.Mock(spec=node.Node, id=str(i)) for i in range(3)] for i, n in enumerate(nodes): # 1st attempt on 1st node, 2nd attempt on 2nd node n._check_state_reached.return_value = not (i % 2) mock_get.side_effect = nodes result = self.proxy.wait_for_nodes_provision_state( ['abcd', node.Node(id='1234')], 'fake state' ) self.assertEqual([nodes[0], nodes[2]], result) for n in nodes: n._check_state_reached.assert_called_once_with( self.proxy, 'fake state', True ) def test_success_no_fail(self, mock_get): # two attempts, one node succeeds after the 1st nodes = [mock.Mock(spec=node.Node, id=str(i)) for i in range(3)] for i, n in enumerate(nodes): # 1st attempt on 1st node, 2nd attempt on 2nd node n._check_state_reached.return_value = not (i % 2) mock_get.side_effect = nodes result = self.proxy.wait_for_nodes_provision_state( ['abcd', node.Node(id='1234')], 'fake state', fail=False ) self.assertEqual([nodes[0], nodes[2]], result.success) self.assertEqual([], result.failure) self.assertEqual([], result.timeout) for n in nodes: n._check_state_reached.assert_called_once_with( self.proxy, 'fake state', True ) def test_timeout(self, mock_get): mock_get.return_value._check_state_reached.return_value = False mock_get.return_value.id = '1234' self.assertRaises( exceptions.ResourceTimeout, self.proxy.wait_for_nodes_provision_state, ['abcd', node.Node(id='1234')], 'fake state', timeout=0.001, ) mock_get.return_value._check_state_reached.assert_called_with( self.proxy, 'fake state', True ) def test_timeout_no_fail(self, mock_get): mock_get.return_value._check_state_reached.return_value = False mock_get.return_value.id = '1234' result = self.proxy.wait_for_nodes_provision_state( ['abcd'], 'fake state', timeout=0.001, fail=False ) mock_get.return_value._check_state_reached.assert_called_with( self.proxy, 'fake state', True ) self.assertEqual([], result.success) 
self.assertEqual([mock_get.return_value], result.timeout) self.assertEqual([], result.failure) def test_timeout_and_failures_not_fail(self, mock_get): def _fake_get(_self, node): result = mock.Mock() result.id = getattr(node, 'id', node) if result.id == '1': result._check_state_reached.return_value = True elif result.id == '2': result._check_state_reached.side_effect = ( exceptions.ResourceFailure("boom") ) else: result._check_state_reached.return_value = False return result mock_get.side_effect = _fake_get result = self.proxy.wait_for_nodes_provision_state( ['1', '2', '3'], 'fake state', timeout=0.001, fail=False ) self.assertEqual(['1'], [x.id for x in result.success]) self.assertEqual(['3'], [x.id for x in result.timeout]) self.assertEqual(['2'], [x.id for x in result.failure]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_volume_connector.py0000664000175000017500000000433400000000000027617 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.baremetal.v1 import volume_connector
from openstack.tests.unit import base

# Representative API response body for a single volume connector, used to
# exercise attribute mapping in TestVolumeconnector.test_instantiate.
FAKE = {
    "connector_id": "iqn.2017-07.org.openstack:01:d9a51732c3f",
    "created_at": "2016-08-18T22:28:48.643434+11:11",
    "extra": {},
    "links": [
        {
            "href": "http://127.0.0.1:6385/v1/volume/connector/",
            "rel": "self",
        },
        {
            "href": "http://127.0.0.1:6385/volume/connector/",
            "rel": "bookmark",
        },
    ],
    "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d",
    "type": "iqn",
    "updated_at": None,
    "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c",
}


class TestVolumeconnector(base.TestCase):
    """Unit tests for the VolumeConnector resource definition."""

    def test_basic(self):
        """Verify the static resource configuration (paths, allowed ops)."""
        sot = volume_connector.VolumeConnector()
        self.assertIsNone(sot.resource_key)
        self.assertEqual('connectors', sot.resources_key)
        self.assertEqual('/volume/connectors', sot.base_path)
        self.assertTrue(sot.allow_create)
        self.assertTrue(sot.allow_fetch)
        self.assertTrue(sot.allow_commit)
        self.assertTrue(sot.allow_delete)
        self.assertTrue(sot.allow_list)
        self.assertEqual('PATCH', sot.commit_method)

    def test_instantiate(self):
        """Verify server-side fields map onto SDK attribute names.

        Note the renames: node_uuid -> node_id and uuid -> id.
        """
        sot = volume_connector.VolumeConnector(**FAKE)
        self.assertEqual(FAKE['connector_id'], sot.connector_id)
        self.assertEqual(FAKE['created_at'], sot.created_at)
        self.assertEqual(FAKE['extra'], sot.extra)
        self.assertEqual(FAKE['links'], sot.links)
        self.assertEqual(FAKE['node_uuid'], sot.node_id)
        self.assertEqual(FAKE['type'], sot.type)
        self.assertEqual(FAKE['updated_at'], sot.updated_at)
        self.assertEqual(FAKE['uuid'], sot.id)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal/v1/test_volume_target.py0000664000175000017500000000455700000000000027116 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack.baremetal.v1 import volume_target
from openstack.tests.unit import base

# Representative API response body for a single volume target, used to
# exercise attribute mapping in TestVolumeTarget.test_instantiate.
FAKE = {
    "boot_index": 0,
    "created_at": "2016-08-18T22:28:48.643434+11:11",
    "extra": {},
    "links": [
        {
            "href": "http://127.0.0.1:6385/v1/volume/targets/",
            "rel": "self",
        },
        {
            "href": "http://127.0.0.1:6385/volume/targets/",
            "rel": "bookmark",
        },
    ],
    "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d",
    "properties": {},
    "updated_at": None,
    "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f",
    "volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2",
    "volume_type": "iscsi",
}


class TestVolumeTarget(base.TestCase):
    """Unit tests for the VolumeTarget resource definition."""

    def test_basic(self):
        """Verify the static resource configuration (paths, allowed ops)."""
        sot = volume_target.VolumeTarget()
        self.assertIsNone(sot.resource_key)
        self.assertEqual('targets', sot.resources_key)
        self.assertEqual('/volume/targets', sot.base_path)
        self.assertTrue(sot.allow_create)
        self.assertTrue(sot.allow_fetch)
        self.assertTrue(sot.allow_commit)
        self.assertTrue(sot.allow_delete)
        self.assertTrue(sot.allow_list)
        self.assertEqual('PATCH', sot.commit_method)

    def test_instantiate(self):
        """Verify server-side fields map onto SDK attribute names.

        Note the renames: node_uuid -> node_id and uuid -> id.
        """
        sot = volume_target.VolumeTarget(**FAKE)
        self.assertEqual(FAKE['boot_index'], sot.boot_index)
        self.assertEqual(FAKE['created_at'], sot.created_at)
        self.assertEqual(FAKE['extra'], sot.extra)
        self.assertEqual(FAKE['links'], sot.links)
        self.assertEqual(FAKE['node_uuid'], sot.node_id)
        self.assertEqual(FAKE['properties'], sot.properties)
        self.assertEqual(FAKE['updated_at'], sot.updated_at)
        self.assertEqual(FAKE['uuid'], sot.id)
        self.assertEqual(FAKE['volume_id'], sot.volume_id)
        self.assertEqual(FAKE['volume_type'], sot.volume_type)
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3973866 openstacksdk-4.0.0/openstack/tests/unit/baremetal_introspection/0000775000175000017500000000000000000000000025253 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal_introspection/__init__.py0000664000175000017500000000000000000000000027352 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3973866 openstacksdk-4.0.0/openstack/tests/unit/baremetal_introspection/v1/0000775000175000017500000000000000000000000025601 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal_introspection/v1/__init__.py0000664000175000017500000000000000000000000027700 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal_introspection/v1/test_introspection_rule.py0000664000175000017500000000500400000000000033140 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.baremetal_introspection.v1 import introspection_rule
from openstack.tests.unit import base

# Representative API response body for a single introspection rule, used to
# exercise attribute mapping in TestIntrospectionRule.test_instantiate.
FAKE = {
    "actions": [
        {
            "action": "set-attribute",
            "path": "driver_info/deploy_kernel",
            "value": "8fd65-c97b-4d00-aa8b-7ed166a60971",
        },
        {
            "action": "set-attribute",
            "path": "driver_info/deploy_ramdisk",
            "value": "09e5420c-6932-4199-996e-9485c56b3394",
        },
    ],
    "conditions": [
        {
            "field": "node://driver_info.deploy_ramdisk",
            "invert": False,
            "multiple": "any",
            "op": "is-empty",
        },
        {
            "field": "node://driver_info.deploy_kernel",
            "invert": False,
            "multiple": "any",
            "op": "is-empty",
        },
    ],
    "description": "Set deploy info if not already set on node",
    "links": [
        {
            "href": "/v1/rules/7459bf7c-9ff9-43a8-ba9f-48542ecda66c",
            "rel": "self",
        }
    ],
    "uuid": "7459bf7c-9ff9-43a8-ba9f-48542ecda66c",
    "scope": "",
}


class TestIntrospectionRule(base.TestCase):
    """Unit tests for the IntrospectionRule resource definition."""

    def test_basic(self):
        """Verify the static resource configuration.

        Rules cannot be updated in place (allow_commit is False); they are
        created via POST and otherwise fetched, listed, or deleted.
        """
        sot = introspection_rule.IntrospectionRule()
        self.assertIsNone(sot.resource_key)
        self.assertEqual('rules', sot.resources_key)
        self.assertEqual('/rules', sot.base_path)
        self.assertTrue(sot.allow_create)
        self.assertTrue(sot.allow_fetch)
        self.assertFalse(sot.allow_commit)
        self.assertTrue(sot.allow_delete)
        self.assertTrue(sot.allow_list)
        self.assertEqual('POST', sot.create_method)

    def test_instantiate(self):
        """Verify server-side fields map onto SDK attribute names.

        Note the rename: uuid -> id.
        """
        sot = introspection_rule.IntrospectionRule(**FAKE)
        self.assertEqual(FAKE['conditions'], sot.conditions)
        self.assertEqual(FAKE['actions'], sot.actions)
        self.assertEqual(FAKE['description'], sot.description)
        self.assertEqual(FAKE['uuid'], sot.id)
        self.assertEqual(FAKE['scope'], sot.scope)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/baremetal_introspection/v1/test_proxy.py0000664000175000017500000002024500000000000030376 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.baremetal.v1 import node as _node from openstack.baremetal_introspection.v1 import _proxy from openstack.baremetal_introspection.v1 import introspection from openstack.baremetal_introspection.v1 import introspection_rule from openstack import exceptions from openstack.tests.unit import base from openstack.tests.unit import test_proxy_base @mock.patch.object(introspection.Introspection, 'create', autospec=True) class TestStartIntrospection(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock(spec=adapter.Adapter) self.proxy = _proxy.Proxy(self.session) def test_create_introspection(self, mock_create): self.proxy.start_introspection('abcd') mock_create.assert_called_once_with(mock.ANY, self.proxy) introspect = mock_create.call_args[0][0] self.assertEqual('abcd', introspect.id) def test_create_introspection_with_node(self, mock_create): self.proxy.start_introspection(_node.Node(id='abcd')) mock_create.assert_called_once_with(mock.ANY, self.proxy) introspect = mock_create.call_args[0][0] self.assertEqual('abcd', introspect.id) def test_create_introspection_manage_boot(self, mock_create): self.proxy.start_introspection('abcd', manage_boot=False) mock_create.assert_called_once_with( mock.ANY, self.proxy, manage_boot=False ) introspect = mock_create.call_args[0][0] self.assertEqual('abcd', introspect.id) class TestBaremetalIntrospectionProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_get_introspection(self): 
self.verify_get( self.proxy.get_introspection, introspection.Introspection ) @mock.patch('time.sleep', lambda _sec: None) @mock.patch.object(introspection.Introspection, 'fetch', autospec=True) class TestWaitForIntrospection(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock(spec=adapter.Adapter) self.proxy = _proxy.Proxy(self.session) self.fake = {'state': 'waiting', 'error': None, 'finished': False} self.introspection = introspection.Introspection(**self.fake) def test_already_finished(self, mock_fetch): self.introspection.is_finished = True self.introspection.state = 'finished' result = self.proxy.wait_for_introspection(self.introspection) self.assertIs(result, self.introspection) self.assertFalse(mock_fetch.called) def test_wait(self, mock_fetch): marker = [False] # mutable object to modify in the closure def _side_effect(allocation, session): if marker[0]: self.introspection.state = 'finished' self.introspection.is_finished = True else: self.introspection.state = 'processing' marker[0] = True mock_fetch.side_effect = _side_effect result = self.proxy.wait_for_introspection(self.introspection) self.assertIs(result, self.introspection) self.assertEqual(2, mock_fetch.call_count) def test_timeout(self, mock_fetch): self.assertRaises( exceptions.ResourceTimeout, self.proxy.wait_for_introspection, self.introspection, timeout=0.001, ) mock_fetch.assert_called_with(self.introspection, self.proxy) def test_failure(self, mock_fetch): def _side_effect(allocation, session): self.introspection.state = 'error' self.introspection.is_finished = True self.introspection.error = 'boom' mock_fetch.side_effect = _side_effect self.assertRaisesRegex( exceptions.ResourceFailure, 'boom', self.proxy.wait_for_introspection, self.introspection, ) mock_fetch.assert_called_once_with(self.introspection, self.proxy) def test_failure_ignored(self, mock_fetch): def _side_effect(allocation, session): self.introspection.state = 'error' self.introspection.is_finished = True 
self.introspection.error = 'boom' mock_fetch.side_effect = _side_effect result = self.proxy.wait_for_introspection( self.introspection, ignore_error=True ) self.assertIs(result, self.introspection) mock_fetch.assert_called_once_with(self.introspection, self.proxy) @mock.patch.object(_proxy.Proxy, 'request', autospec=True) class TestAbortIntrospection(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock(spec=adapter.Adapter) self.proxy = _proxy.Proxy(self.session) self.fake = {'id': '1234', 'finished': False} self.introspection = introspection.Introspection(**self.fake) def test_abort(self, mock_request): mock_request.return_value.status_code = 202 self.proxy.abort_introspection(self.introspection) mock_request.assert_called_once_with( self.proxy, 'introspection/1234/abort', 'POST', headers=mock.ANY, microversion=mock.ANY, retriable_status_codes=[409, 503], ) @mock.patch.object(_proxy.Proxy, 'request', autospec=True) class TestGetData(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock(spec=adapter.Adapter) self.proxy = _proxy.Proxy(self.session) self.fake = {'id': '1234', 'finished': False} self.introspection = introspection.Introspection(**self.fake) def test_get_data(self, mock_request): mock_request.return_value.status_code = 200 data = self.proxy.get_introspection_data(self.introspection) mock_request.assert_called_once_with( self.proxy, 'introspection/1234/data', 'GET', headers=mock.ANY, microversion=mock.ANY, ) self.assertIs(data, mock_request.return_value.json.return_value) def test_get_unprocessed_data(self, mock_request): mock_request.return_value.status_code = 200 data = self.proxy.get_introspection_data( self.introspection, processed=False ) mock_request.assert_called_once_with( self.proxy, 'introspection/1234/data/unprocessed', 'GET', headers=mock.ANY, microversion='1.17', ) self.assertIs(data, mock_request.return_value.json.return_value) class TestIntrospectionRule(test_proxy_base.TestProxyBase): def 
setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_introspection_rule_create(self): self.verify_create( self.proxy.create_introspection_rule, introspection_rule.IntrospectionRule, ) def test_introspection_rule_delete(self): self.verify_delete( self.proxy.delete_introspection_rule, introspection_rule.IntrospectionRule, False, ) def test_introspection_rule_delete_ignore(self): self.verify_delete( self.proxy.delete_introspection_rule, introspection_rule.IntrospectionRule, True, ) def test_introspection_rule_get(self): self.verify_get( self.proxy.get_introspection_rule, introspection_rule.IntrospectionRule, ) def test_introspection_rules(self): self.verify_list( self.proxy.introspection_rules, introspection_rule.IntrospectionRule, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/base.py0000664000175000017500000010437400000000000021634 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import os import tempfile import time import urllib import uuid import fixtures from keystoneauth1 import loading as ks_loading from oslo_config import cfg from requests import structures from requests_mock.contrib import fixture as rm_fixture import openstack.cloud import openstack.config as occ import openstack.connection from openstack.fixture import connection as os_fixture from openstack.tests import base from openstack.tests import fakes _ProjectData = collections.namedtuple( 'ProjectData', 'project_id, project_name, enabled, domain_id, description, ' 'parent_id, json_response, json_request', ) _UserData = collections.namedtuple( 'UserData', 'user_id, password, name, email, description, domain_id, enabled, ' 'json_response, json_request', ) _GroupData = collections.namedtuple( 'GroupData', 'group_id, group_name, domain_id, description, json_response, ' 'json_request', ) _DomainData = collections.namedtuple( 'DomainData', 'domain_id, domain_name, description, json_response, json_request', ) _ServiceData = collections.namedtuple( 'Servicedata', 'service_id, service_name, service_type, description, enabled, ' 'json_response_v3, json_response_v2, json_request', ) _EndpointDataV3 = collections.namedtuple( 'EndpointData', 'endpoint_id, service_id, interface, region_id, url, enabled, ' 'json_response, json_request', ) # NOTE(notmorgan): Shade does not support domain-specific roles # This should eventually be fixed if it becomes a main-stream feature. 
_RoleData = collections.namedtuple( 'RoleData', 'role_id, role_name, json_response, json_request' ) class TestCase(base.TestCase): strict_cloud = False def setUp(self, cloud_config_fixture='clouds.yaml'): """Run before each test method to initialize test environment.""" super().setUp() # Sleeps are for real testing, but unit tests shouldn't need them realsleep = time.sleep def _nosleep(seconds): return realsleep(seconds * 0.0001) self.sleep_fixture = self.useFixture( fixtures.MonkeyPatch('time.sleep', _nosleep) ) self.fixtures_directory = 'openstack/tests/unit/fixtures' self.os_fixture = self.useFixture( os_fixture.ConnectionFixture(project_id=fakes.PROJECT_ID) ) # Isolate openstack.config from test environment self.os_cloud_fixture = self.useFixture( fixtures.EnvironmentVariable('OS_CLOUD'), ) config = tempfile.NamedTemporaryFile(delete=False) cloud_path = os.path.join( self.fixtures_directory, 'clouds', cloud_config_fixture, ) with open(cloud_path, 'rb') as f: content = f.read() config.write(content) config.close() vendor = tempfile.NamedTemporaryFile(delete=False) vendor.write(b'{}') vendor.close() self.config = occ.OpenStackConfig( config_files=[config.name], vendor_files=[vendor.name], secure_files=['non-existant'], ) self.oslo_config_dict = { # All defaults for nova 'nova': {}, # monasca-api not in the service catalog 'monasca-api': {}, # Overrides for heat 'heat': { 'region_name': 'SpecialRegion', 'interface': 'internal', 'endpoint_override': 'https://example.org:8888/heat/v2', }, # test a service with dashes 'ironic_inspector': { 'endpoint_override': 'https://example.org:5050', }, } # FIXME(notmorgan): Convert the uri_registry, discovery.json, and # use of keystone_v3/v2 to a proper fixtures.Fixture. For now this # is acceptable, but eventually this should become it's own fixture # that encapsulates the registry, registering the URIs, and # assert_calls (and calling assert_calls every test case that uses # it on cleanup). 
Subclassing here could be 100% eliminated in the # future allowing any class to simply # self.useFixture(openstack.cloud.RequestsMockFixture) and get all # the benefits. # NOTE(notmorgan): use an ordered dict here to ensure we preserve the # order in which items are added to the uri_registry. This makes # the behavior more consistent when dealing with ensuring the # requests_mock uri/query_string matchers are ordered and parse the # request in the correct orders. self._uri_registry = collections.OrderedDict() self.discovery_json = os.path.join( self.fixtures_directory, 'discovery.json' ) self.use_keystone_v3() self.__register_uris_called = False def _load_ks_cfg_opts(self): conf = cfg.ConfigOpts() for group, opts in self.oslo_config_dict.items(): conf.register_group(cfg.OptGroup(group)) if opts is not None: ks_loading.register_adapter_conf_options(conf, group) for name, val in opts.items(): conf.set_override(name, val, group=group) return conf # TODO(shade) Update this to handle service type aliases def get_mock_url( self, service_type, interface='public', resource=None, append=None, base_url_append=None, qs_elements=None, ): endpoint_url = self.cloud.endpoint_for( service_type=service_type, interface=interface ) # Strip trailing slashes, so as not to produce double-slashes below if endpoint_url.endswith('/'): endpoint_url = endpoint_url[:-1] to_join = [endpoint_url] qs = '' if base_url_append: to_join.append(base_url_append) if resource: to_join.append(resource) if append: to_join.extend([urllib.parse.quote(i) for i in append]) if qs_elements is not None: qs = '?%s' % '&'.join(qs_elements) return '{uri}{qs}'.format(uri='/'.join(to_join), qs=qs) def mock_for_keystone_projects( self, project=None, v3=True, list_get=False, id_get=False, project_list=None, project_count=None, ): if project: assert not (project_list or project_count) elif project_list: assert not (project or project_count) elif project_count: assert not (project or project_list) else: raise Exception( 
'Must specify a project, project_list, or project_count' ) assert list_get or id_get base_url_append = 'v3' if v3 else None if project: project_list = [project] elif project_count: # Generate multiple projects project_list = [ self._get_project_data(v3=v3) for c in range(0, project_count) ] uri_mock_list = [] if list_get: uri_mock_list.append( dict( method='GET', uri=self.get_mock_url( service_type='identity', interface='admin', resource='projects', base_url_append=base_url_append, ), status_code=200, json={ 'projects': [ p.json_response['project'] for p in project_list ] }, ) ) if id_get: for p in project_list: uri_mock_list.append( dict( method='GET', uri=self.get_mock_url( service_type='identity', interface='admin', resource='projects', append=[p.project_id], base_url_append=base_url_append, ), status_code=200, json=p.json_response, ) ) self.__do_register_uris(uri_mock_list) return project_list def _get_project_data( self, project_name=None, enabled=None, domain_id=None, description=None, v3=True, project_id=None, parent_id=None, ): project_name = project_name or self.getUniqueString('projectName') project_id = uuid.UUID(project_id or uuid.uuid4().hex).hex if parent_id: parent_id = uuid.UUID(parent_id).hex response = {'id': project_id, 'name': project_name} request = {'name': project_name} domain_id = (domain_id or uuid.uuid4().hex) if v3 else None if domain_id: request['domain_id'] = domain_id response['domain_id'] = domain_id if enabled is not None: enabled = bool(enabled) response['enabled'] = enabled request['enabled'] = enabled if parent_id: request['parent_id'] = parent_id response['parent_id'] = parent_id response.setdefault('enabled', True) request.setdefault('enabled', True) if description: response['description'] = description request['description'] = description request.setdefault('description', None) return _ProjectData( project_id, project_name, enabled, domain_id, description, parent_id, {'project': response}, {'project': request}, ) def 
_get_group_data(self, name=None, domain_id=None, description=None): group_id = uuid.uuid4().hex name = name or self.getUniqueString('groupname') domain_id = uuid.UUID(domain_id or uuid.uuid4().hex).hex response = {'id': group_id, 'name': name, 'domain_id': domain_id} request = {'name': name, 'domain_id': domain_id} if description is not None: response['description'] = description request['description'] = description return _GroupData( group_id, name, domain_id, description, {'group': response}, {'group': request}, ) def _get_user_data(self, name=None, password=None, **kwargs): name = name or self.getUniqueString('username') password = password or self.getUniqueString('user_password') user_id = uuid.uuid4().hex response = {'name': name, 'id': user_id} request = {'name': name, 'password': password} if kwargs.get('domain_id'): kwargs['domain_id'] = uuid.UUID(kwargs['domain_id']).hex response['domain_id'] = kwargs.pop('domain_id') request['domain_id'] = response['domain_id'] response['email'] = kwargs.pop('email', None) request['email'] = response['email'] response['enabled'] = kwargs.pop('enabled', True) request['enabled'] = response['enabled'] response['description'] = kwargs.pop('description', None) if response['description']: request['description'] = response['description'] self.assertIs( 0, len(kwargs), message='extra key-word args received on _get_user_data', ) return _UserData( user_id, password, name, response['email'], response['description'], response.get('domain_id'), response.get('enabled'), {'user': response}, {'user': request}, ) def _get_domain_data( self, domain_name=None, description=None, enabled=None ): domain_id = uuid.uuid4().hex domain_name = domain_name or self.getUniqueString('domainName') response = {'id': domain_id, 'name': domain_name} request = {'name': domain_name} if enabled is not None: request['enabled'] = bool(enabled) response['enabled'] = bool(enabled) if description: response['description'] = description request['description'] = 
description response.setdefault('enabled', True) return _DomainData( domain_id, domain_name, description, {'domain': response}, {'domain': request}, ) def _get_service_data( self, type=None, name=None, description=None, enabled=True ): service_id = uuid.uuid4().hex name = name or uuid.uuid4().hex type = type or uuid.uuid4().hex response = { 'id': service_id, 'name': name, 'type': type, 'enabled': enabled, } if description is not None: response['description'] = description request = response.copy() request.pop('id') return _ServiceData( service_id, name, type, description, enabled, {'service': response}, {'OS-KSADM:service': response}, request, ) def _get_endpoint_v3_data( self, service_id=None, region=None, url=None, interface=None, enabled=True, ): endpoint_id = uuid.uuid4().hex service_id = service_id or uuid.uuid4().hex region = region or uuid.uuid4().hex url = url or 'https://example.com/' interface = interface or uuid.uuid4().hex response = { 'id': endpoint_id, 'service_id': service_id, 'region_id': region, 'interface': interface, 'url': url, 'enabled': enabled, } request = response.copy() request.pop('id') return _EndpointDataV3( endpoint_id, service_id, interface, region, url, enabled, {'endpoint': response}, {'endpoint': request}, ) def _get_role_data(self, role_name=None): role_id = uuid.uuid4().hex role_name = role_name or uuid.uuid4().hex request = {'name': role_name} response = request.copy() response['id'] = role_id return _RoleData( role_id, role_name, {'role': response}, {'role': request} ) def use_broken_keystone(self): self.adapter = self.useFixture(rm_fixture.Fixture()) self.calls = [] self._uri_registry.clear() self.__do_register_uris( [ dict( method='GET', uri='https://identity.example.com/', text=open(self.discovery_json).read(), ), dict( method='POST', uri='https://identity.example.com/v3/auth/tokens', status_code=400, ), ] ) self._make_test_cloud(identity_api_version='3') def use_nothing(self): self.calls = [] self._uri_registry.clear() def 
get_keystone_v3_token( self, project_name='admin', ): return dict( method='POST', uri='https://identity.example.com/v3/auth/tokens', headers={'X-Subject-Token': self.getUniqueString('KeystoneToken')}, json=self.os_fixture.v3_token, validate=dict( json={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'domain': { 'name': 'default', }, 'name': 'admin', 'password': 'password', } }, }, 'scope': { 'project': { 'domain': {'name': 'default'}, 'name': project_name, } }, } } ), ) def get_keystone_discovery(self): with open(self.discovery_json) as discovery_file: return dict( method='GET', uri='https://identity.example.com/', text=discovery_file.read(), ) def use_keystone_v3(self): self.adapter = self.useFixture(rm_fixture.Fixture()) self.calls = [] self._uri_registry.clear() self.__do_register_uris( [ self.get_keystone_discovery(), self.get_keystone_v3_token(), ] ) self._make_test_cloud(identity_api_version='3') def use_keystone_v2(self): self.adapter = self.useFixture(rm_fixture.Fixture()) self.calls = [] self._uri_registry.clear() self.__do_register_uris( [ self.get_keystone_discovery(), dict( method='POST', uri='https://identity.example.com/v2.0/tokens', json=self.os_fixture.v2_token, ), ] ) self._make_test_cloud( cloud_name='_test_cloud_v2_', identity_api_version='2.0' ) def _make_test_cloud(self, cloud_name='_test_cloud_', **kwargs): test_cloud = os.environ.get('OPENSTACKSDK_OS_CLOUD', cloud_name) self.cloud_config = self.config.get_one( cloud=test_cloud, validate=True, **kwargs ) self.cloud = openstack.connection.Connection( config=self.cloud_config, strict=self.strict_cloud ) def get_cinder_discovery_mock_dict( self, block_storage_version_json='block-storage-version.json', block_storage_discovery_url='https://block-storage.example.com/', ): discovery_fixture = os.path.join( self.fixtures_directory, block_storage_version_json ) return dict( method='GET', uri=block_storage_discovery_url, text=open(discovery_fixture).read(), ) def 
get_glance_discovery_mock_dict( self, image_version_json='image-version.json', image_discovery_url='https://image.example.com/', ): discovery_fixture = os.path.join( self.fixtures_directory, image_version_json ) return dict( method='GET', uri=image_discovery_url, status_code=300, text=open(discovery_fixture).read(), ) def get_nova_discovery_mock_dict( self, compute_version_json='compute-version.json', compute_discovery_url='https://compute.example.com/v2.1/', ): discovery_fixture = os.path.join( self.fixtures_directory, compute_version_json ) return dict( method='GET', uri=compute_discovery_url, text=open(discovery_fixture).read(), ) def get_placement_discovery_mock_dict( self, discovery_fixture='placement.json' ): discovery_fixture = os.path.join( self.fixtures_directory, discovery_fixture ) return dict( method='GET', uri="https://placement.example.com/", text=open(discovery_fixture).read(), ) def get_designate_discovery_mock_dict(self): discovery_fixture = os.path.join(self.fixtures_directory, "dns.json") return dict( method='GET', uri="https://dns.example.com/", text=open(discovery_fixture).read(), ) def get_ironic_discovery_mock_dict(self): discovery_fixture = os.path.join( self.fixtures_directory, "baremetal.json" ) return dict( method='GET', uri="https://baremetal.example.com/", text=open(discovery_fixture).read(), ) def get_senlin_discovery_mock_dict(self): discovery_fixture = os.path.join( self.fixtures_directory, "clustering.json" ) return dict( method='GET', uri="https://clustering.example.com/", text=open(discovery_fixture).read(), ) def use_compute_discovery( self, compute_version_json='compute-version.json', compute_discovery_url='https://compute.example.com/v2.1/', ): self.__do_register_uris( [ self.get_nova_discovery_mock_dict( compute_version_json, compute_discovery_url ), ] ) def get_cyborg_discovery_mock_dict(self): discovery_fixture = os.path.join( self.fixtures_directory, "accelerator.json" ) return dict( method='GET', 
uri="https://accelerator.example.com/", text=open(discovery_fixture).read(), ) def get_manila_discovery_mock_dict(self): discovery_fixture = os.path.join( self.fixtures_directory, "shared-file-system.json" ) return dict( method='GET', uri="https://shared-file-system.example.com/", text=open(discovery_fixture).read(), ) def use_glance( self, image_version_json='image-version.json', image_discovery_url='https://image.example.com/', ): # NOTE(notmorgan): This method is only meant to be used in "setUp" # where the ordering of the url being registered is tightly controlled # if the functionality of .use_glance is meant to be used during an # actual test case, use .get_glance_discovery_mock and apply to the # right location in the mock_uris when calling .register_uris self.__do_register_uris( [ self.get_glance_discovery_mock_dict( image_version_json, image_discovery_url ) ] ) def use_cinder(self): self.__do_register_uris([self.get_cinder_discovery_mock_dict()]) def use_placement(self, **kwargs): self.__do_register_uris( [self.get_placement_discovery_mock_dict(**kwargs)] ) def use_designate(self): # NOTE(slaweq): This method is only meant to be used in "setUp" # where the ordering of the url being registered is tightly controlled # if the functionality of .use_designate is meant to be used during an # actual test case, use .get_designate_discovery_mock and apply to the # right location in the mock_uris when calling .register_uris self.__do_register_uris([self.get_designate_discovery_mock_dict()]) def use_ironic(self): # NOTE(TheJulia): This method is only meant to be used in "setUp" # where the ordering of the url being registered is tightly controlled # if the functionality of .use_ironic is meant to be used during an # actual test case, use .get_ironic_discovery_mock and apply to the # right location in the mock_uris when calling .register_uris self.__do_register_uris([self.get_ironic_discovery_mock_dict()]) def use_senlin(self): # NOTE(elachance): This method is only 
meant to be used in "setUp" # where the ordering of the url being registered is tightly controlled # if the functionality of .use_senlin is meant to be used during an # actual test case, use .get_senlin_discovery_mock and apply to the # right location in the mock_uris when calling .register_uris self.__do_register_uris([self.get_senlin_discovery_mock_dict()]) def use_cyborg(self): # NOTE(s_shogo): This method is only meant to be used in "setUp" # where the ordering of the url being registered is tightly controlled # if the functionality of .use_cyborg is meant to be used during an # actual test case, use .get_cyborg_discovery_mock and apply to the # right location in the mock_uris when calling .register_uris self.__do_register_uris([self.get_cyborg_discovery_mock_dict()]) def use_manila(self): # NOTE(gouthamr): This method is only meant to be used in "setUp" # where the ordering of the url being registered is tightly controlled # if the functionality of .use_manila is meant to be used during an # actual test case, use .get_manila_discovery_mock and apply to the # right location in the mock_uris when calling .register_uris self.__do_register_uris([self.get_manila_discovery_mock_dict()]) def register_uris(self, uri_mock_list=None): """Mock a list of URIs and responses via requests mock. This method may be called only once per test-case to avoid odd and difficult to debug interactions. Discovery and Auth request mocking happens separately from this method. :param uri_mock_list: List of dictionaries that template out what is passed to requests_mock fixture's `register_uri`. Format is: {'method': , 'uri': , ... } Common keys to pass in the dictionary: * json: the json response (dict) * status_code: the HTTP status (int) * validate: The request body (dict) to validate with assert_calls all key-word arguments that are valid to send to requests_mock are supported. This list should be in the order in which calls are made. 
When `assert_calls` is executed, order here will be validated. Duplicate URIs and Methods are allowed and will be collapsed into a single matcher. Each response will be returned in order as the URI+Method is hit. :type uri_mock_list: list :return: None """ assert not self.__register_uris_called self.__do_register_uris(uri_mock_list or []) self.__register_uris_called = True def __do_register_uris(self, uri_mock_list=None): for to_mock in uri_mock_list: kw_params = { k: to_mock.pop(k) for k in ('request_headers', 'complete_qs', '_real_http') if k in to_mock } method = to_mock.pop('method') uri = to_mock.pop('uri') # NOTE(notmorgan): make sure the delimiter is non-url-safe, in this # case "|" is used so that the split can be a bit easier on # maintainers of this code. key = '{method}|{uri}|{params}'.format( method=method, uri=uri, params=kw_params ) validate = to_mock.pop('validate', {}) valid_keys = {'json', 'headers', 'params', 'data'} invalid_keys = set(validate.keys()) - valid_keys if invalid_keys: raise TypeError( "Invalid values passed to validate: {keys}".format( keys=invalid_keys ) ) headers = structures.CaseInsensitiveDict( to_mock.pop('headers', {}) ) if 'content-type' not in headers: headers['content-type'] = 'application/json' if 'exc' not in to_mock: to_mock['headers'] = headers self.calls += [dict(method=method, url=uri, **validate)] self._uri_registry.setdefault( key, {'response_list': [], 'kw_params': kw_params} ) if self._uri_registry[key]['kw_params'] != kw_params: raise AssertionError( 'PROGRAMMING ERROR: key-word-params ' 'should be part of the uri_key and cannot change, ' 'it will affect the matcher in requests_mock. 
' '%(old)r != %(new)r' % { 'old': self._uri_registry[key]['kw_params'], 'new': kw_params, } ) self._uri_registry[key]['response_list'].append(to_mock) for mocked, params in self._uri_registry.items(): mock_method, mock_uri, _ignored = mocked.split('|', 2) self.adapter.register_uri( mock_method, mock_uri, params['response_list'], **params['kw_params'], ) def assert_no_calls(self): # TODO(mordred) For now, creating the adapter for self.conn is # triggering catalog lookups. Make sure no_calls is only 2. # When we can make that on-demand through a descriptor object, # drop this to 0. self.assertEqual(2, len(self.adapter.request_history)) def assert_calls(self, stop_after=None, do_count=True): for x, (call, history) in enumerate( zip(self.calls, self.adapter.request_history) ): if stop_after and x > stop_after: break call_uri_parts = urllib.parse.urlparse(call['url']) history_uri_parts = urllib.parse.urlparse(history.url) self.assertEqual( ( call['method'], call_uri_parts.scheme, call_uri_parts.netloc, call_uri_parts.path, call_uri_parts.params, urllib.parse.parse_qs(call_uri_parts.query), ), ( history.method, history_uri_parts.scheme, history_uri_parts.netloc, history_uri_parts.path, history_uri_parts.params, urllib.parse.parse_qs(history_uri_parts.query), ), ( 'REST mismatch on call %(index)d. Expected %(call)r. ' 'Got %(history)r). 
' 'NOTE: query string order differences wont cause mismatch' % { 'index': x, 'call': '{method} {url}'.format( method=call['method'], url=call['url'] ), 'history': '{method} {url}'.format( method=history.method, url=history.url ), } ), ) if 'json' in call: self.assertEqual( call['json'], history.json(), f'json content mismatch in call {x}', ) # headers in a call isn't exhaustive - it's checking to make sure # a specific header or headers are there, not that they are the # only headers if 'headers' in call: for key, value in call['headers'].items(): self.assertEqual( value, history.headers[key], f'header mismatch in call {x}', ) if do_count: self.assertEqual( len(self.calls), len(self.adapter.request_history), "Expected:\n{}'\nGot:\n{}".format( '\n'.join([c['url'] for c in self.calls]), '\n'.join([h.url for h in self.adapter.request_history]), ), ) def assertResourceEqual(self, actual, expected, resource_type): """Helper for the assertEqual which compares Resource object against dictionary representing expected state. :param Resource actual: actual object. :param dict expected: dictionary representing expected object. :param class resource_type: class type to be applied for the expected resource. """ return self.assertEqual( resource_type(**expected).to_dict(computed=False), actual.to_dict(computed=False), ) def assertResourceListEqual(self, actual, expected, resource_type): """Helper for the assertEqual which compares Resource lists object against dictionary representing expected state. :param list actual: List of actual objects. :param listexpected: List of dictionaries representing expected objects. :param class resource_type: class type to be applied for the expected resource. 
""" self.assertEqual( [resource_type(**f).to_dict(computed=False) for f in expected], [f.to_dict(computed=False) for f in actual], ) class IronicTestCase(TestCase): def setUp(self): super().setUp() self.use_ironic() self.uuid = str(uuid.uuid4()) self.name = self.getUniqueString('name') def get_mock_url(self, **kwargs): kwargs.setdefault('service_type', 'baremetal') kwargs.setdefault('interface', 'public') kwargs.setdefault('base_url_append', 'v1') return super().get_mock_url(**kwargs) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.3973866 openstacksdk-4.0.0/openstack/tests/unit/block_storage/0000775000175000017500000000000000000000000023155 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/__init__.py0000664000175000017500000000000000000000000025254 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4013886 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/0000775000175000017500000000000000000000000023504 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/__init__.py0000664000175000017500000000000000000000000025603 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/test_backup.py0000664000175000017500000001420600000000000026365 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v2 import backup from openstack import exceptions from openstack.tests.unit import base FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" BACKUP = { "availability_zone": "az1", "container": "volumebackups", "created_at": "2018-04-02T10:35:27.000000", "updated_at": "2018-04-03T10:35:27.000000", "description": 'description', "fail_reason": 'fail reason', "id": FAKE_ID, "name": "backup001", "object_count": 22, "size": 1, "status": "available", "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", "is_incremental": True, "has_dependent_backups": False, } class TestBackup(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.headers = {} self.resp.status_code = 202 self.sess = mock.Mock(spec=adapter.Adapter) self.sess.get = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) self.sess.default_microversion = None def test_basic(self): sot = backup.Backup(BACKUP) self.assertEqual("backup", sot.resource_key) self.assertEqual("backups", sot.resources_key) self.assertEqual("/backups", sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_get) self.assertTrue(sot.allow_fetch) self.assertDictEqual( { "all_tenants": "all_tenants", "limit": "limit", "marker": "marker", "name": "name", "project_id": "project_id", "sort_dir": "sort_dir", "sort_key": "sort_key", "status": "status", "volume_id": 
"volume_id", }, sot._query_mapping._mapping, ) def test_create(self): sot = backup.Backup(**BACKUP) self.assertEqual(BACKUP["id"], sot.id) self.assertEqual(BACKUP["name"], sot.name) self.assertEqual(BACKUP["status"], sot.status) self.assertEqual(BACKUP["container"], sot.container) self.assertEqual(BACKUP["availability_zone"], sot.availability_zone) self.assertEqual(BACKUP["created_at"], sot.created_at) self.assertEqual(BACKUP["updated_at"], sot.updated_at) self.assertEqual(BACKUP["description"], sot.description) self.assertEqual(BACKUP["fail_reason"], sot.fail_reason) self.assertEqual(BACKUP["volume_id"], sot.volume_id) self.assertEqual(BACKUP["object_count"], sot.object_count) self.assertEqual(BACKUP["is_incremental"], sot.is_incremental) self.assertEqual(BACKUP["size"], sot.size) self.assertEqual( BACKUP["has_dependent_backups"], sot.has_dependent_backups ) def test_create_incremental(self): sot = backup.Backup(is_incremental=True) sot2 = backup.Backup(is_incremental=False) create_response = mock.Mock() create_response.status_code = 200 create_response.json.return_value = {} create_response.headers = {} self.sess.post.return_value = create_response sot.create(self.sess) self.sess.post.assert_called_with( '/backups', headers={}, json={ 'backup': { 'incremental': True, } }, microversion=None, params={}, ) sot2.create(self.sess) self.sess.post.assert_called_with( '/backups', headers={}, json={ 'backup': { 'incremental': False, } }, microversion=None, params={}, ) def test_restore(self): sot = backup.Backup(**BACKUP) self.assertEqual(sot, sot.restore(self.sess, 'vol', 'name')) url = 'backups/%s/restore' % FAKE_ID body = {"restore": {"volume_id": "vol", "name": "name"}} self.sess.post.assert_called_with(url, json=body) def test_restore_name(self): sot = backup.Backup(**BACKUP) self.assertEqual(sot, sot.restore(self.sess, name='name')) url = 'backups/%s/restore' % FAKE_ID body = {"restore": {"name": "name"}} self.sess.post.assert_called_with(url, json=body) def 
test_restore_vol_id(self): sot = backup.Backup(**BACKUP) self.assertEqual(sot, sot.restore(self.sess, volume_id='vol')) url = 'backups/%s/restore' % FAKE_ID body = {"restore": {"volume_id": "vol"}} self.sess.post.assert_called_with(url, json=body) def test_restore_no_params(self): sot = backup.Backup(**BACKUP) self.assertRaises(exceptions.SDKException, sot.restore, self.sess) def test_force_delete(self): sot = backup.Backup(**BACKUP) self.assertIsNone(sot.force_delete(self.sess)) url = 'backups/%s/action' % FAKE_ID body = {'os-force_delete': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_reset(self): sot = backup.Backup(**BACKUP) self.assertIsNone(sot.reset(self.sess, 'new_status')) url = 'backups/%s/action' % FAKE_ID body = {'os-reset_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/test_capabilities.py0000664000175000017500000000722600000000000027555 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.block_storage.v2 import capabilities from openstack.tests.unit import base CAPABILITIES = { "namespace": "OS::Storage::Capabilities::fake", "vendor_name": "OpenStack", "volume_backend_name": "lvmdriver-1", "pool_name": "pool", "driver_version": "2.0.0", "storage_protocol": "iSCSI", "display_name": "Capabilities of Cinder LVM driver", "description": "These are volume type options", "visibility": "public", "replication_targets": [], "properties": { "compression": { "title": "Compression", "description": "Enables compression.", "type": "boolean", }, "qos": { "title": "QoS", "description": "Enables QoS.", "type": "boolean", }, "replication": { "title": "Replication", "description": "Enables replication.", "type": "boolean", }, "thin_provisioning": { "title": "Thin Provisioning", "description": "Sets thin provisioning.", "type": "boolean", }, }, } class TestCapabilites(base.TestCase): def test_basic(self): capabilities_resource = capabilities.Capabilities() self.assertEqual(None, capabilities_resource.resource_key) self.assertEqual(None, capabilities_resource.resources_key) self.assertEqual("/capabilities", capabilities_resource.base_path) self.assertTrue(capabilities_resource.allow_fetch) self.assertFalse(capabilities_resource.allow_create) self.assertFalse(capabilities_resource.allow_commit) self.assertFalse(capabilities_resource.allow_delete) self.assertFalse(capabilities_resource.allow_list) def test_make_capabilities(self): capabilities_resource = capabilities.Capabilities(**CAPABILITIES) self.assertEqual( CAPABILITIES["description"], capabilities_resource.description ) self.assertEqual( CAPABILITIES["display_name"], capabilities_resource.display_name ) self.assertEqual( CAPABILITIES["driver_version"], capabilities_resource.driver_version, ) self.assertEqual( CAPABILITIES["namespace"], capabilities_resource.namespace ) self.assertEqual( CAPABILITIES["pool_name"], capabilities_resource.pool_name ) self.assertEqual( CAPABILITIES["properties"], 
capabilities_resource.properties ) self.assertEqual( CAPABILITIES["replication_targets"], capabilities_resource.replication_targets, ) self.assertEqual( CAPABILITIES["storage_protocol"], capabilities_resource.storage_protocol, ) self.assertEqual( CAPABILITIES["vendor_name"], capabilities_resource.vendor_name ) self.assertEqual( CAPABILITIES["visibility"], capabilities_resource.visibility ) self.assertEqual( CAPABILITIES["volume_backend_name"], capabilities_resource.volume_backend_name, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/test_extension.py0000664000175000017500000000371700000000000027141 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.block_storage.v2 import extension from openstack.tests.unit import base EXTENSION = { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "namespace": "https://docs.openstack.org/volume/ext/hosts/api/v1.1", "updated": "2011-06-29T00:00:00+00:00", } class TestExtension(base.TestCase): def test_basic(self): extension_resource = extension.Extension() self.assertEqual('extensions', extension_resource.resources_key) self.assertEqual('/extensions', extension_resource.base_path) self.assertFalse(extension_resource.allow_create) self.assertFalse(extension_resource.allow_fetch) self.assertFalse(extension_resource.allow_commit) self.assertFalse(extension_resource.allow_delete) self.assertTrue(extension_resource.allow_list) def test_make_extension(self): extension_resource = extension.Extension(**EXTENSION) self.assertEqual(EXTENSION['alias'], extension_resource.alias) self.assertEqual( EXTENSION['description'], extension_resource.description ) self.assertEqual(EXTENSION['links'], extension_resource.links) self.assertEqual(EXTENSION['name'], extension_resource.name) self.assertEqual(EXTENSION['namespace'], extension_resource.namespace) self.assertEqual(EXTENSION['updated'], extension_resource.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/test_limits.py0000664000175000017500000001752500000000000026430 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v2 import limits from openstack.tests.unit import base ABSOLUTE_LIMIT = { "totalSnapshotsUsed": 1, "maxTotalBackups": 10, "maxTotalVolumeGigabytes": 1000, "maxTotalSnapshots": 10, "maxTotalBackupGigabytes": 1000, "totalBackupGigabytesUsed": 1, "maxTotalVolumes": 10, "totalVolumesUsed": 2, "totalBackupsUsed": 3, "totalGigabytesUsed": 2, } RATE_LIMIT = { "verb": "POST", "value": 80, "remaining": 80, "unit": "MINUTE", "next-available": "2021-02-23T22:08:00Z", } RATE_LIMITS = {"regex": ".*", "uri": "*", "limit": [RATE_LIMIT]} LIMIT = {"rate": [RATE_LIMITS], "absolute": ABSOLUTE_LIMIT} class TestAbsoluteLimit(base.TestCase): def test_basic(self): limit_resource = limits.AbsoluteLimit() self.assertIsNone(limit_resource.resource_key) self.assertIsNone(limit_resource.resources_key) self.assertEqual('', limit_resource.base_path) self.assertFalse(limit_resource.allow_create) self.assertFalse(limit_resource.allow_fetch) self.assertFalse(limit_resource.allow_delete) self.assertFalse(limit_resource.allow_commit) self.assertFalse(limit_resource.allow_list) def test_make_absolute_limit(self): limit_resource = limits.AbsoluteLimit(**ABSOLUTE_LIMIT) self.assertEqual( ABSOLUTE_LIMIT['totalSnapshotsUsed'], limit_resource.total_snapshots_used, ) self.assertEqual( ABSOLUTE_LIMIT['maxTotalBackups'], limit_resource.max_total_backups ) self.assertEqual( ABSOLUTE_LIMIT['maxTotalVolumeGigabytes'], limit_resource.max_total_volume_gigabytes, ) self.assertEqual( ABSOLUTE_LIMIT['maxTotalSnapshots'], limit_resource.max_total_snapshots, ) self.assertEqual( ABSOLUTE_LIMIT['maxTotalBackupGigabytes'], limit_resource.max_total_backup_gigabytes, ) self.assertEqual( ABSOLUTE_LIMIT['totalBackupGigabytesUsed'], limit_resource.total_backup_gigabytes_used, ) self.assertEqual( ABSOLUTE_LIMIT['maxTotalVolumes'], limit_resource.max_total_volumes ) self.assertEqual( 
ABSOLUTE_LIMIT['totalVolumesUsed'], limit_resource.total_volumes_used, ) self.assertEqual( ABSOLUTE_LIMIT['totalBackupsUsed'], limit_resource.total_backups_used, ) self.assertEqual( ABSOLUTE_LIMIT['totalGigabytesUsed'], limit_resource.total_gigabytes_used, ) class TestRateLimit(base.TestCase): def test_basic(self): limit_resource = limits.RateLimit() self.assertIsNone(limit_resource.resource_key) self.assertIsNone(limit_resource.resources_key) self.assertEqual('', limit_resource.base_path) self.assertFalse(limit_resource.allow_create) self.assertFalse(limit_resource.allow_fetch) self.assertFalse(limit_resource.allow_delete) self.assertFalse(limit_resource.allow_commit) self.assertFalse(limit_resource.allow_list) def test_make_rate_limit(self): limit_resource = limits.RateLimit(**RATE_LIMIT) self.assertEqual(RATE_LIMIT['verb'], limit_resource.verb) self.assertEqual(RATE_LIMIT['value'], limit_resource.value) self.assertEqual(RATE_LIMIT['remaining'], limit_resource.remaining) self.assertEqual(RATE_LIMIT['unit'], limit_resource.unit) self.assertEqual( RATE_LIMIT['next-available'], limit_resource.next_available ) class TestRateLimits(base.TestCase): def test_basic(self): limit_resource = limits.RateLimits() self.assertIsNone(limit_resource.resource_key) self.assertIsNone(limit_resource.resources_key) self.assertEqual('', limit_resource.base_path) self.assertFalse(limit_resource.allow_create) self.assertFalse(limit_resource.allow_fetch) self.assertFalse(limit_resource.allow_delete) self.assertFalse(limit_resource.allow_commit) self.assertFalse(limit_resource.allow_list) def _test_rate_limit(self, expected, actual): self.assertEqual(expected[0]['verb'], actual[0].verb) self.assertEqual(expected[0]['value'], actual[0].value) self.assertEqual(expected[0]['remaining'], actual[0].remaining) self.assertEqual(expected[0]['unit'], actual[0].unit) self.assertEqual( expected[0]['next-available'], actual[0].next_available ) def test_make_rate_limits(self): limit_resource = 
limits.RateLimits(**RATE_LIMITS) self.assertEqual(RATE_LIMITS['regex'], limit_resource.regex) self.assertEqual(RATE_LIMITS['uri'], limit_resource.uri) self._test_rate_limit(RATE_LIMITS['limit'], limit_resource.limits) class TestLimit(base.TestCase): def test_basic(self): limit_resource = limits.Limits() self.assertEqual('limits', limit_resource.resource_key) self.assertEqual('/limits', limit_resource.base_path) self.assertTrue(limit_resource.allow_fetch) self.assertFalse(limit_resource.allow_create) self.assertFalse(limit_resource.allow_commit) self.assertFalse(limit_resource.allow_delete) self.assertFalse(limit_resource.allow_list) def _test_absolute_limit(self, expected, actual): self.assertEqual( expected['totalSnapshotsUsed'], actual.total_snapshots_used ) self.assertEqual(expected['maxTotalBackups'], actual.max_total_backups) self.assertEqual( expected['maxTotalVolumeGigabytes'], actual.max_total_volume_gigabytes, ) self.assertEqual( expected['maxTotalSnapshots'], actual.max_total_snapshots ) self.assertEqual( expected['maxTotalBackupGigabytes'], actual.max_total_backup_gigabytes, ) self.assertEqual( expected['totalBackupGigabytesUsed'], actual.total_backup_gigabytes_used, ) self.assertEqual(expected['maxTotalVolumes'], actual.max_total_volumes) self.assertEqual( expected['totalVolumesUsed'], actual.total_volumes_used ) self.assertEqual( expected['totalBackupsUsed'], actual.total_backups_used ) self.assertEqual( expected['totalGigabytesUsed'], actual.total_gigabytes_used ) def _test_rate_limit(self, expected, actual): self.assertEqual(expected[0]['verb'], actual[0].verb) self.assertEqual(expected[0]['value'], actual[0].value) self.assertEqual(expected[0]['remaining'], actual[0].remaining) self.assertEqual(expected[0]['unit'], actual[0].unit) self.assertEqual( expected[0]['next-available'], actual[0].next_available ) def _test_rate_limits(self, expected, actual): self.assertEqual(expected[0]['regex'], actual[0].regex) self.assertEqual(expected[0]['uri'], 
actual[0].uri) self._test_rate_limit(expected[0]['limit'], actual[0].limits) def test_make_limit(self): limit_resource = limits.Limits(**LIMIT) self._test_rate_limits(LIMIT['rate'], limit_resource.rate) self._test_absolute_limit(LIMIT['absolute'], limit_resource.absolute) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/test_proxy.py0000664000175000017500000004536200000000000026310 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.block_storage.v2 import _proxy from openstack.block_storage.v2 import backup from openstack.block_storage.v2 import capabilities from openstack.block_storage.v2 import limits from openstack.block_storage.v2 import quota_class_set from openstack.block_storage.v2 import quota_set from openstack.block_storage.v2 import snapshot from openstack.block_storage.v2 import stats from openstack.block_storage.v2 import type from openstack.block_storage.v2 import volume from openstack.identity.v3 import project from openstack import proxy as proxy_base from openstack.tests.unit import test_proxy_base class TestVolumeProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestVolume(TestVolumeProxy): def test_volume_get(self): self.verify_get(self.proxy.get_volume, volume.Volume) def test_volume_find(self): self.verify_find( self.proxy.find_volume, volume.Volume, method_kwargs={'all_projects': True}, expected_kwargs={ 'list_base_path': '/volumes/detail', 'all_projects': True, }, ) def test_volumes_detailed(self): self.verify_list( self.proxy.volumes, volume.Volume, method_kwargs={"details": True, "all_projects": True}, expected_kwargs={ "base_path": "/volumes/detail", "all_projects": True, }, ) def test_volumes_not_detailed(self): self.verify_list( self.proxy.volumes, volume.Volume, method_kwargs={"details": False, "all_projects": True}, expected_kwargs={"all_projects": True}, ) def test_volume_create_attrs(self): self.verify_create(self.proxy.create_volume, volume.Volume) def test_volume_delete(self): self.verify_delete(self.proxy.delete_volume, volume.Volume, False) def test_volume_delete_ignore(self): self.verify_delete(self.proxy.delete_volume, volume.Volume, True) def test_volume_delete_force(self): self._verify( "openstack.block_storage.v2.volume.Volume.force_delete", self.proxy.delete_volume, method_args=["value"], method_kwargs={"force": True}, 
expected_args=[self.proxy], ) def test_get_volume_metadata(self): self._verify( "openstack.block_storage.v2.volume.Volume.fetch_metadata", self.proxy.get_volume_metadata, method_args=["value"], expected_args=[self.proxy], expected_result=volume.Volume(id="value", metadata={}), ) def test_set_volume_metadata(self): kwargs = {"a": "1", "b": "2"} id = "an_id" self._verify( "openstack.block_storage.v2.volume.Volume.set_metadata", self.proxy.set_volume_metadata, method_args=[id], method_kwargs=kwargs, method_result=volume.Volume.existing(id=id, metadata=kwargs), expected_args=[self.proxy], expected_kwargs={'metadata': kwargs}, expected_result=volume.Volume.existing(id=id, metadata=kwargs), ) def test_delete_volume_metadata(self): self._verify( "openstack.block_storage.v2.volume.Volume.delete_metadata_item", self.proxy.delete_volume_metadata, expected_result=None, method_args=["value", ["key"]], expected_args=[self.proxy, "key"], ) def test_backend_pools(self): self.verify_list(self.proxy.backend_pools, stats.Pools) def test_volume_wait_for(self): value = volume.Volume(id='1234') self.verify_wait_for_status( self.proxy.wait_for_status, method_args=[value], expected_args=[self.proxy, value, 'available', ['error'], 2, 120], expected_kwargs={'callback': None}, ) class TestVolumeActions(TestVolumeProxy): def test_volume_extend(self): self._verify( "openstack.block_storage.v2.volume.Volume.extend", self.proxy.extend_volume, method_args=["value", "new-size"], expected_args=[self.proxy, "new-size"], ) def test_volume_set_readonly_no_argument(self): self._verify( "openstack.block_storage.v2.volume.Volume.set_readonly", self.proxy.set_volume_readonly, method_args=["value"], expected_args=[self.proxy, True], ) def test_volume_set_readonly_false(self): self._verify( "openstack.block_storage.v2.volume.Volume.set_readonly", self.proxy.set_volume_readonly, method_args=["value", False], expected_args=[self.proxy, False], ) def test_volume_set_bootable(self): self._verify( 
"openstack.block_storage.v2.volume.Volume.set_bootable_status", self.proxy.set_volume_bootable_status, method_args=["value", True], expected_args=[self.proxy, True], ) def test_volume_reset_volume_status(self): self._verify( "openstack.block_storage.v2.volume.Volume.reset_status", self.proxy.reset_volume_status, method_args=["value", '1', '2', '3'], expected_args=[self.proxy, '1', '2', '3'], ) def test_set_volume_image_metadata(self): self._verify( "openstack.block_storage.v2.volume.Volume.set_image_metadata", self.proxy.set_volume_image_metadata, method_args=["value"], method_kwargs={'foo': 'bar'}, expected_args=[self.proxy], expected_kwargs={'metadata': {'foo': 'bar'}}, ) def test_delete_volume_image_metadata(self): self._verify( "openstack.block_storage.v2.volume.Volume.delete_image_metadata", self.proxy.delete_volume_image_metadata, method_args=["value"], expected_args=[self.proxy], ) def test_delete_volume_image_metadata__with_keys(self): self._verify( "openstack.block_storage.v2.volume.Volume.delete_image_metadata_item", self.proxy.delete_volume_image_metadata, method_args=["value", ['foo']], expected_args=[self.proxy, 'foo'], ) def test_attach_instance(self): self._verify( "openstack.block_storage.v2.volume.Volume.attach", self.proxy.attach_volume, method_args=["value", '1'], method_kwargs={'instance': '2'}, expected_args=[self.proxy, '1', '2', None], ) def test_attach_host(self): self._verify( "openstack.block_storage.v2.volume.Volume.attach", self.proxy.attach_volume, method_args=["value", '1'], method_kwargs={'host_name': '3'}, expected_args=[self.proxy, '1', None, '3'], ) def test_detach_defaults(self): self._verify( "openstack.block_storage.v2.volume.Volume.detach", self.proxy.detach_volume, method_args=["value", '1'], expected_args=[self.proxy, '1', False, None], ) def test_detach_force(self): self._verify( "openstack.block_storage.v2.volume.Volume.detach", self.proxy.detach_volume, method_args=["value", '1', True, {'a': 'b'}], 
expected_args=[self.proxy, '1', True, {'a': 'b'}], ) def test_unmanage(self): self._verify( "openstack.block_storage.v2.volume.Volume.unmanage", self.proxy.unmanage_volume, method_args=["value"], expected_args=[self.proxy], ) def test_migrate_default(self): self._verify( "openstack.block_storage.v2.volume.Volume.migrate", self.proxy.migrate_volume, method_args=["value", '1'], expected_args=[self.proxy, '1', False, False], ) def test_migrate_nondefault(self): self._verify( "openstack.block_storage.v2.volume.Volume.migrate", self.proxy.migrate_volume, method_args=["value", '1', True, True], expected_args=[self.proxy, '1', True, True], ) def test_complete_migration(self): self._verify( "openstack.block_storage.v2.volume.Volume.complete_migration", self.proxy.complete_volume_migration, method_args=["value", '1'], expected_args=[self.proxy, "1", False], ) def test_complete_migration_error(self): self._verify( "openstack.block_storage.v2.volume.Volume.complete_migration", self.proxy.complete_volume_migration, method_args=["value", "1", True], expected_args=[self.proxy, "1", True], ) class TestBackup(TestVolumeProxy): def test_backups_detailed(self): self.verify_list( self.proxy.backups, backup.Backup, method_kwargs={"details": True, "query": 1}, expected_kwargs={"query": 1, "base_path": "/backups/detail"}, ) def test_backups_not_detailed(self): self.verify_list( self.proxy.backups, backup.Backup, method_kwargs={"details": False, "query": 1}, expected_kwargs={"query": 1}, ) def test_backup_get(self): self.verify_get(self.proxy.get_backup, backup.Backup) def test_backup_find(self): self.verify_find( self.proxy.find_backup, backup.Backup, expected_kwargs={'list_base_path': '/backups/detail'}, ) def test_backup_delete(self): self.verify_delete(self.proxy.delete_backup, backup.Backup, False) def test_backup_delete_ignore(self): self.verify_delete(self.proxy.delete_backup, backup.Backup, True) def test_backup_delete_force(self): self._verify( 
"openstack.block_storage.v2.backup.Backup.force_delete", self.proxy.delete_backup, method_args=["value"], method_kwargs={"force": True}, expected_args=[self.proxy], ) def test_backup_create_attrs(self): self.verify_create(self.proxy.create_backup, backup.Backup) def test_backup_restore(self): self._verify( 'openstack.block_storage.v2.backup.Backup.restore', self.proxy.restore_backup, method_args=['volume_id'], method_kwargs={'volume_id': 'vol_id', 'name': 'name'}, expected_args=[self.proxy], expected_kwargs={'volume_id': 'vol_id', 'name': 'name'}, ) def test_backup_reset(self): self._verify( "openstack.block_storage.v2.backup.Backup.reset", self.proxy.reset_backup, method_args=["value", "new_status"], expected_args=[self.proxy, "new_status"], ) class TestLimit(TestVolumeProxy): def test_limits_get(self): self.verify_get( self.proxy.get_limits, limits.Limits, method_args=[], expected_kwargs={'requires_id': False}, ) class TestCapabilities(TestVolumeProxy): def test_capabilites_get(self): self.verify_get(self.proxy.get_capabilities, capabilities.Capabilities) class TestSnapshot(TestVolumeProxy): def test_snapshot_get(self): self.verify_get(self.proxy.get_snapshot, snapshot.Snapshot) def test_snapshot_find(self): self.verify_find( self.proxy.find_snapshot, snapshot.Snapshot, method_kwargs={'all_projects': True}, expected_kwargs={ 'list_base_path': '/snapshots/detail', 'all_projects': True, }, ) def test_snapshots_detailed(self): self.verify_list( self.proxy.snapshots, snapshot.SnapshotDetail, method_kwargs={"details": True, "all_projects": True}, expected_kwargs={"all_projects": True}, ) def test_snapshots_not_detailed(self): self.verify_list( self.proxy.snapshots, snapshot.Snapshot, method_kwargs={"details": False, "all_projects": True}, expected_kwargs={"all_projects": 1}, ) def test_snapshot_create_attrs(self): self.verify_create(self.proxy.create_snapshot, snapshot.Snapshot) def test_snapshot_delete(self): self.verify_delete( self.proxy.delete_snapshot, 
snapshot.Snapshot, False ) def test_snapshot_delete_ignore(self): self.verify_delete(self.proxy.delete_snapshot, snapshot.Snapshot, True) def test_reset(self): self._verify( "openstack.block_storage.v2.snapshot.Snapshot.reset", self.proxy.reset_snapshot, method_args=["value", "new_status"], expected_args=[self.proxy, "new_status"], ) def test_get_snapshot_metadata(self): self._verify( "openstack.block_storage.v2.snapshot.Snapshot.fetch_metadata", self.proxy.get_snapshot_metadata, method_args=["value"], expected_args=[self.proxy], expected_result=snapshot.Snapshot(id="value", metadata={}), ) def test_set_snapshot_metadata(self): kwargs = {"a": "1", "b": "2"} id = "an_id" self._verify( "openstack.block_storage.v2.snapshot.Snapshot.set_metadata", self.proxy.set_snapshot_metadata, method_args=[id], method_kwargs=kwargs, method_result=snapshot.Snapshot.existing(id=id, metadata=kwargs), expected_args=[self.proxy], expected_kwargs={'metadata': kwargs}, expected_result=snapshot.Snapshot.existing(id=id, metadata=kwargs), ) def test_delete_snapshot_metadata(self): self._verify( "openstack.block_storage.v2.snapshot.Snapshot." 
"delete_metadata_item", self.proxy.delete_snapshot_metadata, expected_result=None, method_args=["value", ["key"]], expected_args=[self.proxy, "key"], ) class TestType(TestVolumeProxy): def test_type_get(self): self.verify_get(self.proxy.get_type, type.Type) def test_type_find(self): self.verify_find(self.proxy.find_type, type.Type) def test_types(self): self.verify_list(self.proxy.types, type.Type) def test_type_create_attrs(self): self.verify_create(self.proxy.create_type, type.Type) def test_type_delete(self): self.verify_delete(self.proxy.delete_type, type.Type, False) def test_type_delete_ignore(self): self.verify_delete(self.proxy.delete_type, type.Type, True) def test_type_get_private_access(self): self._verify( "openstack.block_storage.v2.type.Type.get_private_access", self.proxy.get_type_access, method_args=["value"], expected_args=[self.proxy], ) def test_type_add_private_access(self): self._verify( "openstack.block_storage.v2.type.Type.add_private_access", self.proxy.add_type_access, method_args=["value", "a"], expected_args=[self.proxy, "a"], ) def test_type_remove_private_access(self): self._verify( "openstack.block_storage.v2.type.Type.remove_private_access", self.proxy.remove_type_access, method_args=["value", "a"], expected_args=[self.proxy, "a"], ) class TestQuotaClassSet(TestVolumeProxy): def test_quota_class_set_get(self): self.verify_get( self.proxy.get_quota_class_set, quota_class_set.QuotaClassSet ) def test_quota_class_set_update(self): self.verify_update( self.proxy.update_quota_class_set, quota_class_set.QuotaClassSet, False, ) class TestQuotaSet(TestVolumeProxy): def test_quota_set_get(self): self._verify( 'openstack.resource.Resource.fetch', self.proxy.get_quota_set, method_args=['prj'], expected_args=[self.proxy], expected_kwargs={ 'error_message': None, 'requires_id': False, 'usage': False, }, method_result=quota_set.QuotaSet(), expected_result=quota_set.QuotaSet(), ) def test_quota_set_get_query(self): self._verify( 
'openstack.resource.Resource.fetch', self.proxy.get_quota_set, method_args=['prj'], method_kwargs={'usage': True, 'user_id': 'uid'}, expected_args=[self.proxy], expected_kwargs={ 'error_message': None, 'requires_id': False, 'usage': True, 'user_id': 'uid', }, ) def test_quota_set_get_defaults(self): self._verify( 'openstack.resource.Resource.fetch', self.proxy.get_quota_set_defaults, method_args=['prj'], expected_args=[self.proxy], expected_kwargs={ 'error_message': None, 'requires_id': False, 'base_path': '/os-quota-sets/defaults', }, ) def test_quota_set_reset(self): self._verify( 'openstack.resource.Resource.delete', self.proxy.revert_quota_set, method_args=['prj'], method_kwargs={'user_id': 'uid'}, expected_args=[self.proxy], expected_kwargs={'user_id': 'uid'}, ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_quota_set_update(self, mock_get): fake_project = project.Project(id='prj') mock_get.side_effect = [fake_project] self._verify( 'openstack.proxy.Proxy._update', self.proxy.update_quota_set, method_args=['prj'], method_kwargs={'volumes': 123}, expected_args=[quota_set.QuotaSet, None], expected_kwargs={'project_id': 'prj', 'volumes': 123}, ) mock_get.assert_called_once_with(project.Project, 'prj') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/test_snapshot.py0000664000175000017500000000670600000000000026765 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v2 import snapshot from openstack.tests.unit import base FAKE_ID = "ffa9bc5e-1172-4021-acaf-cdcd78a9584d" SNAPSHOT = { "status": "creating", "description": "Daily backup", "created_at": "2015-03-09T12:14:57.233772", "updated_at": None, "metadata": {}, "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", "size": 1, "id": FAKE_ID, "name": "snap-001", "force": "true", } DETAILS = { "os-extended-snapshot-attributes:progress": "100%", "os-extended-snapshot-attributes:project_id": "0c2eba2c5af04d3f9e9d0d410b371fde", # noqa: E501 } DETAILED_SNAPSHOT = SNAPSHOT.copy() DETAILED_SNAPSHOT.update(**DETAILS) class TestSnapshot(base.TestCase): def test_basic(self): sot = snapshot.Snapshot(SNAPSHOT) self.assertEqual("snapshot", sot.resource_key) self.assertEqual("snapshots", sot.resources_key) self.assertEqual("/snapshots", sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { "name": "name", "status": "status", "all_projects": "all_tenants", "volume_id": "volume_id", "limit": "limit", "marker": "marker", }, sot._query_mapping._mapping, ) def test_create_basic(self): sot = snapshot.Snapshot(**SNAPSHOT) self.assertEqual(SNAPSHOT["id"], sot.id) self.assertEqual(SNAPSHOT["status"], sot.status) self.assertEqual(SNAPSHOT["created_at"], sot.created_at) self.assertEqual(SNAPSHOT["updated_at"], sot.updated_at) self.assertEqual(SNAPSHOT["metadata"], sot.metadata) self.assertEqual(SNAPSHOT["volume_id"], sot.volume_id) self.assertEqual(SNAPSHOT["size"], sot.size) self.assertEqual(SNAPSHOT["name"], sot.name) self.assertTrue(sot.is_forced) class TestSnapshotActions(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = 
None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.headers = {} self.resp.status_code = 202 self.sess = mock.Mock(spec=adapter.Adapter) self.sess.get = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) self.sess.default_microversion = None def test_reset(self): sot = snapshot.Snapshot(**SNAPSHOT) self.assertIsNone(sot.reset(self.sess, 'new_status')) url = 'snapshots/%s/action' % FAKE_ID body = {'os-reset_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/test_stats.py0000664000175000017500000000301000000000000026245 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.block_storage.v2 import stats from openstack.tests.unit import base POOLS = { "name": "pool1", "capabilities": { "updated": "2014-10-28T00=00=00-00=00", "total_capacity": 1024, "free_capacity": 100, "volume_backend_name": "pool1", "reserved_percentage": "0", "driver_version": "1.0.0", "storage_protocol": "iSCSI", "QoS_support": "false", }, } class TestBackendPools(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = stats.Pools(POOLS) self.assertEqual("", sot.resource_key) self.assertEqual("pools", sot.resources_key) self.assertEqual( "/scheduler-stats/get_pools?detail=True", sot.base_path ) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_commit) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/test_type.py0000664000175000017500000000640700000000000026105 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v2 import type from openstack.tests.unit import base FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" TYPE = {"extra_specs": {"capabilities": "gpu"}, "id": FAKE_ID, "name": "SSD"} class TestType(base.TestCase): def setUp(self): super().setUp() self.extra_specs_result = {"extra_specs": {"go": "cubs", "boo": "sox"}} self.resp = mock.Mock() self.resp.body = None self.resp.status_code = 200 self.resp.json = mock.Mock(return_value=self.resp.body) self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = '3.0' self.sess.post = mock.Mock(return_value=self.resp) self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_basic(self): sot = type.Type(**TYPE) self.assertEqual("volume_type", sot.resource_key) self.assertEqual("volume_types", sot.resources_key) self.assertEqual("/types", sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_commit) def test_new(self): sot = type.Type.new(id=FAKE_ID) self.assertEqual(FAKE_ID, sot.id) def test_create(self): sot = type.Type(**TYPE) self.assertEqual(TYPE["id"], sot.id) self.assertEqual(TYPE["extra_specs"], sot.extra_specs) self.assertEqual(TYPE["name"], sot.name) def test_get_private_access(self): sot = type.Type(**TYPE) response = mock.Mock() response.status_code = 200 response.body = { "volume_type_access": [{"project_id": "a", "volume_type_id": "b"}] } response.json = mock.Mock(return_value=response.body) self.sess.get = mock.Mock(return_value=response) self.assertEqual( response.body["volume_type_access"], sot.get_private_access(self.sess), ) self.sess.get.assert_called_with( "types/%s/os-volume-type-access" % sot.id ) def test_add_private_access(self): sot = type.Type(**TYPE) self.assertIsNone(sot.add_private_access(self.sess, "a")) url = "types/%s/action" % sot.id body = 
{"addProjectAccess": {"project": "a"}} self.sess.post.assert_called_with(url, json=body) def test_remove_private_access(self): sot = type.Type(**TYPE) self.assertIsNone(sot.remove_private_access(self.sess, "a")) url = "types/%s/action" % sot.id body = {"removeProjectAccess": {"project": "a"}} self.sess.post.assert_called_with(url, json=body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v2/test_volume.py0000664000175000017500000003317400000000000026434 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v2 import volume from openstack.tests.unit import base FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" IMAGE_METADATA = { 'container_format': 'bare', 'min_ram': '64', 'disk_format': 'qcow2', 'image_name': 'TestVM', 'image_id': '625d4f2c-cf67-4af3-afb6-c7220f766947', 'checksum': '64d7c1cd2b6f60c92c14662941cb7913', 'min_disk': '0', 'size': '13167616', } VOLUME = { "status": "creating", "name": "my_volume", "attachments": [], "availability_zone": "nova", "bootable": "false", "created_at": "2015-03-09T12:14:57.233772", "updated_at": None, "description": "something", "volume_type": "some_type", "snapshot_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", "source_volid": None, "imageRef": "some_image", "metadata": {}, "volume_image_metadata": IMAGE_METADATA, "id": FAKE_ID, "size": 10, "os-vol-host-attr:host": "127.0.0.1", "os-vol-tenant-attr:tenant_id": "some tenant", "os-vol-mig-status-attr:migstat": "done", "os-vol-mig-status-attr:name_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", "replication_status": "nah", "os-volume-replication:extended_status": "really nah", "consistencygroup_id": "123asf-asdf123", "os-volume-replication:driver_data": "ahasadfasdfasdfasdfsdf", "snapshot_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", "encrypted": "false", "OS-SCH-HNT:scheduler_hints": { "same_host": [ "a0cf03a5-d921-4877-bb5c-86d26cf818e1", "8c19174f-4220-44f0-824a-cd1eeef10287", ] }, } class TestVolume(base.TestCase): def test_basic(self): sot = volume.Volume(VOLUME) self.assertEqual("volume", sot.resource_key) self.assertEqual("volumes", sot.resources_key) self.assertEqual("/volumes", sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { "name": "name", "status": "status", "all_projects": "all_tenants", "project_id": "project_id", "limit": 
"limit", "marker": "marker", }, sot._query_mapping._mapping, ) def test_create(self): sot = volume.Volume(**VOLUME) self.assertEqual(VOLUME["id"], sot.id) self.assertEqual(VOLUME["status"], sot.status) self.assertEqual(VOLUME["attachments"], sot.attachments) self.assertEqual(VOLUME["availability_zone"], sot.availability_zone) self.assertFalse(sot.is_bootable) self.assertEqual(VOLUME["created_at"], sot.created_at) self.assertEqual(VOLUME["updated_at"], sot.updated_at) self.assertEqual(VOLUME["description"], sot.description) self.assertEqual(VOLUME["volume_type"], sot.volume_type) self.assertEqual(VOLUME["snapshot_id"], sot.snapshot_id) self.assertEqual(VOLUME["source_volid"], sot.source_volume_id) self.assertEqual(VOLUME["metadata"], sot.metadata) self.assertEqual( VOLUME["volume_image_metadata"], sot.volume_image_metadata ) self.assertEqual(VOLUME["size"], sot.size) self.assertEqual(VOLUME["imageRef"], sot.image_id) self.assertEqual(VOLUME["os-vol-host-attr:host"], sot.host) self.assertEqual( VOLUME["os-vol-tenant-attr:tenant_id"], sot.project_id ) self.assertEqual( VOLUME["os-vol-mig-status-attr:migstat"], sot.migration_status ) self.assertEqual( VOLUME["os-vol-mig-status-attr:name_id"], sot.migration_id ) self.assertEqual(VOLUME["replication_status"], sot.replication_status) self.assertEqual( VOLUME["os-volume-replication:extended_status"], sot.extended_replication_status, ) self.assertEqual( VOLUME["consistencygroup_id"], sot.consistency_group_id ) self.assertEqual( VOLUME["os-volume-replication:driver_data"], sot.replication_driver_data, ) self.assertDictEqual( VOLUME["OS-SCH-HNT:scheduler_hints"], sot.scheduler_hints ) self.assertFalse(sot.is_encrypted) class TestVolumeActions(TestVolume): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.status_code = 200 self.resp.json = mock.Mock(return_value=self.resp.body) self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = '3.0' self.sess.post = 
mock.Mock(return_value=self.resp) self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_extend(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.extend(self.sess, '20')) url = 'volumes/%s/action' % FAKE_ID body = {"os-extend": {"new_size": "20"}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_volume_readonly(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.set_readonly(self.sess, True)) url = 'volumes/%s/action' % FAKE_ID body = {'os-update_readonly_flag': {'readonly': True}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_volume_readonly_false(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.set_readonly(self.sess, False)) url = 'volumes/%s/action' % FAKE_ID body = {'os-update_readonly_flag': {'readonly': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_volume_bootable(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.set_bootable_status(self.sess)) url = 'volumes/%s/action' % FAKE_ID body = {'os-set_bootable': {'bootable': True}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_volume_bootable_false(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.set_bootable_status(self.sess, False)) url = 'volumes/%s/action' % FAKE_ID body = {'os-set_bootable': {'bootable': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_image_metadata(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.set_image_metadata(self.sess, {'foo': 'bar'})) url = 'volumes/%s/action' % FAKE_ID body = {'os-set_image_metadata': {'foo': 'bar'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_delete_image_metadata(self): _volume = copy.deepcopy(VOLUME) _volume['metadata'] = { 'foo': 'bar', 'baz': 'wow', } sot = 
volume.Volume(**_volume) self.assertIsNone(sot.delete_image_metadata(self.sess)) url = 'volumes/%s/action' % FAKE_ID body_a = {'os-unset_image_metadata': 'foo'} body_b = {'os-unset_image_metadata': 'baz'} self.sess.post.assert_has_calls( [ mock.call( url, json=body_a, microversion=sot._max_microversion ), mock.call( url, json=body_b, microversion=sot._max_microversion ), ] ) def test_delete_image_metadata_item(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.delete_image_metadata_item(self.sess, 'foo')) url = 'volumes/%s/action' % FAKE_ID body = {'os-unset_image_metadata': 'foo'} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_reset_status(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.reset_status(self.sess, '1', '2', '3')) url = 'volumes/%s/action' % FAKE_ID body = { 'os-reset_status': { 'status': '1', 'attach_status': '2', 'migration_status': '3', } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_reset_status__single_option(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.reset_status(self.sess, status='1')) url = 'volumes/%s/action' % FAKE_ID body = { 'os-reset_status': { 'status': '1', } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_attach_instance(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.attach(self.sess, '1', '2')) url = 'volumes/%s/action' % FAKE_ID body = {'os-attach': {'mountpoint': '1', 'instance_uuid': '2'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_detach(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.detach(self.sess, '1')) url = 'volumes/%s/action' % FAKE_ID body = {'os-detach': {'attachment_id': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_detach_force(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.detach(self.sess, 
'1', force=True)) url = 'volumes/%s/action' % FAKE_ID body = {'os-force_detach': {'attachment_id': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_unmanage(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.unmanage(self.sess)) url = 'volumes/%s/action' % FAKE_ID body = {'os-unmanage': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_retype(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.retype(self.sess, '1')) url = 'volumes/%s/action' % FAKE_ID body = {'os-retype': {'new_type': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_retype_mp(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.retype(self.sess, '1', migration_policy='2')) url = 'volumes/%s/action' % FAKE_ID body = {'os-retype': {'new_type': '1', 'migration_policy': '2'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_migrate(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.migrate(self.sess, host='1')) url = 'volumes/%s/action' % FAKE_ID body = {'os-migrate_volume': {'host': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_migrate_flags(self): sot = volume.Volume(**VOLUME) self.assertIsNone( sot.migrate( self.sess, host='1', force_host_copy=True, lock_volume=True ) ) url = 'volumes/%s/action' % FAKE_ID body = { 'os-migrate_volume': { 'host': '1', 'force_host_copy': True, 'lock_volume': True, } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_complete_migration(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.complete_migration(self.sess, new_volume_id='1')) url = 'volumes/%s/action' % FAKE_ID body = { 'os-migrate_volume_completion': {'new_volume': '1', 'error': False} } self.sess.post.assert_called_with( url, json=body, 
microversion=sot._max_microversion ) def test_complete_migration_error(self): sot = volume.Volume(**VOLUME) self.assertIsNone( sot.complete_migration(self.sess, new_volume_id='1', error=True) ) url = 'volumes/%s/action' % FAKE_ID body = { 'os-migrate_volume_completion': {'new_volume': '1', 'error': True} } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_force_delete(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.force_delete(self.sess)) url = 'volumes/%s/action' % FAKE_ID body = {'os-force_delete': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4053905 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/0000775000175000017500000000000000000000000023505 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/__init__.py0000664000175000017500000000000000000000000025604 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_attachment.py0000664000175000017500000001430700000000000027253 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v3 import attachment from openstack import resource from openstack.tests.unit import base FAKE_ID = "92dc3671-d0ab-4370-8058-c88a71661ec5" FAKE_VOL_ID = "138e4a2e-85ef-4f96-a0d0-9f3ef9f32987" FAKE_INSTANCE_UUID = "ee9ae89e-d4fc-4c95-93ad-d9e80f240cae" CONNECTION_INFO = { "access_mode": "rw", "attachment_id": "92dc3671-d0ab-4370-8058-c88a71661ec5", "auth_enabled": True, "auth_username": "cinder", "cacheable": False, "cluster_name": "ceph", "discard": True, "driver_volume_type": "rbd", "encrypted": False, "hosts": ["127.0.0.1"], "name": "volumes/volume-138e4a2e-85ef-4f96-a0d0-9f3ef9f32987", "ports": ["6789"], "secret_type": "ceph", "secret_uuid": "e5d27872-64ab-4d8c-8c25-4dbdc522fbbf", "volume_id": "138e4a2e-85ef-4f96-a0d0-9f3ef9f32987", } CONNECTOR = { "do_local_attach": False, "host": "devstack-VirtualBox", "initiator": "iqn.2005-03.org.open-iscsi:1f6474a01f9a", "ip": "127.0.0.1", "multipath": False, "nqn": "nqn.2014-08.org.nvmexpress:uuid:4dfe457e-6206-4a61-b547-5a9d0e2fa557", "nvme_native_multipath": False, "os_type": "linux", "platform": "x86_64", "system_uuid": "2f4d1bf2-8a9e-864f-80ec-d265222bf145", "uuid": "87c73a20-e7f9-4370-ad85-5829b54675d7", } ATTACHMENT = { "id": FAKE_ID, "status": "attached", "instance": FAKE_INSTANCE_UUID, "volume_id": FAKE_VOL_ID, "attached_at": "2023-07-07T10:30:40.000000", "detached_at": None, "attach_mode": "rw", "connection_info": CONNECTION_INFO, } class TestAttachment(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.headers = {} self.resp.status_code = 202 self.sess = mock.Mock(spec=adapter.Adapter) self.sess.get = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) self.sess.put = mock.Mock(return_value=self.resp) self.sess.default_microversion = "3.54" def test_basic(self): sot = attachment.Attachment(ATTACHMENT) 
self.assertEqual("attachment", sot.resource_key) self.assertEqual("attachments", sot.resources_key) self.assertEqual("/attachments", sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_get) self.assertTrue(sot.allow_commit) self.assertIsNotNone(sot._max_microversion) self.assertDictEqual( { "limit": "limit", "marker": "marker", }, sot._query_mapping._mapping, ) def test_create_resource(self): sot = attachment.Attachment(**ATTACHMENT) self.assertEqual(ATTACHMENT["id"], sot.id) self.assertEqual(ATTACHMENT["status"], sot.status) self.assertEqual(ATTACHMENT["instance"], sot.instance) self.assertEqual(ATTACHMENT["volume_id"], sot.volume_id) self.assertEqual(ATTACHMENT["attached_at"], sot.attached_at) self.assertEqual(ATTACHMENT["detached_at"], sot.detached_at) self.assertEqual(ATTACHMENT["attach_mode"], sot.attach_mode) self.assertEqual(ATTACHMENT["connection_info"], sot.connection_info) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) @mock.patch.object(resource.Resource, '_translate_response') def test_create_no_mode_no_instance_id(self, mock_translate, mock_mv): self.sess.default_microversion = "3.27" mock_mv.return_value = False sot = attachment.Attachment() FAKE_MODE = "rw" sot.create( self.sess, volume_id=FAKE_VOL_ID, connector=CONNECTOR, instance=None, mode=FAKE_MODE, ) self.sess.post.assert_called_with( '/attachments', json={'attachment': {}}, headers={}, microversion="3.27", params={ 'volume_id': FAKE_VOL_ID, 'connector': CONNECTOR, 'instance': None, 'mode': 'rw', }, ) self.sess.default_microversion = "3.54" @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) @mock.patch.object(resource.Resource, '_translate_response') def test_create_with_mode_with_instance_id(self, mock_translate, mock_mv): sot = attachment.Attachment() FAKE_MODE = "rw" sot.create( self.sess, volume_id=FAKE_VOL_ID, 
connector=CONNECTOR, instance=FAKE_INSTANCE_UUID, mode=FAKE_MODE, ) self.sess.post.assert_called_with( '/attachments', json={'attachment': {}}, headers={}, microversion="3.54", params={ 'volume_id': FAKE_VOL_ID, 'connector': CONNECTOR, 'instance': FAKE_INSTANCE_UUID, 'mode': FAKE_MODE, }, ) @mock.patch.object(resource.Resource, '_translate_response') def test_complete(self, mock_translate): sot = attachment.Attachment() sot.id = FAKE_ID sot.complete(self.sess) self.sess.post.assert_called_with( '/attachments/%s/action' % FAKE_ID, json={ 'os-complete': '92dc3671-d0ab-4370-8058-c88a71661ec5', }, microversion="3.54", ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_availability_zone.py0000664000175000017500000000241200000000000030622 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.block_storage.v3 import availability_zone as az from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "id": IDENTIFIER, "zoneState": {"available": True}, "zoneName": "zone1", } class TestAvailabilityZone(base.TestCase): def test_basic(self): sot = az.AvailabilityZone() self.assertEqual('availabilityZoneInfo', sot.resources_key) self.assertEqual('/os-availability-zone', sot.base_path) self.assertTrue(sot.allow_list) def test_make_it(self): sot = az.AvailabilityZone(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['zoneState'], sot.state) self.assertEqual(EXAMPLE['zoneName'], sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_backup.py0000664000175000017500000001537600000000000026377 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v3 import backup from openstack import exceptions from openstack.tests.unit import base FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" BACKUP = { "availability_zone": "az1", "container": "volumebackups", "created_at": "2018-04-02T10:35:27.000000", "updated_at": "2018-04-03T10:35:27.000000", "description": 'description', "encryption_key_id": "fake_encry_id", "fail_reason": 'fail reason', "id": FAKE_ID, "name": "backup001", "object_count": 22, "size": 1, "status": "available", "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", "is_incremental": True, "has_dependent_backups": False, "os-backup-project-attr:project_id": "2c67a14be9314c5dae2ee6c4ec90cf0b", "user_id": "515ba0dd59f84f25a6a084a45d8d93b2", "metadata": {"key": "value"}, } class TestBackup(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.headers = {} self.resp.status_code = 202 self.sess = mock.Mock(spec=adapter.Adapter) self.sess.get = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) self.sess.default_microversion = "3.64" def test_basic(self): sot = backup.Backup(BACKUP) self.assertEqual("backup", sot.resource_key) self.assertEqual("backups", sot.resources_key) self.assertEqual("/backups", sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_get) self.assertTrue(sot.allow_fetch) self.assertIsNotNone(sot._max_microversion) self.assertDictEqual( { "limit": "limit", "marker": "marker", "offset": "offset", "project_id": "project_id", "name": "name", "status": "status", "volume_id": "volume_id", "sort_dir": "sort_dir", "sort_key": "sort_key", "sort": "sort", "all_projects": "all_tenants", }, sot._query_mapping._mapping, ) def test_create(self): sot = backup.Backup(**BACKUP) self.assertEqual(BACKUP["id"], 
sot.id) self.assertEqual(BACKUP["name"], sot.name) self.assertEqual(BACKUP["status"], sot.status) self.assertEqual(BACKUP["container"], sot.container) self.assertEqual(BACKUP["availability_zone"], sot.availability_zone) self.assertEqual(BACKUP["created_at"], sot.created_at) self.assertEqual(BACKUP["updated_at"], sot.updated_at) self.assertEqual(BACKUP["description"], sot.description) self.assertEqual(BACKUP["fail_reason"], sot.fail_reason) self.assertEqual(BACKUP["volume_id"], sot.volume_id) self.assertEqual(BACKUP["object_count"], sot.object_count) self.assertEqual(BACKUP["is_incremental"], sot.is_incremental) self.assertEqual(BACKUP["size"], sot.size) self.assertEqual( BACKUP["has_dependent_backups"], sot.has_dependent_backups ) self.assertEqual( BACKUP['os-backup-project-attr:project_id'], sot.project_id ) self.assertEqual(BACKUP['metadata'], sot.metadata) self.assertEqual(BACKUP['user_id'], sot.user_id) self.assertEqual(BACKUP['encryption_key_id'], sot.encryption_key_id) def test_create_incremental(self): sot = backup.Backup(is_incremental=True) sot2 = backup.Backup(is_incremental=False) create_response = mock.Mock() create_response.status_code = 200 create_response.json.return_value = {} create_response.headers = {} self.sess.post.return_value = create_response sot.create(self.sess) self.sess.post.assert_called_with( '/backups', headers={}, json={ 'backup': { 'incremental': True, } }, microversion="3.64", params={}, ) sot2.create(self.sess) self.sess.post.assert_called_with( '/backups', headers={}, json={ 'backup': { 'incremental': False, } }, microversion="3.64", params={}, ) def test_restore(self): sot = backup.Backup(**BACKUP) self.assertEqual(sot, sot.restore(self.sess, 'vol', 'name')) url = 'backups/%s/restore' % FAKE_ID body = {"restore": {"volume_id": "vol", "name": "name"}} self.sess.post.assert_called_with(url, json=body) def test_restore_name(self): sot = backup.Backup(**BACKUP) self.assertEqual(sot, sot.restore(self.sess, name='name')) url = 
'backups/%s/restore' % FAKE_ID body = {"restore": {"name": "name"}} self.sess.post.assert_called_with(url, json=body) def test_restore_vol_id(self): sot = backup.Backup(**BACKUP) self.assertEqual(sot, sot.restore(self.sess, volume_id='vol')) url = 'backups/%s/restore' % FAKE_ID body = {"restore": {"volume_id": "vol"}} self.sess.post.assert_called_with(url, json=body) def test_restore_no_params(self): sot = backup.Backup(**BACKUP) self.assertRaises(exceptions.SDKException, sot.restore, self.sess) def test_force_delete(self): sot = backup.Backup(**BACKUP) self.assertIsNone(sot.force_delete(self.sess)) url = 'backups/%s/action' % FAKE_ID body = {'os-force_delete': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_reset(self): sot = backup.Backup(**BACKUP) self.assertIsNone(sot.reset(self.sess, 'new_status')) url = 'backups/%s/action' % FAKE_ID body = {'os-reset_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_block_storage_summary.py0000664000175000017500000000461400000000000031516 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from openstack.block_storage.v3 import block_storage_summary as summary from openstack.tests.unit import base BLOCK_STORAGE_SUMMARY_312 = { "total_size": "4", "total_count": "2", "metadata": {"key1": "value1"}, } BLOCK_STORAGE_SUMMARY_326 = copy.deepcopy(BLOCK_STORAGE_SUMMARY_312) BLOCK_STORAGE_SUMMARY_326['metadata'] = {"key1": "value1"} class TestBlockStorageSummary(base.TestCase): def test_basic(self): summary_resource = summary.BlockStorageSummary() self.assertEqual(None, summary_resource.resource_key) self.assertEqual(None, summary_resource.resources_key) self.assertEqual("/volumes/summary", summary_resource.base_path) self.assertTrue(summary_resource.allow_fetch) self.assertFalse(summary_resource.allow_create) self.assertFalse(summary_resource.allow_commit) self.assertFalse(summary_resource.allow_delete) self.assertFalse(summary_resource.allow_list) def test_get_summary_312(self): summary_resource = summary.BlockStorageSummary( **BLOCK_STORAGE_SUMMARY_312 ) self.assertEqual( BLOCK_STORAGE_SUMMARY_312["total_size"], summary_resource.total_size, ) self.assertEqual( BLOCK_STORAGE_SUMMARY_312["total_count"], summary_resource.total_count, ) def test_get_summary_326(self): summary_resource = summary.BlockStorageSummary( **BLOCK_STORAGE_SUMMARY_326 ) self.assertEqual( BLOCK_STORAGE_SUMMARY_326["total_size"], summary_resource.total_size, ) self.assertEqual( BLOCK_STORAGE_SUMMARY_326["total_count"], summary_resource.total_count, ) self.assertEqual( BLOCK_STORAGE_SUMMARY_326["metadata"], summary_resource.metadata ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_capabilities.py0000664000175000017500000000722600000000000027556 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v3 import capabilities from openstack.tests.unit import base CAPABILITIES = { "namespace": "OS::Storage::Capabilities::fake", "vendor_name": "OpenStack", "volume_backend_name": "lvmdriver-1", "pool_name": "pool", "driver_version": "2.0.0", "storage_protocol": "iSCSI", "display_name": "Capabilities of Cinder LVM driver", "description": "These are volume type options", "visibility": "public", "replication_targets": [], "properties": { "compression": { "title": "Compression", "description": "Enables compression.", "type": "boolean", }, "qos": { "title": "QoS", "description": "Enables QoS.", "type": "boolean", }, "replication": { "title": "Replication", "description": "Enables replication.", "type": "boolean", }, "thin_provisioning": { "title": "Thin Provisioning", "description": "Sets thin provisioning.", "type": "boolean", }, }, } class TestCapabilites(base.TestCase): def test_basic(self): capabilities_resource = capabilities.Capabilities() self.assertEqual(None, capabilities_resource.resource_key) self.assertEqual(None, capabilities_resource.resources_key) self.assertEqual("/capabilities", capabilities_resource.base_path) self.assertTrue(capabilities_resource.allow_fetch) self.assertFalse(capabilities_resource.allow_create) self.assertFalse(capabilities_resource.allow_commit) self.assertFalse(capabilities_resource.allow_delete) self.assertFalse(capabilities_resource.allow_list) def test_make_capabilities(self): capabilities_resource = capabilities.Capabilities(**CAPABILITIES) self.assertEqual( CAPABILITIES["description"], 
capabilities_resource.description ) self.assertEqual( CAPABILITIES["display_name"], capabilities_resource.display_name ) self.assertEqual( CAPABILITIES["driver_version"], capabilities_resource.driver_version, ) self.assertEqual( CAPABILITIES["namespace"], capabilities_resource.namespace ) self.assertEqual( CAPABILITIES["pool_name"], capabilities_resource.pool_name ) self.assertEqual( CAPABILITIES["properties"], capabilities_resource.properties ) self.assertEqual( CAPABILITIES["replication_targets"], capabilities_resource.replication_targets, ) self.assertEqual( CAPABILITIES["storage_protocol"], capabilities_resource.storage_protocol, ) self.assertEqual( CAPABILITIES["vendor_name"], capabilities_resource.vendor_name ) self.assertEqual( CAPABILITIES["visibility"], capabilities_resource.visibility ) self.assertEqual( CAPABILITIES["volume_backend_name"], capabilities_resource.volume_backend_name, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_extension.py0000664000175000017500000000346700000000000027144 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.block_storage.v3 import extension from openstack.tests.unit import base EXTENSION = { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "updated": "2011-06-29T00:00:00+00:00", } class TestExtension(base.TestCase): def test_basic(self): extension_resource = extension.Extension() self.assertEqual('extensions', extension_resource.resources_key) self.assertEqual('/extensions', extension_resource.base_path) self.assertFalse(extension_resource.allow_create) self.assertFalse(extension_resource.allow_fetch) self.assertFalse(extension_resource.allow_commit) self.assertFalse(extension_resource.allow_delete) self.assertTrue(extension_resource.allow_list) def test_make_extension(self): extension_resource = extension.Extension(**EXTENSION) self.assertEqual(EXTENSION['alias'], extension_resource.alias) self.assertEqual( EXTENSION['description'], extension_resource.description ) self.assertEqual(EXTENSION['links'], extension_resource.links) self.assertEqual(EXTENSION['name'], extension_resource.name) self.assertEqual(EXTENSION['updated'], extension_resource.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_group.py0000664000175000017500000001154100000000000026254 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v3 import group from openstack.tests.unit import base GROUP_ID = "6f519a48-3183-46cf-a32f-41815f813986" GROUP = { "id": GROUP_ID, "status": "available", "availability_zone": "az1", "created_at": "2015-09-16T09:28:52.000000", "name": "first_group", "description": "my first group", "group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1", "volume_types": ["c4daaf47-c530-4901-b28e-f5f0a359c4e6"], "volumes": ["a2cdf1ad-5497-4e57-bd7d-f573768f3d03"], "group_snapshot_id": None, "source_group_id": None, "project_id": "7ccf4863071f44aeb8f141f65780c51b", } class TestGroup(base.TestCase): def test_basic(self): resource = group.Group() self.assertEqual("group", resource.resource_key) self.assertEqual("groups", resource.resources_key) self.assertEqual("/groups", resource.base_path) self.assertTrue(resource.allow_create) self.assertTrue(resource.allow_fetch) self.assertTrue(resource.allow_delete) self.assertTrue(resource.allow_commit) self.assertTrue(resource.allow_list) def test_make_resource(self): resource = group.Group(**GROUP) self.assertEqual(GROUP["id"], resource.id) self.assertEqual(GROUP["status"], resource.status) self.assertEqual( GROUP["availability_zone"], resource.availability_zone ) self.assertEqual(GROUP["created_at"], resource.created_at) self.assertEqual(GROUP["name"], resource.name) self.assertEqual(GROUP["description"], resource.description) self.assertEqual(GROUP["group_type"], resource.group_type) self.assertEqual(GROUP["volume_types"], resource.volume_types) self.assertEqual(GROUP["volumes"], resource.volumes) self.assertEqual( GROUP["group_snapshot_id"], resource.group_snapshot_id ) self.assertEqual(GROUP["source_group_id"], resource.source_group_id) self.assertEqual(GROUP["project_id"], resource.project_id) class TestGroupAction(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = 
mock.Mock(return_value=self.resp.body) self.resp.headers = {} self.resp.status_code = 202 self.sess = mock.Mock(spec=adapter.Adapter) self.sess.get = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) self.sess.default_microversion = '3.38' def test_delete(self): sot = group.Group(**GROUP) self.assertIsNone(sot.delete(self.sess)) url = 'groups/%s/action' % GROUP_ID body = {'delete': {'delete-volumes': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_reset(self): sot = group.Group(**GROUP) self.assertIsNone(sot.reset(self.sess, 'new_status')) url = 'groups/%s/action' % GROUP_ID body = {'reset_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion, ) def test_create_from_source(self): resp = mock.Mock() resp.body = {'group': copy.deepcopy(GROUP)} resp.json = mock.Mock(return_value=resp.body) resp.headers = {} resp.status_code = 202 self.sess.post = mock.Mock(return_value=resp) sot = group.Group.create_from_source( self.sess, group_snapshot_id='9a591346-e595-4bc1-94e7-08f264406b63', source_group_id='6c5259f6-42ed-4e41-8ffe-e1c667ae9dff', name='group_from_source', description='a group from source', ) self.assertIsNotNone(sot) url = 'groups/action' body = { 'create-from-src': { 'name': 'group_from_source', 'description': 'a group from source', 'group_snapshot_id': '9a591346-e595-4bc1-94e7-08f264406b63', 'source_group_id': '6c5259f6-42ed-4e41-8ffe-e1c667ae9dff', }, } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_group_snapshot.py0000664000175000017500000000435500000000000030200 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v3 import group_snapshot from openstack.tests.unit import base GROUP_SNAPSHOT = { "id": "6f519a48-3183-46cf-a32f-41815f813986", "group_id": "6f519a48-3183-46cf-a32f-41815f814444", "status": "available", "created_at": "2015-09-16T09:28:52.000000", "name": "my_group_snapshot1", "description": "my first group snapshot", "group_type_id": "7270c56e-6354-4528-8e8b-f54dee2232c8", "project_id": "7ccf4863071f44aeb8f141f65780c51b", } class TestGroupSnapshot(base.TestCase): def test_basic(self): resource = group_snapshot.GroupSnapshot() self.assertEqual("group_snapshot", resource.resource_key) self.assertEqual("group_snapshots", resource.resources_key) self.assertEqual("/group_snapshots", resource.base_path) self.assertTrue(resource.allow_create) self.assertTrue(resource.allow_fetch) self.assertTrue(resource.allow_delete) self.assertTrue(resource.allow_list) self.assertFalse(resource.allow_commit) def test_make_resource(self): resource = group_snapshot.GroupSnapshot(**GROUP_SNAPSHOT) self.assertEqual(GROUP_SNAPSHOT["created_at"], resource.created_at) self.assertEqual(GROUP_SNAPSHOT["description"], resource.description) self.assertEqual(GROUP_SNAPSHOT["group_id"], resource.group_id) self.assertEqual( GROUP_SNAPSHOT["group_type_id"], resource.group_type_id ) self.assertEqual(GROUP_SNAPSHOT["id"], resource.id) self.assertEqual(GROUP_SNAPSHOT["name"], resource.name) self.assertEqual(GROUP_SNAPSHOT["project_id"], resource.project_id) self.assertEqual(GROUP_SNAPSHOT["status"], resource.status) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_group_type.py0000664000175000017500000001207600000000000027321 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v3 import group_type from openstack.tests.unit import base GROUP_TYPE = { "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", "name": "grp-type-001", "description": "group type 001", "is_public": True, "group_specs": {"consistent_group_snapshot_enabled": " False"}, } class TestGroupType(base.TestCase): def setUp(self): super().setUp() self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = 1 self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_basic(self): resource = group_type.GroupType() self.assertEqual("group_type", resource.resource_key) self.assertEqual("group_types", resource.resources_key) self.assertEqual("/group_types", resource.base_path) self.assertTrue(resource.allow_create) self.assertTrue(resource.allow_fetch) self.assertTrue(resource.allow_delete) self.assertTrue(resource.allow_commit) self.assertTrue(resource.allow_list) def test_make_resource(self): resource = group_type.GroupType(**GROUP_TYPE) self.assertEqual(GROUP_TYPE["id"], resource.id) self.assertEqual(GROUP_TYPE["name"], resource.name) self.assertEqual(GROUP_TYPE["description"], 
resource.description) self.assertEqual(GROUP_TYPE["is_public"], resource.is_public) self.assertEqual(GROUP_TYPE["group_specs"], resource.group_specs) def test_fetch_group_specs(self): sot = group_type.GroupType(**GROUP_TYPE) resp = mock.Mock() resp.body = {'group_specs': {'a': 'b', 'c': 'd'}} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.get = mock.Mock(return_value=resp) rsp = sot.fetch_group_specs(self.sess) self.sess.get.assert_called_with( f"group_types/{GROUP_TYPE['id']}/group_specs", microversion=self.sess.default_microversion, ) self.assertEqual(resp.body['group_specs'], rsp.group_specs) self.assertIsInstance(rsp, group_type.GroupType) def test_create_group_specs(self): sot = group_type.GroupType(**GROUP_TYPE) specs = {'a': 'b', 'c': 'd'} resp = mock.Mock() resp.body = {'group_specs': specs} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.post = mock.Mock(return_value=resp) rsp = sot.create_group_specs(self.sess, specs) self.sess.post.assert_called_with( f"group_types/{GROUP_TYPE['id']}/group_specs", json={'group_specs': specs}, microversion=self.sess.default_microversion, ) self.assertEqual(resp.body['group_specs'], rsp.group_specs) self.assertIsInstance(rsp, group_type.GroupType) def test_get_group_specs_property(self): sot = group_type.GroupType(**GROUP_TYPE) resp = mock.Mock() resp.body = {'a': 'b'} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.get = mock.Mock(return_value=resp) rsp = sot.get_group_specs_property(self.sess, 'a') self.sess.get.assert_called_with( f"group_types/{GROUP_TYPE['id']}/group_specs/a", microversion=self.sess.default_microversion, ) self.assertEqual('b', rsp) def test_update_group_specs_property(self): sot = group_type.GroupType(**GROUP_TYPE) resp = mock.Mock() resp.body = {'a': 'b'} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.put = mock.Mock(return_value=resp) rsp = 
sot.update_group_specs_property(self.sess, 'a', 'b') self.sess.put.assert_called_with( f"group_types/{GROUP_TYPE['id']}/group_specs/a", json={'a': 'b'}, microversion=self.sess.default_microversion, ) self.assertEqual('b', rsp) def test_delete_group_specs_property(self): sot = group_type.GroupType(**GROUP_TYPE) resp = mock.Mock() resp.body = None resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.delete = mock.Mock(return_value=resp) rsp = sot.delete_group_specs_property(self.sess, 'a') self.sess.delete.assert_called_with( f"group_types/{GROUP_TYPE['id']}/group_specs/a", microversion=self.sess.default_microversion, ) self.assertIsNone(rsp) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_limits.py0000664000175000017500000001752500000000000026431 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.block_storage.v3 import limits from openstack.tests.unit import base ABSOLUTE_LIMIT = { "totalSnapshotsUsed": 1, "maxTotalBackups": 10, "maxTotalVolumeGigabytes": 1000, "maxTotalSnapshots": 10, "maxTotalBackupGigabytes": 1000, "totalBackupGigabytesUsed": 1, "maxTotalVolumes": 10, "totalVolumesUsed": 2, "totalBackupsUsed": 3, "totalGigabytesUsed": 2, } RATE_LIMIT = { "verb": "POST", "value": 80, "remaining": 80, "unit": "MINUTE", "next-available": "2021-02-23T22:08:00Z", } RATE_LIMITS = {"regex": ".*", "uri": "*", "limit": [RATE_LIMIT]} LIMIT = {"rate": [RATE_LIMITS], "absolute": ABSOLUTE_LIMIT} class TestAbsoluteLimit(base.TestCase): def test_basic(self): limit_resource = limits.AbsoluteLimit() self.assertIsNone(limit_resource.resource_key) self.assertIsNone(limit_resource.resources_key) self.assertEqual('', limit_resource.base_path) self.assertFalse(limit_resource.allow_create) self.assertFalse(limit_resource.allow_fetch) self.assertFalse(limit_resource.allow_delete) self.assertFalse(limit_resource.allow_commit) self.assertFalse(limit_resource.allow_list) def test_make_absolute_limit(self): limit_resource = limits.AbsoluteLimit(**ABSOLUTE_LIMIT) self.assertEqual( ABSOLUTE_LIMIT['totalSnapshotsUsed'], limit_resource.total_snapshots_used, ) self.assertEqual( ABSOLUTE_LIMIT['maxTotalBackups'], limit_resource.max_total_backups ) self.assertEqual( ABSOLUTE_LIMIT['maxTotalVolumeGigabytes'], limit_resource.max_total_volume_gigabytes, ) self.assertEqual( ABSOLUTE_LIMIT['maxTotalSnapshots'], limit_resource.max_total_snapshots, ) self.assertEqual( ABSOLUTE_LIMIT['maxTotalBackupGigabytes'], limit_resource.max_total_backup_gigabytes, ) self.assertEqual( ABSOLUTE_LIMIT['totalBackupGigabytesUsed'], limit_resource.total_backup_gigabytes_used, ) self.assertEqual( ABSOLUTE_LIMIT['maxTotalVolumes'], limit_resource.max_total_volumes ) self.assertEqual( ABSOLUTE_LIMIT['totalVolumesUsed'], limit_resource.total_volumes_used, ) self.assertEqual( 
ABSOLUTE_LIMIT['totalBackupsUsed'], limit_resource.total_backups_used, ) self.assertEqual( ABSOLUTE_LIMIT['totalGigabytesUsed'], limit_resource.total_gigabytes_used, ) class TestRateLimit(base.TestCase): def test_basic(self): limit_resource = limits.RateLimit() self.assertIsNone(limit_resource.resource_key) self.assertIsNone(limit_resource.resources_key) self.assertEqual('', limit_resource.base_path) self.assertFalse(limit_resource.allow_create) self.assertFalse(limit_resource.allow_fetch) self.assertFalse(limit_resource.allow_delete) self.assertFalse(limit_resource.allow_commit) self.assertFalse(limit_resource.allow_list) def test_make_rate_limit(self): limit_resource = limits.RateLimit(**RATE_LIMIT) self.assertEqual(RATE_LIMIT['verb'], limit_resource.verb) self.assertEqual(RATE_LIMIT['value'], limit_resource.value) self.assertEqual(RATE_LIMIT['remaining'], limit_resource.remaining) self.assertEqual(RATE_LIMIT['unit'], limit_resource.unit) self.assertEqual( RATE_LIMIT['next-available'], limit_resource.next_available ) class TestRateLimits(base.TestCase): def test_basic(self): limit_resource = limits.RateLimits() self.assertIsNone(limit_resource.resource_key) self.assertIsNone(limit_resource.resources_key) self.assertEqual('', limit_resource.base_path) self.assertFalse(limit_resource.allow_create) self.assertFalse(limit_resource.allow_fetch) self.assertFalse(limit_resource.allow_delete) self.assertFalse(limit_resource.allow_commit) self.assertFalse(limit_resource.allow_list) def _test_rate_limit(self, expected, actual): self.assertEqual(expected[0]['verb'], actual[0].verb) self.assertEqual(expected[0]['value'], actual[0].value) self.assertEqual(expected[0]['remaining'], actual[0].remaining) self.assertEqual(expected[0]['unit'], actual[0].unit) self.assertEqual( expected[0]['next-available'], actual[0].next_available ) def test_make_rate_limits(self): limit_resource = limits.RateLimits(**RATE_LIMITS) self.assertEqual(RATE_LIMITS['regex'], limit_resource.regex) 
self.assertEqual(RATE_LIMITS['uri'], limit_resource.uri) self._test_rate_limit(RATE_LIMITS['limit'], limit_resource.limits) class TestLimit(base.TestCase): def test_basic(self): limit_resource = limits.Limits() self.assertEqual('limits', limit_resource.resource_key) self.assertEqual('/limits', limit_resource.base_path) self.assertTrue(limit_resource.allow_fetch) self.assertFalse(limit_resource.allow_create) self.assertFalse(limit_resource.allow_commit) self.assertFalse(limit_resource.allow_delete) self.assertFalse(limit_resource.allow_list) def _test_absolute_limit(self, expected, actual): self.assertEqual( expected['totalSnapshotsUsed'], actual.total_snapshots_used ) self.assertEqual(expected['maxTotalBackups'], actual.max_total_backups) self.assertEqual( expected['maxTotalVolumeGigabytes'], actual.max_total_volume_gigabytes, ) self.assertEqual( expected['maxTotalSnapshots'], actual.max_total_snapshots ) self.assertEqual( expected['maxTotalBackupGigabytes'], actual.max_total_backup_gigabytes, ) self.assertEqual( expected['totalBackupGigabytesUsed'], actual.total_backup_gigabytes_used, ) self.assertEqual(expected['maxTotalVolumes'], actual.max_total_volumes) self.assertEqual( expected['totalVolumesUsed'], actual.total_volumes_used ) self.assertEqual( expected['totalBackupsUsed'], actual.total_backups_used ) self.assertEqual( expected['totalGigabytesUsed'], actual.total_gigabytes_used ) def _test_rate_limit(self, expected, actual): self.assertEqual(expected[0]['verb'], actual[0].verb) self.assertEqual(expected[0]['value'], actual[0].value) self.assertEqual(expected[0]['remaining'], actual[0].remaining) self.assertEqual(expected[0]['unit'], actual[0].unit) self.assertEqual( expected[0]['next-available'], actual[0].next_available ) def _test_rate_limits(self, expected, actual): self.assertEqual(expected[0]['regex'], actual[0].regex) self.assertEqual(expected[0]['uri'], actual[0].uri) self._test_rate_limit(expected[0]['limit'], actual[0].limits) def 
test_make_limit(self): limit_resource = limits.Limits(**LIMIT) self._test_rate_limits(LIMIT['rate'], limit_resource.rate) self._test_absolute_limit(LIMIT['absolute'], limit_resource.absolute) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_proxy.py0000664000175000017500000010537200000000000026307 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.block_storage.v3 import _proxy from openstack.block_storage.v3 import backup from openstack.block_storage.v3 import capabilities from openstack.block_storage.v3 import extension from openstack.block_storage.v3 import group from openstack.block_storage.v3 import group_snapshot from openstack.block_storage.v3 import group_type from openstack.block_storage.v3 import quota_class_set from openstack.block_storage.v3 import quota_set from openstack.block_storage.v3 import resource_filter from openstack.block_storage.v3 import service from openstack.block_storage.v3 import snapshot from openstack.block_storage.v3 import stats from openstack.block_storage.v3 import type from openstack.block_storage.v3 import volume from openstack.identity.v3 import project from openstack import proxy as proxy_base from openstack.tests.unit import test_proxy_base class TestVolumeProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = 
_proxy.Proxy(self.session) class TestVolume(TestVolumeProxy): def test_volume_get(self): self.verify_get(self.proxy.get_volume, volume.Volume) def test_volume_find(self): self.verify_find( self.proxy.find_volume, volume.Volume, method_kwargs={'all_projects': True}, expected_kwargs={ "list_base_path": "/volumes/detail", "all_projects": True, }, ) def test_volumes_detailed(self): self.verify_list( self.proxy.volumes, volume.Volume, method_kwargs={"details": True, "query": 1}, expected_kwargs={"query": 1, "base_path": "/volumes/detail"}, ) def test_volumes_not_detailed(self): self.verify_list( self.proxy.volumes, volume.Volume, method_kwargs={"details": False, "query": 1}, expected_kwargs={"query": 1}, ) def test_volume_create_attrs(self): self.verify_create(self.proxy.create_volume, volume.Volume) def test_volume_delete(self): self.verify_delete(self.proxy.delete_volume, volume.Volume, False) def test_volume_delete_ignore(self): self.verify_delete(self.proxy.delete_volume, volume.Volume, True) def test_volume_delete_force(self): self._verify( "openstack.block_storage.v3.volume.Volume.force_delete", self.proxy.delete_volume, method_args=["value"], method_kwargs={"force": True}, expected_args=[self.proxy], ) def test_get_volume_metadata(self): self._verify( "openstack.block_storage.v3.volume.Volume.fetch_metadata", self.proxy.get_volume_metadata, method_args=["value"], expected_args=[self.proxy], expected_result=volume.Volume(id="value", metadata={}), ) def test_set_volume_metadata(self): kwargs = {"a": "1", "b": "2"} id = "an_id" self._verify( "openstack.block_storage.v3.volume.Volume.set_metadata", self.proxy.set_volume_metadata, method_args=[id], method_kwargs=kwargs, method_result=volume.Volume.existing(id=id, metadata=kwargs), expected_args=[self.proxy], expected_kwargs={'metadata': kwargs}, expected_result=volume.Volume.existing(id=id, metadata=kwargs), ) def test_delete_volume_metadata(self): self._verify( 
"openstack.block_storage.v3.volume.Volume.delete_metadata_item", self.proxy.delete_volume_metadata, expected_result=None, method_args=["value", ["key"]], expected_args=[self.proxy, "key"], ) def test_volume_wait_for(self): value = volume.Volume(id='1234') self.verify_wait_for_status( self.proxy.wait_for_status, method_args=[value], expected_args=[self.proxy, value, 'available', ['error'], 2, 120], expected_kwargs={'callback': None}, ) class TestPools(TestVolumeProxy): def test_backend_pools(self): self.verify_list(self.proxy.backend_pools, stats.Pools) class TestLimit(TestVolumeProxy): def test_limits_get(self): self._verify( 'openstack.resource.Resource.fetch', self.proxy.get_limits, method_args=[], method_kwargs={'project': 'foo'}, expected_args=[self.proxy], expected_kwargs={'requires_id': False, 'project_id': 'foo'}, ) class TestCapabilities(TestVolumeProxy): def test_capabilites_get(self): self.verify_get(self.proxy.get_capabilities, capabilities.Capabilities) class TestResourceFilter(TestVolumeProxy): def test_resource_filters(self): self.verify_list( self.proxy.resource_filters, resource_filter.ResourceFilter ) class TestGroup(TestVolumeProxy): def test_group_get(self): self.verify_get(self.proxy.get_group, group.Group) def test_group_find(self): self.verify_find( self.proxy.find_group, group.Group, expected_kwargs={'list_base_path': '/groups/detail'}, ) def test_groups(self): self.verify_list(self.proxy.groups, group.Group) def test_group_create(self): self.verify_create(self.proxy.create_group, group.Group) def test_group_create_from_source(self): self._verify( "openstack.block_storage.v3.group.Group.create_from_source", self.proxy.create_group_from_source, method_args=[], expected_args=[self.proxy], ) def test_group_delete(self): self._verify( "openstack.block_storage.v3.group.Group.delete", self.proxy.delete_group, method_args=['delete_volumes'], expected_args=[self.proxy], expected_kwargs={'delete_volumes': False}, ) def test_group_update(self): 
self.verify_update(self.proxy.update_group, group.Group) def reset_group_state(self): self._verify(self.proxy.reset_group_state, group.Group) class TestGroupSnapshot(TestVolumeProxy): def test_group_snapshot_get(self): self.verify_get( self.proxy.get_group_snapshot, group_snapshot.GroupSnapshot ) def test_group_snapshot_find(self): self.verify_find( self.proxy.find_group_snapshot, group_snapshot.GroupSnapshot, expected_kwargs={ 'list_base_path': '/group_snapshots/detail', }, ) def test_group_snapshots(self): self.verify_list( self.proxy.group_snapshots, group_snapshot.GroupSnapshot, expected_kwargs={}, ) def test_group_snapshots__detailed(self): self.verify_list( self.proxy.group_snapshots, group_snapshot.GroupSnapshot, method_kwargs={'details': True, 'query': 1}, expected_kwargs={ 'query': 1, 'base_path': '/group_snapshots/detail', }, ) def test_group_snapshot_create(self): self.verify_create( self.proxy.create_group_snapshot, group_snapshot.GroupSnapshot ) def test_group_snapshot_delete(self): self.verify_delete( self.proxy.delete_group_snapshot, group_snapshot.GroupSnapshot, False, ) def test_group_snapshot_delete_ignore(self): self.verify_delete( self.proxy.delete_group_snapshot, group_snapshot.GroupSnapshot, True, ) class TestGroupType(TestVolumeProxy): def test_group_type_get(self): self.verify_get(self.proxy.get_group_type, group_type.GroupType) def test_group_type_find(self): self.verify_find(self.proxy.find_group_type, group_type.GroupType) def test_group_types(self): self.verify_list(self.proxy.group_types, group_type.GroupType) def test_group_type_create(self): self.verify_create(self.proxy.create_group_type, group_type.GroupType) def test_group_type_delete(self): self.verify_delete( self.proxy.delete_group_type, group_type.GroupType, False ) def test_group_type_delete_ignore(self): self.verify_delete( self.proxy.delete_group_type, group_type.GroupType, True ) def test_group_type_update(self): self.verify_update(self.proxy.update_group_type, 
group_type.GroupType) def test_group_type_fetch_group_specs(self): self._verify( "openstack.block_storage.v3.group_type.GroupType.fetch_group_specs", # noqa: E501 self.proxy.fetch_group_type_group_specs, method_args=["value"], expected_args=[self.proxy], ) def test_group_type_create_group_specs(self): self._verify( "openstack.block_storage.v3.group_type.GroupType.create_group_specs", # noqa: E501 self.proxy.create_group_type_group_specs, method_args=["value", {'a': 'b'}], expected_args=[self.proxy], expected_kwargs={"specs": {'a': 'b'}}, ) def test_group_type_get_group_specs_prop(self): self._verify( "openstack.block_storage.v3.group_type.GroupType.get_group_specs_property", # noqa: E501 self.proxy.get_group_type_group_specs_property, method_args=["value", "prop"], expected_args=[self.proxy, "prop"], ) def test_group_type_update_group_specs_prop(self): self._verify( "openstack.block_storage.v3.group_type.GroupType.update_group_specs_property", # noqa: E501 self.proxy.update_group_type_group_specs_property, method_args=["value", "prop", "val"], expected_args=[self.proxy, "prop", "val"], ) def test_group_type_delete_group_specs_prop(self): self._verify( "openstack.block_storage.v3.group_type.GroupType.delete_group_specs_property", # noqa: E501 self.proxy.delete_group_type_group_specs_property, method_args=["value", "prop"], expected_args=[self.proxy, "prop"], ) class TestService(TestVolumeProxy): def test_services(self): self.verify_list(self.proxy.services, service.Service) def test_enable_service(self): self._verify( 'openstack.block_storage.v3.service.Service.enable', self.proxy.enable_service, method_args=["value"], expected_args=[self.proxy], ) def test_disable_service(self): self._verify( 'openstack.block_storage.v3.service.Service.disable', self.proxy.disable_service, method_args=["value"], expected_kwargs={"reason": None}, expected_args=[self.proxy], ) def test_thaw_service(self): self._verify( 'openstack.block_storage.v3.service.Service.thaw', 
self.proxy.thaw_service, method_args=["value"], expected_args=[self.proxy], ) def test_freeze_service(self): self._verify( 'openstack.block_storage.v3.service.Service.freeze', self.proxy.freeze_service, method_args=["value"], expected_args=[self.proxy], ) def test_failover_service(self): self._verify( 'openstack.block_storage.v3.service.Service.failover', self.proxy.failover_service, method_args=["value"], expected_args=[self.proxy], expected_kwargs={"backend_id": None, "cluster": None}, ) class TestExtension(TestVolumeProxy): def test_extensions(self): self.verify_list(self.proxy.extensions, extension.Extension) class TestVolumeActions(TestVolumeProxy): def test_volume_extend(self): self._verify( "openstack.block_storage.v3.volume.Volume.extend", self.proxy.extend_volume, method_args=["value", "new-size"], expected_args=[self.proxy, "new-size"], ) def test_volume_set_readonly_no_argument(self): self._verify( "openstack.block_storage.v3.volume.Volume.set_readonly", self.proxy.set_volume_readonly, method_args=["value"], expected_args=[self.proxy, True], ) def test_volume_set_readonly_false(self): self._verify( "openstack.block_storage.v3.volume.Volume.set_readonly", self.proxy.set_volume_readonly, method_args=["value", False], expected_args=[self.proxy, False], ) def test_volume_set_bootable(self): self._verify( "openstack.block_storage.v3.volume.Volume.set_bootable_status", self.proxy.set_volume_bootable_status, method_args=["value", True], expected_args=[self.proxy, True], ) def test_volume_reset_volume_status(self): self._verify( "openstack.block_storage.v3.volume.Volume.reset_status", self.proxy.reset_volume_status, method_args=["value", '1', '2', '3'], expected_args=[self.proxy, '1', '2', '3'], ) def test_set_volume_image_metadata(self): self._verify( "openstack.block_storage.v3.volume.Volume.set_image_metadata", self.proxy.set_volume_image_metadata, method_args=["value"], method_kwargs={'foo': 'bar'}, expected_args=[self.proxy], expected_kwargs={'metadata': 
{'foo': 'bar'}}, ) def test_delete_volume_image_metadata(self): self._verify( "openstack.block_storage.v3.volume.Volume.delete_image_metadata", self.proxy.delete_volume_image_metadata, method_args=["value"], expected_args=[self.proxy], ) def test_delete_volume_image_metadata__with_keys(self): self._verify( "openstack.block_storage.v3.volume.Volume.delete_image_metadata_item", self.proxy.delete_volume_image_metadata, method_args=["value", ['foo']], expected_args=[self.proxy, 'foo'], ) def test_volume_revert_to_snapshot(self): self._verify( "openstack.block_storage.v3.volume.Volume.revert_to_snapshot", self.proxy.revert_volume_to_snapshot, method_args=["value", '1'], expected_args=[self.proxy, '1'], ) def test_attach_instance(self): self._verify( "openstack.block_storage.v3.volume.Volume.attach", self.proxy.attach_volume, method_args=["value", '1'], method_kwargs={'instance': '2'}, expected_args=[self.proxy, '1', '2', None], ) def test_attach_host(self): self._verify( "openstack.block_storage.v3.volume.Volume.attach", self.proxy.attach_volume, method_args=["value", '1'], method_kwargs={'host_name': '3'}, expected_args=[self.proxy, '1', None, '3'], ) def test_detach_defaults(self): self._verify( "openstack.block_storage.v3.volume.Volume.detach", self.proxy.detach_volume, method_args=["value", '1'], expected_args=[self.proxy, '1', False, None], ) def test_detach_force(self): self._verify( "openstack.block_storage.v3.volume.Volume.detach", self.proxy.detach_volume, method_args=["value", '1', True, {'a': 'b'}], expected_args=[self.proxy, '1', True, {'a': 'b'}], ) def test_unmanage(self): self._verify( "openstack.block_storage.v3.volume.Volume.unmanage", self.proxy.unmanage_volume, method_args=["value"], expected_args=[self.proxy], ) def test_migrate_default(self): self._verify( "openstack.block_storage.v3.volume.Volume.migrate", self.proxy.migrate_volume, method_args=["value", '1'], expected_args=[self.proxy, '1', False, False, None], ) def test_migrate_nondefault(self): 
self._verify( "openstack.block_storage.v3.volume.Volume.migrate", self.proxy.migrate_volume, method_args=["value", '1', True, True], expected_args=[self.proxy, '1', True, True, None], ) def test_migrate_cluster(self): self._verify( "openstack.block_storage.v3.volume.Volume.migrate", self.proxy.migrate_volume, method_args=["value"], method_kwargs={'cluster': '3'}, expected_args=[self.proxy, None, False, False, '3'], ) def test_complete_migration(self): self._verify( "openstack.block_storage.v3.volume.Volume.complete_migration", self.proxy.complete_volume_migration, method_args=["value", '1'], expected_args=[self.proxy, "1", False], ) def test_complete_migration_error(self): self._verify( "openstack.block_storage.v3.volume.Volume.complete_migration", self.proxy.complete_volume_migration, method_args=["value", "1", True], expected_args=[self.proxy, "1", True], ) def test_upload_to_image(self): self._verify( "openstack.block_storage.v3.volume.Volume.upload_to_image", self.proxy.upload_volume_to_image, method_args=["value", "1"], expected_args=[self.proxy, "1"], expected_kwargs={ "force": False, "disk_format": None, "container_format": None, "visibility": None, "protected": None, }, ) def test_upload_to_image_extended(self): self._verify( "openstack.block_storage.v3.volume.Volume.upload_to_image", self.proxy.upload_volume_to_image, method_args=["value", "1"], method_kwargs={ "disk_format": "2", "container_format": "3", "visibility": "4", "protected": "5", }, expected_args=[self.proxy, "1"], expected_kwargs={ "force": False, "disk_format": "2", "container_format": "3", "visibility": "4", "protected": "5", }, ) def test_reserve(self): self._verify( "openstack.block_storage.v3.volume.Volume.reserve", self.proxy.reserve_volume, method_args=["value"], expected_args=[self.proxy], ) def test_unreserve(self): self._verify( "openstack.block_storage.v3.volume.Volume.unreserve", self.proxy.unreserve_volume, method_args=["value"], expected_args=[self.proxy], ) def 
test_begin_detaching(self): self._verify( "openstack.block_storage.v3.volume.Volume.begin_detaching", self.proxy.begin_volume_detaching, method_args=["value"], expected_args=[self.proxy], ) def test_abort_detaching(self): self._verify( "openstack.block_storage.v3.volume.Volume.abort_detaching", self.proxy.abort_volume_detaching, method_args=["value"], expected_args=[self.proxy], ) def test_init_attachment(self): self._verify( "openstack.block_storage.v3.volume.Volume.init_attachment", self.proxy.init_volume_attachment, method_args=["value", "1"], expected_args=[self.proxy, "1"], ) def test_terminate_attachment(self): self._verify( "openstack.block_storage.v3.volume.Volume.terminate_attachment", self.proxy.terminate_volume_attachment, method_args=["value", "1"], expected_args=[self.proxy, "1"], ) class TestBackup(TestVolumeProxy): def test_backups_detailed(self): # NOTE: mock has_service self.proxy._connection = mock.Mock() self.proxy._connection.has_service = mock.Mock(return_value=True) self.verify_list( self.proxy.backups, backup.Backup, method_kwargs={"details": True, "query": 1}, expected_kwargs={"query": 1, "base_path": "/backups/detail"}, ) def test_backups_not_detailed(self): # NOTE: mock has_service self.proxy._connection = mock.Mock() self.proxy._connection.has_service = mock.Mock(return_value=True) self.verify_list( self.proxy.backups, backup.Backup, method_kwargs={"details": False, "query": 1}, expected_kwargs={"query": 1}, ) def test_backup_get(self): # NOTE: mock has_service self.proxy._connection = mock.Mock() self.proxy._connection.has_service = mock.Mock(return_value=True) self.verify_get(self.proxy.get_backup, backup.Backup) def test_backup_find(self): # NOTE: mock has_service self.proxy._connection = mock.Mock() self.proxy._connection.has_service = mock.Mock(return_value=True) self.verify_find( self.proxy.find_backup, backup.Backup, expected_kwargs={'list_base_path': '/backups/detail'}, ) def test_backup_delete(self): # NOTE: mock has_service 
self.proxy._connection = mock.Mock() self.proxy._connection.has_service = mock.Mock(return_value=True) self.verify_delete(self.proxy.delete_backup, backup.Backup, False) def test_backup_delete_ignore(self): # NOTE: mock has_service self.proxy._connection = mock.Mock() self.proxy._connection.has_service = mock.Mock(return_value=True) self.verify_delete(self.proxy.delete_backup, backup.Backup, True) def test_backup_delete_force(self): self._verify( "openstack.block_storage.v3.backup.Backup.force_delete", self.proxy.delete_backup, method_args=["value"], method_kwargs={"force": True}, expected_args=[self.proxy], ) def test_backup_create_attrs(self): # NOTE: mock has_service self.proxy._connection = mock.Mock() self.proxy._connection.has_service = mock.Mock(return_value=True) self.verify_create(self.proxy.create_backup, backup.Backup) def test_backup_restore(self): # NOTE: mock has_service self.proxy._connection = mock.Mock() self.proxy._connection.has_service = mock.Mock(return_value=True) self._verify( 'openstack.block_storage.v3.backup.Backup.restore', self.proxy.restore_backup, method_args=['volume_id'], method_kwargs={'volume_id': 'vol_id', 'name': 'name'}, expected_args=[self.proxy], expected_kwargs={'volume_id': 'vol_id', 'name': 'name'}, ) def test_backup_reset(self): self._verify( "openstack.block_storage.v3.backup.Backup.reset", self.proxy.reset_backup, method_args=["value", "new_status"], expected_args=[self.proxy, "new_status"], ) class TestSnapshot(TestVolumeProxy): def test_snapshot_get(self): self.verify_get(self.proxy.get_snapshot, snapshot.Snapshot) def test_snapshot_find(self): self.verify_find( self.proxy.find_snapshot, snapshot.Snapshot, method_kwargs={'all_projects': True}, expected_kwargs={ 'list_base_path': '/snapshots/detail', 'all_projects': True, }, ) def test_snapshots_detailed(self): self.verify_list( self.proxy.snapshots, snapshot.SnapshotDetail, method_kwargs={"details": True, "query": 1}, expected_kwargs={"query": 1, "base_path": 
"/snapshots/detail"}, ) def test_snapshots_not_detailed(self): self.verify_list( self.proxy.snapshots, snapshot.Snapshot, method_kwargs={"details": False, "query": 1}, expected_kwargs={"query": 1}, ) def test_snapshot_create_attrs(self): self.verify_create(self.proxy.create_snapshot, snapshot.Snapshot) def test_snapshot_update(self): self.verify_update(self.proxy.update_snapshot, snapshot.Snapshot) def test_snapshot_delete(self): self.verify_delete( self.proxy.delete_snapshot, snapshot.Snapshot, False ) def test_snapshot_delete_ignore(self): self.verify_delete(self.proxy.delete_snapshot, snapshot.Snapshot, True) def test_snapshot_delete_force(self): self._verify( "openstack.block_storage.v3.snapshot.Snapshot.force_delete", self.proxy.delete_snapshot, method_args=["value"], method_kwargs={"force": True}, expected_args=[self.proxy], ) def test_reset(self): self._verify( "openstack.block_storage.v3.snapshot.Snapshot.reset", self.proxy.reset_snapshot, method_args=["value", "new_status"], expected_args=[self.proxy, "new_status"], ) def test_set_status(self): self._verify( "openstack.block_storage.v3.snapshot.Snapshot.set_status", self.proxy.set_snapshot_status, method_args=["value", "new_status"], expected_args=[self.proxy, "new_status", None], ) def test_set_status_percentage(self): self._verify( "openstack.block_storage.v3.snapshot.Snapshot.set_status", self.proxy.set_snapshot_status, method_args=["value", "new_status", "per"], expected_args=[self.proxy, "new_status", "per"], ) def test_get_snapshot_metadata(self): self._verify( "openstack.block_storage.v3.snapshot.Snapshot.fetch_metadata", self.proxy.get_snapshot_metadata, method_args=["value"], expected_args=[self.proxy], expected_result=snapshot.Snapshot(id="value", metadata={}), ) def test_set_snapshot_metadata(self): kwargs = {"a": "1", "b": "2"} id = "an_id" self._verify( "openstack.block_storage.v3.snapshot.Snapshot.set_metadata", self.proxy.set_snapshot_metadata, method_args=[id], method_kwargs=kwargs, 
method_result=snapshot.Snapshot.existing(id=id, metadata=kwargs), expected_args=[self.proxy], expected_kwargs={'metadata': kwargs}, expected_result=snapshot.Snapshot.existing(id=id, metadata=kwargs), ) def test_delete_snapshot_metadata(self): self._verify( "openstack.block_storage.v3.snapshot.Snapshot." "delete_metadata_item", self.proxy.delete_snapshot_metadata, expected_result=None, method_args=["value", ["key"]], expected_args=[self.proxy, "key"], ) def test_manage_snapshot(self): kwargs = { "volume_id": "fake_id", "remote_source": "fake_volume", "snapshot_name": "fake_snap", "description": "test_snap", "property": {"k": "v"}, } self._verify( "openstack.block_storage.v3.snapshot.Snapshot.manage", self.proxy.manage_snapshot, method_kwargs=kwargs, method_result=snapshot.Snapshot(id="fake_id"), expected_args=[self.proxy], expected_kwargs=kwargs, expected_result=snapshot.Snapshot(id="fake_id"), ) class TestType(TestVolumeProxy): def test_type_get(self): self.verify_get(self.proxy.get_type, type.Type) def test_type_find(self): self.verify_find(self.proxy.find_type, type.Type) def test_types(self): self.verify_list(self.proxy.types, type.Type) def test_type_create_attrs(self): self.verify_create(self.proxy.create_type, type.Type) def test_type_delete(self): self.verify_delete(self.proxy.delete_type, type.Type, False) def test_type_delete_ignore(self): self.verify_delete(self.proxy.delete_type, type.Type, True) def test_type_update(self): self.verify_update(self.proxy.update_type, type.Type) def test_type_extra_specs_update(self): kwargs = {"a": "1", "b": "2"} id = "an_id" self._verify( "openstack.block_storage.v3.type.Type.set_extra_specs", self.proxy.update_type_extra_specs, method_args=[id], method_kwargs=kwargs, method_result=type.Type.existing(id=id, extra_specs=kwargs), expected_args=[self.proxy], expected_kwargs=kwargs, expected_result=kwargs, ) def test_type_extra_specs_delete(self): self._verify( "openstack.block_storage.v3.type.Type.delete_extra_specs", 
self.proxy.delete_type_extra_specs, expected_result=None, method_args=["value", "key"], expected_args=[self.proxy, "key"], ) def test_type_get_private_access(self): self._verify( "openstack.block_storage.v3.type.Type.get_private_access", self.proxy.get_type_access, method_args=["value"], expected_args=[self.proxy], ) def test_type_add_private_access(self): self._verify( "openstack.block_storage.v3.type.Type.add_private_access", self.proxy.add_type_access, method_args=["value", "a"], expected_args=[self.proxy, "a"], ) def test_type_remove_private_access(self): self._verify( "openstack.block_storage.v3.type.Type.remove_private_access", self.proxy.remove_type_access, method_args=["value", "a"], expected_args=[self.proxy, "a"], ) def test_type_encryption_get(self): self.verify_get( self.proxy.get_type_encryption, type.TypeEncryption, method_args=['value'], expected_args=[], expected_kwargs={'volume_type_id': 'value', 'requires_id': False}, ) def test_type_encryption_create(self): self.verify_create( self.proxy.create_type_encryption, type.TypeEncryption, method_kwargs={'volume_type': 'id'}, expected_kwargs={'volume_type_id': 'id'}, ) def test_type_encryption_update(self): # Verify that the get call was made with correct kwargs self.verify_get( self.proxy.get_type_encryption, type.TypeEncryption, method_args=['value'], expected_args=[], expected_kwargs={'volume_type_id': 'value', 'requires_id': False}, ) self.verify_update( self.proxy.update_type_encryption, type.TypeEncryption ) def test_type_encryption_delete(self): # Verify that the get call was made with correct kwargs self.verify_get( self.proxy.get_type_encryption, type.TypeEncryption, method_args=['value'], expected_args=[], expected_kwargs={'volume_type_id': 'value', 'requires_id': False}, ) self.verify_delete( self.proxy.delete_type_encryption, type.TypeEncryption, False ) def test_type_encryption_delete_ignore(self): self.verify_delete( self.proxy.delete_type_encryption, type.TypeEncryption, True ) class 
TestQuotaClassSet(TestVolumeProxy): def test_quota_class_set_get(self): self.verify_get( self.proxy.get_quota_class_set, quota_class_set.QuotaClassSet ) def test_quota_class_set_update(self): self.verify_update( self.proxy.update_quota_class_set, quota_class_set.QuotaClassSet, False, ) class TestQuotaSet(TestVolumeProxy): def test_quota_set_get(self): self._verify( 'openstack.resource.Resource.fetch', self.proxy.get_quota_set, method_args=['prj'], expected_args=[self.proxy], expected_kwargs={ 'error_message': None, 'requires_id': False, 'usage': False, }, method_result=quota_set.QuotaSet(), expected_result=quota_set.QuotaSet(), ) def test_quota_set_get_query(self): self._verify( 'openstack.resource.Resource.fetch', self.proxy.get_quota_set, method_args=['prj'], method_kwargs={'usage': True, 'user_id': 'uid'}, expected_args=[self.proxy], expected_kwargs={ 'error_message': None, 'requires_id': False, 'usage': True, 'user_id': 'uid', }, ) def test_quota_set_get_defaults(self): self._verify( 'openstack.resource.Resource.fetch', self.proxy.get_quota_set_defaults, method_args=['prj'], expected_args=[self.proxy], expected_kwargs={ 'error_message': None, 'requires_id': False, 'base_path': '/os-quota-sets/defaults', }, ) def test_quota_set_reset(self): self._verify( 'openstack.resource.Resource.delete', self.proxy.revert_quota_set, method_args=['prj'], method_kwargs={'user_id': 'uid'}, expected_args=[self.proxy], expected_kwargs={'user_id': 'uid'}, ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_quota_set_update(self, mock_get): fake_project = project.Project(id='prj') mock_get.side_effect = [fake_project] self._verify( 'openstack.proxy.Proxy._update', self.proxy.update_quota_set, method_args=['prj'], method_kwargs={'volumes': 123}, expected_args=[quota_set.QuotaSet, None], expected_kwargs={'project_id': 'prj', 'volumes': 123}, ) mock_get.assert_called_once_with(project.Project, 'prj') ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_resource_filter.py0000664000175000017500000000327300000000000030317 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v3 import resource_filter from openstack.tests.unit import base RESOURCE_FILTER = { 'filters': [ 'name', 'status', 'image_metadata', 'bootable', 'migration_status', ], 'resource': 'volume', } class TestResourceFilter(base.TestCase): def test_basic(self): resource = resource_filter.ResourceFilter() self.assertEqual('resource_filters', resource.resources_key) self.assertEqual('/resource_filters', resource.base_path) self.assertFalse(resource.allow_create) self.assertFalse(resource.allow_fetch) self.assertFalse(resource.allow_commit) self.assertFalse(resource.allow_delete) self.assertTrue(resource.allow_list) self.assertDictEqual( { "resource": "resource", }, resource._query_mapping._mapping, ) def test_make_resource_filter(self): resource = resource_filter.ResourceFilter(**RESOURCE_FILTER) self.assertEqual(RESOURCE_FILTER['filters'], resource.filters) self.assertEqual(RESOURCE_FILTER['resource'], resource.resource) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_service.py0000664000175000017500000001306400000000000026562 0ustar00zuulzuul00000000000000# Licensed under the Apache License, 
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.block_storage.v3 import service from openstack.tests.unit import base EXAMPLE = { "binary": "cinder-scheduler", "disabled_reason": None, "host": "devstack", "state": "up", "status": "enabled", "updated_at": "2017-06-29T05:50:35.000000", "zone": "nova", } class TestService(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = {'service': {}} self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.status_code = 200 self.resp.headers = {} self.sess = mock.Mock() self.sess.put = mock.Mock(return_value=self.resp) self.sess.default_microversion = '3.0' def test_basic(self): sot = service.Service() self.assertIsNone(sot.resource_key) self.assertEqual('services', sot.resources_key) self.assertEqual('/os-services', sot.base_path) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_delete) self.assertDictEqual( { 'binary': 'binary', 'host': 'host', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = service.Service(**EXAMPLE) self.assertEqual(EXAMPLE['binary'], sot.binary) self.assertEqual(EXAMPLE['binary'], sot.name) self.assertEqual(EXAMPLE['disabled_reason'], sot.disabled_reason) self.assertEqual(EXAMPLE['host'], sot.host) self.assertEqual(EXAMPLE['state'], sot.state) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['zone'], 
sot.availability_zone) def test_enable(self): sot = service.Service(**EXAMPLE) res = sot.enable(self.sess) self.assertIsNotNone(res) url = 'os-services/enable' body = { 'binary': 'cinder-scheduler', 'host': 'devstack', } self.sess.put.assert_called_with( url, json=body, microversion=self.sess.default_microversion, ) def test_disable(self): sot = service.Service(**EXAMPLE) res = sot.disable(self.sess) self.assertIsNotNone(res) url = 'os-services/disable' body = { 'binary': 'cinder-scheduler', 'host': 'devstack', } self.sess.put.assert_called_with( url, json=body, microversion=self.sess.default_microversion, ) def test_disable__with_reason(self): sot = service.Service(**EXAMPLE) reason = 'fencing' res = sot.disable(self.sess, reason=reason) self.assertIsNotNone(res) url = 'os-services/disable-log-reason' body = { 'binary': 'cinder-scheduler', 'host': 'devstack', 'disabled_reason': reason, } self.sess.put.assert_called_with( url, json=body, microversion=self.sess.default_microversion, ) def test_thaw(self): sot = service.Service(**EXAMPLE) res = sot.thaw(self.sess) self.assertIsNotNone(res) url = 'os-services/thaw' body = {'host': 'devstack'} self.sess.put.assert_called_with( url, json=body, microversion=self.sess.default_microversion, ) def test_freeze(self): sot = service.Service(**EXAMPLE) res = sot.freeze(self.sess) self.assertIsNotNone(res) url = 'os-services/freeze' body = {'host': 'devstack'} self.sess.put.assert_called_with( url, json=body, microversion=self.sess.default_microversion, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_failover(self, mock_supports): sot = service.Service(**EXAMPLE) res = sot.failover(self.sess) self.assertIsNotNone(res) url = 'os-services/failover_host' body = {'host': 'devstack'} self.sess.put.assert_called_with( url, json=body, microversion=self.sess.default_microversion, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) def 
test_failover__with_cluster(self, mock_supports): self.sess.default_microversion = '3.26' sot = service.Service(**EXAMPLE) res = sot.failover(self.sess, cluster='foo', backend_id='bar') self.assertIsNotNone(res) url = 'os-services/failover' body = { 'host': 'devstack', 'cluster': 'foo', 'backend_id': 'bar', } self.sess.put.assert_called_with( url, json=body, microversion='3.26', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_snapshot.py0000664000175000017500000001534300000000000026763 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v3 import snapshot from openstack.tests.unit import base FAKE_ID = "ffa9bc5e-1172-4021-acaf-cdcd78a9584d" FAKE_VOLUME_ID = "5aa119a8-d25b-45a7-8d1b-88e127885635" SNAPSHOT = { "status": "creating", "description": "Daily backup", "created_at": "2015-03-09T12:14:57.233772", "updated_at": None, "metadata": {}, "volume_id": FAKE_VOLUME_ID, "size": 1, "id": FAKE_ID, "name": "snap-001", "force": "true", "os-extended-snapshot-attributes:progress": "100%", "os-extended-snapshot-attributes:project_id": "0c2eba2c5af04d3f9e9d0d410b371fde", # noqa: E501 } class TestSnapshot(base.TestCase): def test_basic(self): sot = snapshot.Snapshot(SNAPSHOT) self.assertEqual("snapshot", sot.resource_key) self.assertEqual("snapshots", sot.resources_key) self.assertEqual("/snapshots", sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { "name": "name", "status": "status", "volume_id": "volume_id", "project_id": "project_id", "limit": "limit", "offset": "offset", "marker": "marker", "sort_dir": "sort_dir", "sort_key": "sort_key", "sort": "sort", "all_projects": "all_tenants", }, sot._query_mapping._mapping, ) def test_create_basic(self): sot = snapshot.Snapshot(**SNAPSHOT) self.assertEqual(SNAPSHOT["id"], sot.id) self.assertEqual(SNAPSHOT["status"], sot.status) self.assertEqual(SNAPSHOT["created_at"], sot.created_at) self.assertEqual(SNAPSHOT["updated_at"], sot.updated_at) self.assertEqual(SNAPSHOT["metadata"], sot.metadata) self.assertEqual(SNAPSHOT["volume_id"], sot.volume_id) self.assertEqual(SNAPSHOT["size"], sot.size) self.assertEqual(SNAPSHOT["name"], sot.name) self.assertEqual( SNAPSHOT["os-extended-snapshot-attributes:progress"], sot.progress ) self.assertEqual( SNAPSHOT["os-extended-snapshot-attributes:project_id"], sot.project_id, ) 
self.assertTrue(sot.is_forced) class TestSnapshotActions(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.headers = {} self.resp.status_code = 202 self.sess = mock.Mock(spec=adapter.Adapter) self.sess.get = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) self.sess.default_microversion = None def test_force_delete(self): sot = snapshot.Snapshot(**SNAPSHOT) self.assertIsNone(sot.force_delete(self.sess)) url = 'snapshots/%s/action' % FAKE_ID body = {'os-force_delete': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_reset(self): sot = snapshot.Snapshot(**SNAPSHOT) self.assertIsNone(sot.reset(self.sess, 'new_status')) url = 'snapshots/%s/action' % FAKE_ID body = {'os-reset_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_status(self): sot = snapshot.Snapshot(**SNAPSHOT) self.assertIsNone(sot.set_status(self.sess, 'new_status')) url = 'snapshots/%s/action' % FAKE_ID body = {'os-update_snapshot_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) def test_manage(self, mock_mv): resp = mock.Mock() resp.body = {'snapshot': copy.deepcopy(SNAPSHOT)} resp.json = mock.Mock(return_value=resp.body) resp.headers = {} resp.status_code = 202 self.sess.post = mock.Mock(return_value=resp) sot = snapshot.Snapshot.manage( self.sess, volume_id=FAKE_VOLUME_ID, ref=FAKE_ID ) self.assertIsNotNone(sot) url = '/manageable_snapshots' body = { 'snapshot': { 'volume_id': FAKE_VOLUME_ID, 'ref': FAKE_ID, 'name': None, 'description': None, 'metadata': None, } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) @mock.patch( 
'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_manage_pre_38(self, mock_mv): resp = mock.Mock() resp.body = {'snapshot': copy.deepcopy(SNAPSHOT)} resp.json = mock.Mock(return_value=resp.body) resp.headers = {} resp.status_code = 202 self.sess.post = mock.Mock(return_value=resp) sot = snapshot.Snapshot.manage( self.sess, volume_id=FAKE_VOLUME_ID, ref=FAKE_ID ) self.assertIsNotNone(sot) url = '/os-snapshot-manage' body = { 'snapshot': { 'volume_id': FAKE_VOLUME_ID, 'ref': FAKE_ID, 'name': None, 'description': None, 'metadata': None, } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_unmanage(self): sot = snapshot.Snapshot(**SNAPSHOT) self.assertIsNone(sot.unmanage(self.sess)) url = 'snapshots/%s/action' % FAKE_ID body = {'os-unmanage': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_transfer.py0000664000175000017500000001103300000000000026740 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v3 import transfer from openstack import resource from openstack.tests.unit import base FAKE_ID = "09d18b36-9e8d-4438-a4da-3f5eff5e1130" FAKE_VOL_ID = "390de1bc-19d1-41e7-ba67-c492bb36cae5" FAKE_VOL_NAME = "test-volume" FAKE_TRANSFER = "7d048960-7c3f-4bf0-952f-4312fdea1dec" FAKE_AUTH_KEY = "95bc670c0068821d" TRANSFER = { "auth_key": FAKE_AUTH_KEY, "created_at": "2023-06-27T08:47:23.035010", "id": FAKE_ID, "name": FAKE_VOL_NAME, "volume_id": FAKE_VOL_ID, } class TestTransfer(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.headers = {} self.resp.status_code = 202 self.sess = mock.Mock(spec=adapter.Adapter) self.sess.post = mock.Mock(return_value=self.resp) self.sess.default_microversion = "3.55" def test_basic(self): tr = transfer.Transfer(TRANSFER) self.assertEqual("transfer", tr.resource_key) self.assertEqual("transfers", tr.resources_key) self.assertEqual("/volume-transfers", tr.base_path) self.assertTrue(tr.allow_create) self.assertIsNotNone(tr._max_microversion) self.assertDictEqual( { "limit": "limit", "marker": "marker", }, tr._query_mapping._mapping, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) @mock.patch.object(resource.Resource, '_translate_response') def test_create(self, mock_mv, mock_translate): sot = transfer.Transfer() sot.create(self.sess, volume_id=FAKE_VOL_ID, name=FAKE_VOL_NAME) self.sess.post.assert_called_with( '/volume-transfers', json={'transfer': {}}, microversion="3.55", headers={}, params={'volume_id': FAKE_VOL_ID, 'name': FAKE_VOL_NAME}, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) @mock.patch.object(resource.Resource, '_translate_response') def test_create_pre_v355(self, mock_mv, mock_translate): self.sess.default_microversion = "3.0" sot = 
transfer.Transfer() sot.create(self.sess, volume_id=FAKE_VOL_ID, name=FAKE_VOL_NAME) self.sess.post.assert_called_with( '/os-volume-transfer', json={'transfer': {}}, microversion="3.0", headers={}, params={'volume_id': FAKE_VOL_ID, 'name': FAKE_VOL_NAME}, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) @mock.patch.object(resource.Resource, '_translate_response') def test_accept(self, mock_mv, mock_translate): sot = transfer.Transfer() sot.id = FAKE_TRANSFER sot.accept(self.sess, auth_key=FAKE_AUTH_KEY) self.sess.post.assert_called_with( 'volume-transfers/%s/accept' % FAKE_TRANSFER, json={ 'accept': { 'auth_key': FAKE_AUTH_KEY, } }, microversion="3.55", ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) @mock.patch.object(resource.Resource, '_translate_response') def test_accept_pre_v355(self, mock_mv, mock_translate): self.sess.default_microversion = "3.0" sot = transfer.Transfer() sot.id = FAKE_TRANSFER sot.accept(self.sess, auth_key=FAKE_AUTH_KEY) self.sess.post.assert_called_with( 'os-volume-transfer/%s/accept' % FAKE_TRANSFER, json={ 'accept': { 'auth_key': FAKE_AUTH_KEY, } }, microversion="3.0", ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_type.py0000664000175000017500000001232600000000000026103 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v3 import type from openstack import exceptions from openstack.tests.unit import base FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" TYPE = { "extra_specs": {"capabilities": "gpu"}, "id": FAKE_ID, "name": "SSD", "description": "Test type", } class TestType(base.TestCase): def setUp(self): super().setUp() self.extra_specs_result = {"extra_specs": {"go": "cubs", "boo": "sox"}} self.resp = mock.Mock() self.resp.body = None self.resp.status_code = 200 self.resp.json = mock.Mock(return_value=self.resp.body) self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = '3.0' self.sess.post = mock.Mock(return_value=self.resp) self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_basic(self): sot = type.Type(**TYPE) self.assertEqual("volume_type", sot.resource_key) self.assertEqual("volume_types", sot.resources_key) self.assertEqual("/types", sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_commit) def test_new(self): sot = type.Type.new(id=FAKE_ID) self.assertEqual(FAKE_ID, sot.id) def test_create(self): sot = type.Type(**TYPE) self.assertEqual(TYPE["id"], sot.id) self.assertEqual(TYPE["extra_specs"], sot.extra_specs) self.assertEqual(TYPE["name"], sot.name) self.assertEqual(TYPE["description"], sot.description) def test_set_extra_specs(self): response = mock.Mock() response.status_code = 200 response.json.return_value = self.extra_specs_result sess = mock.Mock() sess.post.return_value = response sot = type.Type(id=FAKE_ID) set_specs = {"lol": "rofl"} result = sot.set_extra_specs(sess, **set_specs) self.assertEqual(result, self.extra_specs_result["extra_specs"]) sess.post.assert_called_once_with( "types/" + FAKE_ID + 
"/extra_specs", headers={}, json={"extra_specs": set_specs}, ) def test_set_extra_specs_error(self): sess = mock.Mock() response = mock.Mock() response.status_code = 400 response.content = None sess.post.return_value = response sot = type.Type(id=FAKE_ID) set_specs = {"lol": "rofl"} self.assertRaises( exceptions.BadRequestException, sot.set_extra_specs, sess, **set_specs ) def test_delete_extra_specs(self): sess = mock.Mock() response = mock.Mock() response.status_code = 200 sess.delete.return_value = response sot = type.Type(id=FAKE_ID) key = "hey" sot.delete_extra_specs(sess, [key]) sess.delete.assert_called_once_with( "types/" + FAKE_ID + "/extra_specs/" + key, headers={}, ) def test_delete_extra_specs_error(self): sess = mock.Mock() response = mock.Mock() response.status_code = 400 response.content = None sess.delete.return_value = response sot = type.Type(id=FAKE_ID) key = "hey" self.assertRaises( exceptions.BadRequestException, sot.delete_extra_specs, sess, [key] ) def test_get_private_access(self): sot = type.Type(**TYPE) response = mock.Mock() response.status_code = 200 response.body = { "volume_type_access": [{"project_id": "a", "volume_type_id": "b"}] } response.json = mock.Mock(return_value=response.body) self.sess.get = mock.Mock(return_value=response) self.assertEqual( response.body["volume_type_access"], sot.get_private_access(self.sess), ) self.sess.get.assert_called_with( "types/%s/os-volume-type-access" % sot.id ) def test_add_private_access(self): sot = type.Type(**TYPE) self.assertIsNone(sot.add_private_access(self.sess, "a")) url = "types/%s/action" % sot.id body = {"addProjectAccess": {"project": "a"}} self.sess.post.assert_called_with(url, json=body) def test_remove_private_access(self): sot = type.Type(**TYPE) self.assertIsNone(sot.remove_private_access(self.sess, "a")) url = "types/%s/action" % sot.id body = {"removeProjectAccess": {"project": "a"}} self.sess.post.assert_called_with(url, json=body) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_type_encryption.py0000664000175000017500000000466300000000000030362 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v3 import type from openstack.tests.unit import base FAKE_ID = "479394ab-2f25-416e-8f58-721d8e5e29de" TYPE_ID = "22373aed-c4a8-4072-b66c-bf0a90dc9a12" TYPE_ENC = { "key_size": 256, "volume_type_id": TYPE_ID, "encryption_id": FAKE_ID, "provider": "nova.volume.encryptors.luks.LuksEncryptor", "control_location": "front-end", "cipher": "aes-xts-plain64", "deleted": False, "created_at": "2020-10-07T07:52:30.000000", "updated_at": "2020-10-08T07:42:45.000000", "deleted_at": None, } class TestTypeEncryption(base.TestCase): def test_basic(self): sot = type.TypeEncryption(**TYPE_ENC) self.assertEqual("encryption", sot.resource_key) self.assertEqual("encryption", sot.resources_key) self.assertEqual("/types/%(volume_type_id)s/encryption", sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertFalse(sot.allow_list) self.assertTrue(sot.allow_commit) def test_new(self): sot = type.TypeEncryption.new(encryption_id=FAKE_ID) self.assertEqual(FAKE_ID, sot.encryption_id) def test_create(self): sot = type.TypeEncryption(**TYPE_ENC) self.assertEqual(TYPE_ENC["volume_type_id"], 
sot.volume_type_id) self.assertEqual(TYPE_ENC["encryption_id"], sot.encryption_id) self.assertEqual(TYPE_ENC["key_size"], sot.key_size) self.assertEqual(TYPE_ENC["provider"], sot.provider) self.assertEqual(TYPE_ENC["control_location"], sot.control_location) self.assertEqual(TYPE_ENC["cipher"], sot.cipher) self.assertEqual(TYPE_ENC["deleted"], sot.deleted) self.assertEqual(TYPE_ENC["created_at"], sot.created_at) self.assertEqual(TYPE_ENC["updated_at"], sot.updated_at) self.assertEqual(TYPE_ENC["deleted_at"], sot.deleted_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/block_storage/v3/test_volume.py0000664000175000017500000005706700000000000026444 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock from keystoneauth1 import adapter from openstack.block_storage.v3 import volume from openstack import exceptions from openstack.tests.unit import base FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" IMAGE_METADATA = { 'container_format': 'bare', 'min_ram': '64', 'disk_format': 'qcow2', 'image_name': 'TestVM', 'image_id': '625d4f2c-cf67-4af3-afb6-c7220f766947', 'checksum': '64d7c1cd2b6f60c92c14662941cb7913', 'min_disk': '0', 'size': '13167616', } FAKE_HOST = "fake_host@fake_backend#fake_pool" VOLUME = { "status": "creating", "name": "my_volume", "attachments": [], "availability_zone": "nova", "bootable": "false", "created_at": "2015-03-09T12:14:57.233772", "updated_at": None, "description": "something", "volume_type": "some_type", "snapshot_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", "source_volid": None, "imageRef": "some_image", "metadata": {}, "multiattach": False, "volume_image_metadata": IMAGE_METADATA, "id": FAKE_ID, "size": 10, "os-vol-host-attr:host": "127.0.0.1", "os-vol-tenant-attr:tenant_id": "some tenant", "os-vol-mig-status-attr:migstat": "done", "os-vol-mig-status-attr:name_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", "replication_status": "nah", "os-volume-replication:extended_status": "really nah", "consistencygroup_id": "123asf-asdf123", "os-volume-replication:driver_data": "ahasadfasdfasdfasdfsdf", "snapshot_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", "encrypted": "false", "OS-SCH-HNT:scheduler_hints": { "same_host": [ "a0cf03a5-d921-4877-bb5c-86d26cf818e1", "8c19174f-4220-44f0-824a-cd1eeef10287", ] }, } class TestVolume(base.TestCase): def test_basic(self): sot = volume.Volume(VOLUME) self.assertEqual("volume", sot.resource_key) self.assertEqual("volumes", sot.resources_key) self.assertEqual("/volumes", sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 
"name": "name", "status": "status", "all_projects": "all_tenants", "project_id": "project_id", "created_at": "created_at", "updated_at": "updated_at", "limit": "limit", "marker": "marker", }, sot._query_mapping._mapping, ) def test_create(self): sot = volume.Volume(**VOLUME) self.assertEqual(VOLUME["id"], sot.id) self.assertEqual(VOLUME["status"], sot.status) self.assertEqual(VOLUME["attachments"], sot.attachments) self.assertEqual(VOLUME["availability_zone"], sot.availability_zone) self.assertFalse(sot.is_bootable) self.assertEqual(VOLUME["created_at"], sot.created_at) self.assertEqual(VOLUME["updated_at"], sot.updated_at) self.assertEqual(VOLUME["description"], sot.description) self.assertEqual(VOLUME["volume_type"], sot.volume_type) self.assertEqual(VOLUME["snapshot_id"], sot.snapshot_id) self.assertEqual(VOLUME["source_volid"], sot.source_volume_id) self.assertEqual(VOLUME["metadata"], sot.metadata) self.assertEqual(VOLUME["multiattach"], sot.is_multiattach) self.assertEqual( VOLUME["volume_image_metadata"], sot.volume_image_metadata ) self.assertEqual(VOLUME["size"], sot.size) self.assertEqual(VOLUME["imageRef"], sot.image_id) self.assertEqual(VOLUME["os-vol-host-attr:host"], sot.host) self.assertEqual( VOLUME["os-vol-tenant-attr:tenant_id"], sot.project_id ) self.assertEqual( VOLUME["os-vol-mig-status-attr:migstat"], sot.migration_status ) self.assertEqual( VOLUME["os-vol-mig-status-attr:name_id"], sot.migration_id ) self.assertEqual(VOLUME["replication_status"], sot.replication_status) self.assertEqual( VOLUME["os-volume-replication:extended_status"], sot.extended_replication_status, ) self.assertEqual( VOLUME["consistencygroup_id"], sot.consistency_group_id ) self.assertEqual( VOLUME["os-volume-replication:driver_data"], sot.replication_driver_data, ) self.assertFalse(sot.is_encrypted) self.assertDictEqual( VOLUME["OS-SCH-HNT:scheduler_hints"], sot.scheduler_hints ) class TestVolumeActions(TestVolume): def setUp(self): super().setUp() self.resp = 
mock.Mock() self.resp.body = None self.resp.status_code = 200 self.resp.json = mock.Mock(return_value=self.resp.body) self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = '3.0' self.sess.post = mock.Mock(return_value=self.resp) self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_extend(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.extend(self.sess, '20')) url = 'volumes/%s/action' % FAKE_ID body = {"os-extend": {"new_size": "20"}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_volume_readonly(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.set_readonly(self.sess, True)) url = 'volumes/%s/action' % FAKE_ID body = {'os-update_readonly_flag': {'readonly': True}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_volume_readonly_false(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.set_readonly(self.sess, False)) url = 'volumes/%s/action' % FAKE_ID body = {'os-update_readonly_flag': {'readonly': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_volume_bootable(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.set_bootable_status(self.sess)) url = 'volumes/%s/action' % FAKE_ID body = {'os-set_bootable': {'bootable': True}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_volume_bootable_false(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.set_bootable_status(self.sess, False)) url = 'volumes/%s/action' % FAKE_ID body = {'os-set_bootable': {'bootable': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_set_image_metadata(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.set_image_metadata(self.sess, {'foo': 'bar'})) url = 'volumes/%s/action' % FAKE_ID body = {'os-set_image_metadata': {'foo': 'bar'}} 
self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_delete_image_metadata(self): _volume = copy.deepcopy(VOLUME) _volume['metadata'] = { 'foo': 'bar', 'baz': 'wow', } sot = volume.Volume(**_volume) self.assertIsNone(sot.delete_image_metadata(self.sess)) url = 'volumes/%s/action' % FAKE_ID body_a = {'os-unset_image_metadata': 'foo'} body_b = {'os-unset_image_metadata': 'baz'} self.sess.post.assert_has_calls( [ mock.call( url, json=body_a, microversion=sot._max_microversion ), mock.call( url, json=body_b, microversion=sot._max_microversion ), ] ) def test_delete_image_metadata_item(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.delete_image_metadata_item(self.sess, 'foo')) url = 'volumes/%s/action' % FAKE_ID body = {'os-unset_image_metadata': 'foo'} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_reset_status(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.reset_status(self.sess, '1', '2', '3')) url = 'volumes/%s/action' % FAKE_ID body = { 'os-reset_status': { 'status': '1', 'attach_status': '2', 'migration_status': '3', } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_reset_status__single_option(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.reset_status(self.sess, status='1')) url = 'volumes/%s/action' % FAKE_ID body = { 'os-reset_status': { 'status': '1', } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) @mock.patch( 'openstack.utils.require_microversion', autospec=True, side_effect=[exceptions.SDKException()], ) def test_revert_to_snapshot_before_340(self, mv_mock): sot = volume.Volume(**VOLUME) self.assertRaises( exceptions.SDKException, sot.revert_to_snapshot, self.sess, '1' ) @mock.patch( 'openstack.utils.require_microversion', autospec=True, side_effect=[None], ) def test_revert_to_snapshot_after_340(self, mv_mock): sot = 
volume.Volume(**VOLUME) self.assertIsNone(sot.revert_to_snapshot(self.sess, '1')) url = 'volumes/%s/action' % FAKE_ID body = {'revert': {'snapshot_id': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) mv_mock.assert_called_with(self.sess, '3.40') def test_attach_instance(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.attach(self.sess, '1', instance='2')) url = 'volumes/%s/action' % FAKE_ID body = {'os-attach': {'mountpoint': '1', 'instance_uuid': '2'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_attach_host(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.attach(self.sess, '1', host_name='2')) url = 'volumes/%s/action' % FAKE_ID body = {'os-attach': {'mountpoint': '1', 'host_name': '2'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_attach_error(self): sot = volume.Volume(**VOLUME) self.assertRaises(ValueError, sot.attach, self.sess, '1') def test_detach(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.detach(self.sess, '1')) url = 'volumes/%s/action' % FAKE_ID body = {'os-detach': {'attachment_id': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_detach_force(self): sot = volume.Volume(**VOLUME) self.assertIsNone( sot.detach(self.sess, '1', force=True, connector={'a': 'b'}) ) url = 'volumes/%s/action' % FAKE_ID body = { 'os-force_detach': {'attachment_id': '1', 'connector': {'a': 'b'}} } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_unmanage(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.unmanage(self.sess)) url = 'volumes/%s/action' % FAKE_ID body = {'os-unmanage': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_retype(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.retype(self.sess, '1')) url = 
'volumes/%s/action' % FAKE_ID body = {'os-retype': {'new_type': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_retype_mp(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.retype(self.sess, '1', migration_policy='2')) url = 'volumes/%s/action' % FAKE_ID body = {'os-retype': {'new_type': '1', 'migration_policy': '2'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_migrate(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.migrate(self.sess, host='1')) url = 'volumes/%s/action' % FAKE_ID body = {'os-migrate_volume': {'host': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_migrate_flags(self): sot = volume.Volume(**VOLUME) self.assertIsNone( sot.migrate( self.sess, host='1', force_host_copy=True, lock_volume=True ) ) url = 'volumes/%s/action' % FAKE_ID body = { 'os-migrate_volume': { 'host': '1', 'force_host_copy': True, 'lock_volume': True, } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) @mock.patch( 'openstack.utils.require_microversion', autospec=True, side_effect=[None], ) def test_migrate_cluster(self, mv_mock): sot = volume.Volume(**VOLUME) self.assertIsNone( sot.migrate( self.sess, cluster='1', force_host_copy=True, lock_volume=True ) ) url = 'volumes/%s/action' % FAKE_ID body = { 'os-migrate_volume': { 'cluster': '1', 'force_host_copy': True, 'lock_volume': True, } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) mv_mock.assert_called_with(self.sess, '3.16') def test_complete_migration(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.complete_migration(self.sess, new_volume_id='1')) url = 'volumes/%s/action' % FAKE_ID body = { 'os-migrate_volume_completion': {'new_volume': '1', 'error': False} } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def 
test_complete_migration_error(self): sot = volume.Volume(**VOLUME) self.assertIsNone( sot.complete_migration(self.sess, new_volume_id='1', error=True) ) url = 'volumes/%s/action' % FAKE_ID body = { 'os-migrate_volume_completion': {'new_volume': '1', 'error': True} } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_force_delete(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.force_delete(self.sess)) url = 'volumes/%s/action' % FAKE_ID body = {'os-force_delete': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_upload_image(self): sot = volume.Volume(**VOLUME) self.resp = mock.Mock() self.resp.body = {'os-volume_upload_image': {'a': 'b'}} self.resp.status_code = 200 self.resp.json = mock.Mock(return_value=self.resp.body) self.sess.post = mock.Mock(return_value=self.resp) self.assertDictEqual({'a': 'b'}, sot.upload_to_image(self.sess, '1')) url = 'volumes/%s/action' % FAKE_ID body = {'os-volume_upload_image': {'image_name': '1', 'force': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) @mock.patch( 'openstack.utils.require_microversion', autospec=True, side_effect=[None], ) def test_upload_image_args(self, mv_mock): sot = volume.Volume(**VOLUME) self.resp = mock.Mock() self.resp.body = {'os-volume_upload_image': {'a': 'b'}} self.resp.status_code = 200 self.resp.json = mock.Mock(return_value=self.resp.body) self.sess.post = mock.Mock(return_value=self.resp) self.assertDictEqual( {'a': 'b'}, sot.upload_to_image( self.sess, '1', disk_format='2', container_format='3', visibility='4', protected='5', ), ) url = 'volumes/%s/action' % FAKE_ID body = { 'os-volume_upload_image': { 'image_name': '1', 'force': False, 'disk_format': '2', 'container_format': '3', 'visibility': '4', 'protected': '5', } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) 
mv_mock.assert_called_with(self.sess, '3.1') def test_reserve(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.reserve(self.sess)) url = 'volumes/%s/action' % FAKE_ID body = {'os-reserve': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_unreserve(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.unreserve(self.sess)) url = 'volumes/%s/action' % FAKE_ID body = {'os-unreserve': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_begin_detaching(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.begin_detaching(self.sess)) url = 'volumes/%s/action' % FAKE_ID body = {'os-begin_detaching': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_abort_detaching(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.abort_detaching(self.sess)) url = 'volumes/%s/action' % FAKE_ID body = {'os-roll_detaching': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_init_attachment(self): sot = volume.Volume(**VOLUME) self.resp = mock.Mock() self.resp.body = {'connection_info': {'c': 'd'}} self.resp.status_code = 200 self.resp.json = mock.Mock(return_value=self.resp.body) self.sess.post = mock.Mock(return_value=self.resp) self.assertEqual( {'c': 'd'}, sot.init_attachment(self.sess, {'a': 'b'}) ) url = 'volumes/%s/action' % FAKE_ID body = {'os-initialize_connection': {'connector': {'a': 'b'}}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test_terminate_attachment(self): sot = volume.Volume(**VOLUME) self.assertIsNone(sot.terminate_attachment(self.sess, {'a': 'b'})) url = 'volumes/%s/action' % FAKE_ID body = {'os-terminate_connection': {'connector': {'a': 'b'}}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) def test__prepare_request_body(self): sot = 
volume.Volume(**VOLUME) body = sot._prepare_request_body(patch=False, prepend_key=True) original_body = copy.deepcopy(sot._body.dirty) # Verify that scheduler hints aren't modified after preparing request # but also not part of 'volume' JSON object self.assertEqual( original_body['OS-SCH-HNT:scheduler_hints'], body['OS-SCH-HNT:scheduler_hints'], ) # Pop scheduler hints to verify other parameters in body original_body.pop('OS-SCH-HNT:scheduler_hints') # Verify that other request parameters are same but in 'volume' JSON self.assertEqual(original_body, body['volume']) def test_create_scheduler_hints(self): sot = volume.Volume(**VOLUME) sot._translate_response = mock.Mock() sot.create(self.sess) url = '/volumes' volume_body = copy.deepcopy(VOLUME) scheduler_hints = volume_body.pop('OS-SCH-HNT:scheduler_hints') body = { "volume": volume_body, 'OS-SCH-HNT:scheduler_hints': scheduler_hints, } self.sess.post.assert_called_with( url, json=body, microversion='3.0', headers={}, params={}, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) def test_manage(self, mock_mv): resp = mock.Mock() resp.body = {'volume': copy.deepcopy(VOLUME)} resp.json = mock.Mock(return_value=resp.body) resp.headers = {} resp.status_code = 202 self.sess.post = mock.Mock(return_value=resp) sot = volume.Volume.manage(self.sess, host=FAKE_HOST, ref=FAKE_ID) self.assertIsNotNone(sot) url = '/manageable_volumes' body = { 'volume': { 'host': FAKE_HOST, 'ref': FAKE_ID, 'name': None, 'description': None, 'volume_type': None, 'availability_zone': None, 'metadata': None, 'bootable': False, } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_manage_pre_38(self, mock_mv): resp = mock.Mock() resp.body = {'volume': copy.deepcopy(VOLUME)} resp.json = mock.Mock(return_value=resp.body) resp.headers = {} resp.status_code = 202 
self.sess.post = mock.Mock(return_value=resp) sot = volume.Volume.manage(self.sess, host=FAKE_HOST, ref=FAKE_ID) self.assertIsNotNone(sot) url = '/os-volume-manage' body = { 'volume': { 'host': FAKE_HOST, 'ref': FAKE_ID, 'name': None, 'description': None, 'volume_type': None, 'availability_zone': None, 'metadata': None, 'bootable': False, } } self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) ././@PaxHeader0000000000000000000000000000003100000000000011447 xustar000000000000000025 mtime=1725296385.4254 openstacksdk-4.0.0/openstack/tests/unit/cloud/0000775000175000017500000000000000000000000021445 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/__init__.py0000664000175000017500000000000000000000000023544 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test__utils.py0000664000175000017500000003370600000000000024366 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from uuid import uuid4 import testtools from openstack.cloud import _utils from openstack import exceptions from openstack.tests.unit import base RANGE_DATA = [ dict(id=1, key1=1, key2=5), dict(id=2, key1=1, key2=20), dict(id=3, key1=2, key2=10), dict(id=4, key1=2, key2=30), dict(id=5, key1=3, key2=40), dict(id=6, key1=3, key2=40), ] class TestUtils(base.TestCase): def test__filter_list_name_or_id(self): el1 = dict(id=100, name='donald') el2 = dict(id=200, name='pluto') data = [el1, el2] ret = _utils._filter_list(data, 'donald', None) self.assertEqual([el1], ret) def test__filter_list_name_or_id_special(self): el1 = dict(id=100, name='donald') el2 = dict(id=200, name='pluto[2017-01-10]') data = [el1, el2] ret = _utils._filter_list(data, 'pluto[2017-01-10]', None) self.assertEqual([el2], ret) def test__filter_list_name_or_id_partial_bad(self): el1 = dict(id=100, name='donald') el2 = dict(id=200, name='pluto[2017-01-10]') data = [el1, el2] ret = _utils._filter_list(data, 'pluto[2017-01]', None) self.assertEqual([], ret) def test__filter_list_name_or_id_partial_glob(self): el1 = dict(id=100, name='donald') el2 = dict(id=200, name='pluto[2017-01-10]') data = [el1, el2] ret = _utils._filter_list(data, 'pluto*', None) self.assertEqual([el2], ret) def test__filter_list_name_or_id_non_glob_glob(self): el1 = dict(id=100, name='donald') el2 = dict(id=200, name='pluto[2017-01-10]') data = [el1, el2] ret = _utils._filter_list(data, 'pluto', None) self.assertEqual([], ret) def test__filter_list_name_or_id_glob(self): el1 = dict(id=100, name='donald') el2 = dict(id=200, name='pluto') el3 = dict(id=200, name='pluto-2') data = [el1, el2, el3] ret = _utils._filter_list(data, 'pluto*', None) self.assertEqual([el2, el3], ret) def test__filter_list_name_or_id_glob_not_found(self): el1 = dict(id=100, name='donald') el2 = dict(id=200, name='pluto') el3 = dict(id=200, name='pluto-2') data = [el1, el2, el3] ret = _utils._filter_list(data, 'q*', None) 
self.assertEqual([], ret) def test__filter_list_unicode(self): el1 = dict( id=100, name='中文', last='duck', other=dict(category='duck', financial=dict(status='poor')), ) el2 = dict( id=200, name='中文', last='trump', other=dict(category='human', financial=dict(status='rich')), ) el3 = dict( id=300, name='donald', last='ronald mac', other=dict(category='clown', financial=dict(status='rich')), ) data = [el1, el2, el3] ret = _utils._filter_list( data, '中文', {'other': {'financial': {'status': 'rich'}}} ) self.assertEqual([el2], ret) def test__filter_list_filter(self): el1 = dict(id=100, name='donald', other='duck') el2 = dict(id=200, name='donald', other='trump') data = [el1, el2] ret = _utils._filter_list(data, 'donald', {'other': 'duck'}) self.assertEqual([el1], ret) def test__filter_list_filter_jmespath(self): el1 = dict(id=100, name='donald', other='duck') el2 = dict(id=200, name='donald', other='trump') data = [el1, el2] ret = _utils._filter_list(data, 'donald', "[?other == `duck`]") self.assertEqual([el1], ret) def test__filter_list_dict1(self): el1 = dict( id=100, name='donald', last='duck', other=dict(category='duck') ) el2 = dict( id=200, name='donald', last='trump', other=dict(category='human') ) el3 = dict( id=300, name='donald', last='ronald mac', other=dict(category='clown'), ) data = [el1, el2, el3] ret = _utils._filter_list( data, 'donald', {'other': {'category': 'clown'}} ) self.assertEqual([el3], ret) def test__filter_list_dict2(self): el1 = dict( id=100, name='donald', last='duck', other=dict(category='duck', financial=dict(status='poor')), ) el2 = dict( id=200, name='donald', last='trump', other=dict(category='human', financial=dict(status='rich')), ) el3 = dict( id=300, name='donald', last='ronald mac', other=dict(category='clown', financial=dict(status='rich')), ) data = [el1, el2, el3] ret = _utils._filter_list( data, 'donald', {'other': {'financial': {'status': 'rich'}}} ) self.assertEqual([el2, el3], ret) def test_safe_dict_min_ints(self): """Test 
integer comparison""" data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] retval = _utils.safe_dict_min('f1', data) self.assertEqual(1, retval) def test_safe_dict_min_strs(self): """Test integer as strings comparison""" data = [{'f1': '3'}, {'f1': '2'}, {'f1': '1'}] retval = _utils.safe_dict_min('f1', data) self.assertEqual(1, retval) def test_safe_dict_min_None(self): """Test None values""" data = [{'f1': 3}, {'f1': None}, {'f1': 1}] retval = _utils.safe_dict_min('f1', data) self.assertEqual(1, retval) def test_safe_dict_min_key_missing(self): """Test missing key for an entry still works""" data = [{'f1': 3}, {'x': 2}, {'f1': 1}] retval = _utils.safe_dict_min('f1', data) self.assertEqual(1, retval) def test_safe_dict_min_key_not_found(self): """Test key not found in any elements returns None""" data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] retval = _utils.safe_dict_min('doesnotexist', data) self.assertIsNone(retval) def test_safe_dict_min_not_int(self): """Test non-integer key value raises OSCE""" data = [{'f1': 3}, {'f1': "aaa"}, {'f1': 1}] with testtools.ExpectedException( exceptions.SDKException, "Search for minimum value failed. 
" "Value for f1 is not an integer: aaa", ): _utils.safe_dict_min('f1', data) def test_safe_dict_max_ints(self): """Test integer comparison""" data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] retval = _utils.safe_dict_max('f1', data) self.assertEqual(3, retval) def test_safe_dict_max_strs(self): """Test integer as strings comparison""" data = [{'f1': '3'}, {'f1': '2'}, {'f1': '1'}] retval = _utils.safe_dict_max('f1', data) self.assertEqual(3, retval) def test_safe_dict_max_None(self): """Test None values""" data = [{'f1': 3}, {'f1': None}, {'f1': 1}] retval = _utils.safe_dict_max('f1', data) self.assertEqual(3, retval) def test_safe_dict_max_key_missing(self): """Test missing key for an entry still works""" data = [{'f1': 3}, {'x': 2}, {'f1': 1}] retval = _utils.safe_dict_max('f1', data) self.assertEqual(3, retval) def test_safe_dict_max_key_not_found(self): """Test key not found in any elements returns None""" data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] retval = _utils.safe_dict_max('doesnotexist', data) self.assertIsNone(retval) def test_safe_dict_max_not_int(self): """Test non-integer key value raises OSCE""" data = [{'f1': 3}, {'f1': "aaa"}, {'f1': 1}] with testtools.ExpectedException( exceptions.SDKException, "Search for maximum value failed. 
" "Value for f1 is not an integer: aaa", ): _utils.safe_dict_max('f1', data) def test_parse_range_None(self): self.assertIsNone(_utils.parse_range(None)) def test_parse_range_invalid(self): self.assertIsNone(_utils.parse_range("1024") self.assertIsInstance(retval, tuple) self.assertEqual(">", retval[0]) self.assertEqual(1024, retval[1]) def test_parse_range_le(self): retval = _utils.parse_range("<=1024") self.assertIsInstance(retval, tuple) self.assertEqual("<=", retval[0]) self.assertEqual(1024, retval[1]) def test_parse_range_ge(self): retval = _utils.parse_range(">=1024") self.assertIsInstance(retval, tuple) self.assertEqual(">=", retval[0]) self.assertEqual(1024, retval[1]) def test_range_filter_min(self): retval = _utils.range_filter(RANGE_DATA, "key1", "min") self.assertIsInstance(retval, list) self.assertEqual(2, len(retval)) self.assertEqual(RANGE_DATA[:2], retval) def test_range_filter_max(self): retval = _utils.range_filter(RANGE_DATA, "key1", "max") self.assertIsInstance(retval, list) self.assertEqual(2, len(retval)) self.assertEqual(RANGE_DATA[-2:], retval) def test_range_filter_range(self): retval = _utils.range_filter(RANGE_DATA, "key1", "<3") self.assertIsInstance(retval, list) self.assertEqual(4, len(retval)) self.assertEqual(RANGE_DATA[:4], retval) def test_range_filter_exact(self): retval = _utils.range_filter(RANGE_DATA, "key1", "2") self.assertIsInstance(retval, list) self.assertEqual(2, len(retval)) self.assertEqual(RANGE_DATA[2:4], retval) def test_range_filter_invalid_int(self): with testtools.ExpectedException( exceptions.SDKException, "Invalid range value: <1A0" ): _utils.range_filter(RANGE_DATA, "key1", "<1A0") def test_range_filter_invalid_op(self): with testtools.ExpectedException( exceptions.SDKException, "Invalid range value: <>100" ): _utils.range_filter(RANGE_DATA, "key1", "<>100") def test_get_entity_pass_object(self): obj = mock.Mock(id=uuid4().hex) self.cloud.use_direct_get = True self.assertEqual(obj, 
_utils._get_entity(self.cloud, '', obj, {})) def test_get_entity_pass_dict(self): d = dict(id=uuid4().hex) self.cloud.use_direct_get = True self.assertEqual(d, _utils._get_entity(self.cloud, '', d, {})) def test_get_entity_no_use_direct_get(self): # test we are defaulting to the search_ methods # if the use_direct_get flag is set to False(default). uuid = uuid4().hex resource = 'network' func = 'search_%ss' % resource filters = {} with mock.patch.object(self.cloud, func) as search: _utils._get_entity(self.cloud, resource, uuid, filters) search.assert_called_once_with(uuid, filters) def test_get_entity_no_uuid_like(self): # test we are defaulting to the search_ methods # if the name_or_id param is a name(string) but not a uuid. self.cloud.use_direct_get = True name = 'name_no_uuid' resource = 'network' func = 'search_%ss' % resource filters = {} with mock.patch.object(self.cloud, func) as search: _utils._get_entity(self.cloud, resource, name, filters) search.assert_called_once_with(name, filters) def test_get_entity_pass_uuid(self): uuid = uuid4().hex self.cloud.use_direct_get = True resources = [ 'flavor', 'image', 'volume', 'network', 'subnet', 'port', 'floating_ip', 'security_group', ] for r in resources: f = 'get_%s_by_id' % r with mock.patch.object(self.cloud, f) as get: _utils._get_entity(self.cloud, r, uuid, {}) get.assert_called_once_with(uuid) def test_get_entity_pass_search_methods(self): self.cloud.use_direct_get = True resources = [ 'flavor', 'image', 'volume', 'network', 'subnet', 'port', 'floating_ip', 'security_group', ] filters = {} name = 'name_no_uuid' for r in resources: f = 'search_%ss' % r with mock.patch.object(self.cloud, f) as search: _utils._get_entity(self.cloud, r, name, {}) search.assert_called_once_with(name, filters) def test_get_entity_get_and_search(self): resources = [ 'flavor', 'image', 'volume', 'network', 'subnet', 'port', 'floating_ip', 'security_group', ] for r in resources: self.assertTrue(hasattr(self.cloud, 'get_%s_by_id' % 
r)) self.assertTrue(hasattr(self.cloud, 'search_%ss' % r)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_accelerator.py0000664000175000017500000003140200000000000025342 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid from openstack.tests.unit import base DEP_UUID = uuid.uuid4().hex DEP_DICT = { 'uuid': DEP_UUID, 'name': 'dep_name', 'parent_id': None, 'root_id': 1, 'num_accelerators': 4, 'device_id': 0, } DEV_UUID = uuid.uuid4().hex DEV_DICT = { 'id': 1, 'uuid': DEV_UUID, 'name': 'dev_name', 'type': 'test_type', 'vendor': '0x8086', 'model': 'test_model', 'std_board_info': '{"product_id": "0x09c4"}', 'vendor_board_info': 'test_vb_info', } DEV_PROF_UUID = uuid.uuid4().hex DEV_PROF_GROUPS = [ { "resources:ACCELERATOR_FPGA": "1", "trait:CUSTOM_FPGA_INTEL_PAC_ARRIA10": "required", "trait:CUSTOM_FUNCTION_ID_3AFB": "required", }, { "resources:CUSTOM_ACCELERATOR_FOO": "2", "resources:CUSTOM_MEMORY": "200", "trait:CUSTOM_TRAIT_ALWAYS": "required", }, ] DEV_PROF_DICT = { "id": 1, "uuid": DEV_PROF_UUID, "name": 'afaas_example_1', "groups": DEV_PROF_GROUPS, } NEW_DEV_PROF_DICT = copy.copy(DEV_PROF_DICT) ARQ_UUID = uuid.uuid4().hex ARQ_DEV_RP_UUID = uuid.uuid4().hex ARQ_INSTANCE_UUID = uuid.uuid4().hex ARQ_ATTACH_INFO_STR = ( '{"bus": "5e", "device": "00", "domain": "0000", "function": "1"}' ) ARQ_DICT = { 'uuid': ARQ_UUID, 
'hostname': 'test_hostname', 'device_profile_name': 'fake-devprof', 'device_profile_group_id': 0, 'device_rp_uuid': ARQ_DEV_RP_UUID, 'instance_uuid': ARQ_INSTANCE_UUID, 'attach_handle_type': 'PCI', 'attach_handle_info': ARQ_ATTACH_INFO_STR, } NEW_ARQ_DICT = copy.copy(ARQ_DICT) class TestAccelerator(base.TestCase): def setUp(self): super().setUp() self.use_cyborg() def test_list_deployables(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'accelerator', 'public', append=['v2', 'deployables'] ), json={'deployables': [DEP_DICT]}, ), ] ) dep_list = self.cloud.list_deployables() self.assertEqual(len(dep_list), 1) self.assertEqual(dep_list[0].id, DEP_DICT['uuid']) self.assertEqual(dep_list[0].name, DEP_DICT['name']) self.assertEqual(dep_list[0].parent_id, DEP_DICT['parent_id']) self.assertEqual(dep_list[0].root_id, DEP_DICT['root_id']) self.assertEqual( dep_list[0].num_accelerators, DEP_DICT['num_accelerators'] ) self.assertEqual(dep_list[0].device_id, DEP_DICT['device_id']) self.assert_calls() def test_list_devices(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'accelerator', 'public', append=['v2', 'devices'] ), json={'devices': [DEV_DICT]}, ), ] ) dev_list = self.cloud.list_devices() self.assertEqual(len(dev_list), 1) self.assertEqual(dev_list[0].id, DEV_DICT['id']) self.assertEqual(dev_list[0].uuid, DEV_DICT['uuid']) self.assertEqual(dev_list[0].name, DEV_DICT['name']) self.assertEqual(dev_list[0].type, DEV_DICT['type']) self.assertEqual(dev_list[0].vendor, DEV_DICT['vendor']) self.assertEqual(dev_list[0].model, DEV_DICT['model']) self.assertEqual( dev_list[0].std_board_info, DEV_DICT['std_board_info'] ) self.assertEqual( dev_list[0].vendor_board_info, DEV_DICT['vendor_board_info'] ) self.assert_calls() def test_list_device_profiles(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'accelerator', 'public', append=['v2', 'device_profiles'], ), json={'device_profiles': [DEV_PROF_DICT]}, ), ] ) 
dev_prof_list = self.cloud.list_device_profiles() self.assertEqual(len(dev_prof_list), 1) self.assertEqual(dev_prof_list[0].id, DEV_PROF_DICT['id']) self.assertEqual(dev_prof_list[0].uuid, DEV_PROF_DICT['uuid']) self.assertEqual(dev_prof_list[0].name, DEV_PROF_DICT['name']) self.assertEqual(dev_prof_list[0].groups, DEV_PROF_DICT['groups']) self.assert_calls() def test_create_device_profile(self): self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'accelerator', 'public', append=['v2', 'device_profiles'], ), json=NEW_DEV_PROF_DICT, ) ] ) attrs = { 'name': NEW_DEV_PROF_DICT['name'], 'groups': NEW_DEV_PROF_DICT['groups'], } self.assertTrue(self.cloud.create_device_profile(attrs)) self.assert_calls() def test_delete_device_profile(self, filters=None): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'accelerator', 'public', append=[ 'v2', 'device_profiles', DEV_PROF_DICT['name'], ], ), json={"device_profiles": [DEV_PROF_DICT]}, ), dict( method='DELETE', uri=self.get_mock_url( 'accelerator', 'public', append=[ 'v2', 'device_profiles', DEV_PROF_DICT['name'], ], ), json=DEV_PROF_DICT, ), ] ) self.assertTrue( self.cloud.delete_device_profile(DEV_PROF_DICT['name'], filters) ) self.assert_calls() def test_list_accelerator_requests(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'accelerator', 'public', append=['v2', 'accelerator_requests'], ), json={'arqs': [ARQ_DICT]}, ), ] ) arq_list = self.cloud.list_accelerator_requests() self.assertEqual(len(arq_list), 1) self.assertEqual(arq_list[0].uuid, ARQ_DICT['uuid']) self.assertEqual( arq_list[0].device_profile_name, ARQ_DICT['device_profile_name'] ) self.assertEqual( arq_list[0].device_profile_group_id, ARQ_DICT['device_profile_group_id'], ) self.assertEqual( arq_list[0].device_rp_uuid, ARQ_DICT['device_rp_uuid'] ) self.assertEqual(arq_list[0].instance_uuid, ARQ_DICT['instance_uuid']) self.assertEqual( arq_list[0].attach_handle_type, ARQ_DICT['attach_handle_type'] ) 
self.assertEqual( arq_list[0].attach_handle_info, ARQ_DICT['attach_handle_info'] ) self.assert_calls() def test_create_accelerator_request(self): self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'accelerator', 'public', append=['v2', 'accelerator_requests'], ), json=NEW_ARQ_DICT, ), ] ) attrs = { 'device_profile_name': NEW_ARQ_DICT['device_profile_name'], 'device_profile_group_id': NEW_ARQ_DICT['device_profile_group_id'], } self.assertTrue(self.cloud.create_accelerator_request(attrs)) self.assert_calls() def test_delete_accelerator_request(self, filters=None): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'accelerator', 'public', append=[ 'v2', 'accelerator_requests', ARQ_DICT['uuid'], ], ), json={"accelerator_requests": [ARQ_DICT]}, ), dict( method='DELETE', uri=self.get_mock_url( 'accelerator', 'public', append=[ 'v2', 'accelerator_requests', ARQ_DICT['uuid'], ], ), json=ARQ_DICT, ), ] ) self.assertTrue( self.cloud.delete_accelerator_request(ARQ_DICT['uuid'], filters) ) self.assert_calls() def test_bind_accelerator_request(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'accelerator', 'public', append=[ 'v2', 'accelerator_requests', ARQ_DICT['uuid'], ], ), json={"accelerator_requests": [ARQ_DICT]}, ), dict( method='PATCH', uri=self.get_mock_url( 'accelerator', 'public', append=[ 'v2', 'accelerator_requests', ARQ_DICT['uuid'], ], ), json=ARQ_DICT, ), ] ) properties = [ {'path': '/hostname', 'value': ARQ_DICT['hostname'], 'op': 'add'}, { 'path': '/instance_uuid', 'value': ARQ_DICT['instance_uuid'], 'op': 'add', }, { 'path': '/device_rp_uuid', 'value': ARQ_DICT['device_rp_uuid'], 'op': 'add', }, ] self.assertTrue( self.cloud.bind_accelerator_request(ARQ_DICT['uuid'], properties) ) self.assert_calls() def test_unbind_accelerator_request(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'accelerator', 'public', append=[ 'v2', 'accelerator_requests', ARQ_DICT['uuid'], ], ), 
json={"accelerator_requests": [ARQ_DICT]}, ), dict( method='PATCH', uri=self.get_mock_url( 'accelerator', 'public', append=[ 'v2', 'accelerator_requests', ARQ_DICT['uuid'], ], ), json=ARQ_DICT, ), ] ) properties = [ {'path': '/hostname', 'op': 'remove'}, {'path': '/instance_uuid', 'op': 'remove'}, {'path': '/device_rp_uuid', 'op': 'remove'}, ] self.assertTrue( self.cloud.unbind_accelerator_request(ARQ_DICT['uuid'], properties) ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_aggregate.py0000664000175000017500000002163600000000000025014 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests import fakes from openstack.tests.unit import base class TestAggregate(base.TestCase): def setUp(self): super().setUp() self.aggregate_name = self.getUniqueString('aggregate') self.fake_aggregate = fakes.make_fake_aggregate(1, self.aggregate_name) self.use_compute_discovery() def test_create_aggregate(self): create_aggregate = self.fake_aggregate.copy() del create_aggregate['metadata'] del create_aggregate['hosts'] self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates'] ), json={'aggregate': create_aggregate}, validate=dict( json={ 'aggregate': { 'name': self.aggregate_name, 'availability_zone': None, } } ), ), ] ) self.cloud.create_aggregate(name=self.aggregate_name) self.assert_calls() def test_create_aggregate_with_az(self): availability_zone = 'az1' az_aggregate = fakes.make_fake_aggregate( 1, self.aggregate_name, availability_zone=availability_zone ) create_aggregate = az_aggregate.copy() del create_aggregate['metadata'] del create_aggregate['hosts'] self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates'] ), json={'aggregate': create_aggregate}, validate=dict( json={ 'aggregate': { 'name': self.aggregate_name, 'availability_zone': availability_zone, } } ), ), ] ) self.cloud.create_aggregate( name=self.aggregate_name, availability_zone=availability_zone ) self.assert_calls() def test_delete_aggregate(self): self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1'] ), ), ] ) self.assertTrue(self.cloud.delete_aggregate('1')) self.assert_calls() def test_delete_aggregate_by_name(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', self.aggregate_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates'] ), json={'aggregates': [self.fake_aggregate]}, 
), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1'] ), ), ] ) self.assertTrue(self.cloud.delete_aggregate(self.aggregate_name)) self.assert_calls() def test_update_aggregate_set_az(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1'] ), json=self.fake_aggregate, ), dict( method='PUT', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1'] ), json={'aggregate': self.fake_aggregate}, validate=dict( json={ 'aggregate': { 'availability_zone': 'az', } } ), ), ] ) self.cloud.update_aggregate(1, availability_zone='az') self.assert_calls() def test_update_aggregate_unset_az(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1'] ), json=self.fake_aggregate, ), dict( method='PUT', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1'] ), json={'aggregate': self.fake_aggregate}, validate=dict( json={ 'aggregate': { 'availability_zone': None, } } ), ), ] ) self.cloud.update_aggregate(1, availability_zone=None) self.assert_calls() def test_set_aggregate_metadata(self): metadata = {'key': 'value'} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1'] ), json=self.fake_aggregate, ), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1', 'action'], ), json={'aggregate': self.fake_aggregate}, validate=dict( json={'set_metadata': {'metadata': metadata}} ), ), ] ) self.cloud.set_aggregate_metadata('1', metadata) self.assert_calls() def test_add_host_to_aggregate(self): hostname = 'host1' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1'] ), json=self.fake_aggregate, ), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1', 'action'], ), json={'aggregate': 
self.fake_aggregate}, validate=dict(json={'add_host': {'host': hostname}}), ), ] ) self.cloud.add_host_to_aggregate('1', hostname) self.assert_calls() def test_remove_host_from_aggregate(self): hostname = 'host1' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1'] ), json=self.fake_aggregate, ), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates', '1', 'action'], ), json={'aggregate': self.fake_aggregate}, validate=dict(json={'remove_host': {'host': hostname}}), ), ] ) self.cloud.remove_host_from_aggregate('1', hostname) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_availability_zones.py0000664000175000017500000000460400000000000026752 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests import fakes from openstack.tests.unit import base _fake_zone_list = { "availabilityZoneInfo": [ {"hosts": None, "zoneName": "az1", "zoneState": {"available": True}}, {"hosts": None, "zoneName": "nova", "zoneState": {"available": False}}, ] } class TestAvailabilityZoneNames(base.TestCase): def test_list_availability_zone_names(self): self.register_uris( [ dict( method='GET', uri='{endpoint}/os-availability-zone'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json=_fake_zone_list, ), ] ) self.assertEqual(['az1'], self.cloud.list_availability_zone_names()) self.assert_calls() def test_unauthorized_availability_zone_names(self): self.register_uris( [ dict( method='GET', uri='{endpoint}/os-availability-zone'.format( endpoint=fakes.COMPUTE_ENDPOINT ), status_code=403, ), ] ) self.assertEqual([], self.cloud.list_availability_zone_names()) self.assert_calls() def test_list_all_availability_zone_names(self): self.register_uris( [ dict( method='GET', uri='{endpoint}/os-availability-zone'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json=_fake_zone_list, ), ] ) self.assertEqual( ['az1', 'nova'], self.cloud.list_availability_zone_names(unavailable=True), ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_baremetal_node.py0000664000175000017500000024647600000000000026042 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" test_baremetal_node ---------------------------------- Tests for baremetal node related operations """ import uuid from testscenarios import load_tests_apply_scenarios as load_tests # noqa from openstack import exceptions from openstack.network.v2 import port as _port from openstack.tests import fakes from openstack.tests.unit import base class TestBaremetalNode(base.IronicTestCase): def setUp(self): super().setUp() self.fake_baremetal_node = fakes.make_fake_machine( self.name, self.uuid ) # TODO(TheJulia): Some tests below have fake ports, # since they are required in some processes. Lets refactor # them at some point to use self.fake_baremetal_port. self.fake_baremetal_port = fakes.make_fake_port( '00:01:02:03:04:05', node_id=self.uuid ) def test_list_machines(self): fake_baremetal_two = fakes.make_fake_machine('two', str(uuid.uuid4())) self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='nodes'), json={ 'nodes': [self.fake_baremetal_node, fake_baremetal_two] }, ), ] ) machines = self.cloud.list_machines() self.assertEqual(2, len(machines)) self.assertSubdict(self.fake_baremetal_node, machines[0]) self.assertSubdict(fake_baremetal_two, machines[1]) self.assert_calls() def test_get_machine(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) machine = self.cloud.get_machine(self.fake_baremetal_node['uuid']) self.assertEqual(machine['uuid'], self.fake_baremetal_node['uuid']) self.assert_calls() def test_get_machine_by_mac(self): mac_address = '00:01:02:03:04:05' node_uuid = self.fake_baremetal_node['uuid'] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='ports', append=['detail'], qs_elements=['address=%s' % mac_address], ), json={ 'ports': [ {'address': mac_address, 'node_uuid': node_uuid} ] }, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', 
append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) machine = self.cloud.get_machine_by_mac(mac_address) self.assertEqual(machine['uuid'], self.fake_baremetal_node['uuid']) self.assert_calls() def test_validate_machine(self): # NOTE(TheJulia): Note: These are only the interfaces # that are validated, and all must be true for an # exception to not be raised. validate_return = { 'boot': { 'result': True, }, 'deploy': { 'result': True, }, 'management': { 'result': True, }, 'power': { 'result': True, }, 'foo': { 'result': False, }, } self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid'], 'validate'], ), json=validate_return, ), ] ) self.cloud.validate_machine(self.fake_baremetal_node['uuid']) self.assert_calls() def test_validate_machine_not_for_deploy(self): validate_return = { 'deploy': { 'result': False, 'reason': 'Not ready', }, 'power': { 'result': True, }, 'foo': { 'result': False, }, } self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid'], 'validate'], ), json=validate_return, ), ] ) self.cloud.validate_machine( self.fake_baremetal_node['uuid'], for_deploy=False ) self.assert_calls() def test_deprecated_validate_node(self): validate_return = { 'deploy': { 'result': True, }, 'power': { 'result': True, }, 'foo': { 'result': False, }, } self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid'], 'validate'], ), json=validate_return, ), ] ) self.cloud.validate_node(self.fake_baremetal_node['uuid']) self.assert_calls() def test_validate_machine_raises_exception(self): validate_return = { 'deploy': { 'result': False, 'reason': 'error!', }, 'power': { 'result': True, 'reason': None, }, 'foo': {'result': True}, } self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid'], 
'validate'], ), json=validate_return, ), ] ) self.assertRaises( exceptions.ValidationException, self.cloud.validate_machine, self.fake_baremetal_node['uuid'], ) self.assert_calls() def test_patch_machine(self): test_patch = [{'op': 'remove', 'path': '/instance_info'}] self.fake_baremetal_node['instance_info'] = {} self.register_uris( [ dict( method='PATCH', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, validate=dict(json=test_patch), ), ] ) result = self.cloud.patch_machine( self.fake_baremetal_node['uuid'], test_patch ) self.assertEqual(self.fake_baremetal_node['uuid'], result['uuid']) self.assert_calls() def test_set_node_instance_info(self): test_patch = [{'op': 'add', 'path': '/foo', 'value': 'bar'}] self.register_uris( [ dict( method='PATCH', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, validate=dict(json=test_patch), ), ] ) self.cloud.set_node_instance_info( self.fake_baremetal_node['uuid'], test_patch ) self.assert_calls() def test_purge_node_instance_info(self): test_patch = [{'op': 'remove', 'path': '/instance_info'}] self.fake_baremetal_node['instance_info'] = {} self.register_uris( [ dict( method='PATCH', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, validate=dict(json=test_patch), ), ] ) self.cloud.purge_node_instance_info(self.fake_baremetal_node['uuid']) self.assert_calls() def test_inspect_machine_fail_active(self): self.fake_baremetal_node['provision_state'] = 'active' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.inspect_machine, self.fake_baremetal_node['uuid'], wait=True, timeout=1, ) self.assert_calls() def test_inspect_machine_fail_associated(self): 
self.fake_baremetal_node['provision_state'] = 'available' self.fake_baremetal_node['instance_uuid'] = '1234' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.assertRaisesRegex( exceptions.SDKException, 'associated with an instance', self.cloud.inspect_machine, self.fake_baremetal_node['uuid'], wait=True, timeout=1, ) self.assert_calls() def test_inspect_machine_failed(self): inspecting_node = self.fake_baremetal_node.copy() self.fake_baremetal_node['provision_state'] = 'inspect failed' self.fake_baremetal_node['last_error'] = 'kaboom!' inspecting_node['provision_state'] = 'inspecting' finished_node = self.fake_baremetal_node.copy() finished_node['provision_state'] = 'manageable' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'inspect'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=inspecting_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=finished_node, ), ] ) self.cloud.inspect_machine(self.fake_baremetal_node['uuid']) self.assert_calls() def test_inspect_machine_manageable(self): self.fake_baremetal_node['provision_state'] = 'manageable' inspecting_node = self.fake_baremetal_node.copy() inspecting_node['provision_state'] = 'inspecting' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), 
validate=dict(json={'target': 'inspect'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=inspecting_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.cloud.inspect_machine(self.fake_baremetal_node['uuid']) self.assert_calls() def test_inspect_machine_available(self): available_node = self.fake_baremetal_node.copy() available_node['provision_state'] = 'available' manageable_node = self.fake_baremetal_node.copy() manageable_node['provision_state'] = 'manageable' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=available_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'manage'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=manageable_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'inspect'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=manageable_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'provide'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=available_node, ), ] ) self.cloud.inspect_machine(self.fake_baremetal_node['uuid']) self.assert_calls() def test_inspect_machine_available_wait(self): available_node = self.fake_baremetal_node.copy() available_node['provision_state'] = 'available' manageable_node = 
self.fake_baremetal_node.copy() manageable_node['provision_state'] = 'manageable' inspecting_node = self.fake_baremetal_node.copy() inspecting_node['provision_state'] = 'inspecting' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=available_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'manage'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=available_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=manageable_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'inspect'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=inspecting_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=manageable_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'provide'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=available_node, ), ] ) self.cloud.inspect_machine( self.fake_baremetal_node['uuid'], wait=True, timeout=1 ) self.assert_calls() def test_inspect_machine_wait(self): self.fake_baremetal_node['provision_state'] = 'manageable' inspecting_node = self.fake_baremetal_node.copy() inspecting_node['provision_state'] = 'inspecting' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), 
dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'inspect'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=inspecting_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=inspecting_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.cloud.inspect_machine( self.fake_baremetal_node['uuid'], wait=True, timeout=1 ) self.assert_calls() def test_inspect_machine_inspect_failed(self): self.fake_baremetal_node['provision_state'] = 'manageable' inspecting_node = self.fake_baremetal_node.copy() inspecting_node['provision_state'] = 'inspecting' inspect_fail_node = self.fake_baremetal_node.copy() inspect_fail_node['provision_state'] = 'inspect failed' inspect_fail_node['last_error'] = 'Earth Imploded' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'inspect'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=inspecting_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=inspect_fail_node, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.inspect_machine, self.fake_baremetal_node['uuid'], wait=True, timeout=1, ) self.assert_calls() def test_set_machine_maintenace_state(self): self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 
'maintenance', ], ), validate=dict(json={'reason': 'no reason'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.cloud.set_machine_maintenance_state( self.fake_baremetal_node['uuid'], True, reason='no reason' ) self.assert_calls() def test_set_machine_maintenace_state_false(self): self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'maintenance', ], ), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.cloud.set_machine_maintenance_state( self.fake_baremetal_node['uuid'], False ) self.assert_calls def test_remove_machine_from_maintenance(self): self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'maintenance', ], ), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.cloud.remove_machine_from_maintenance( self.fake_baremetal_node['uuid'] ) self.assert_calls() def test_set_machine_power_on(self): self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'power', ], ), validate=dict(json={'target': 'power on'}), ), ] ) return_value = self.cloud.set_machine_power_on( self.fake_baremetal_node['uuid'] ) self.assertIsNone(return_value) self.assert_calls() def test_set_machine_power_on_with_retires(self): # NOTE(TheJulia): This logic ends up testing power on/off and reboot # as they all utilize the same helper method. 
self.register_uris( [ dict( method='PUT', status_code=503, uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'power', ], ), validate=dict(json={'target': 'power on'}), ), dict( method='PUT', status_code=409, uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'power', ], ), validate=dict(json={'target': 'power on'}), ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'power', ], ), validate=dict(json={'target': 'power on'}), ), ] ) return_value = self.cloud.set_machine_power_on( self.fake_baremetal_node['uuid'] ) self.assertIsNone(return_value) self.assert_calls() def test_set_machine_power_off(self): self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'power', ], ), validate=dict(json={'target': 'power off'}), ), ] ) return_value = self.cloud.set_machine_power_off( self.fake_baremetal_node['uuid'] ) self.assertIsNone(return_value) self.assert_calls() def test_set_machine_power_reboot(self): self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'power', ], ), validate=dict(json={'target': 'rebooting'}), ), ] ) return_value = self.cloud.set_machine_power_reboot( self.fake_baremetal_node['uuid'] ) self.assertIsNone(return_value) self.assert_calls() def test_set_machine_power_reboot_failure(self): self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'power', ], ), status_code=400, json={'error': 'invalid'}, validate=dict(json={'target': 'rebooting'}), ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.set_machine_power_reboot, self.fake_baremetal_node['uuid'], ) self.assert_calls() def test_node_set_provision_state(self): deploy_node = 
self.fake_baremetal_node.copy() deploy_node['provision_state'] = 'deploying' active_node = self.fake_baremetal_node.copy() active_node['provision_state'] = 'active' self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict( json={ 'target': 'active', 'configdrive': 'http://host/file', } ), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) result = self.cloud.node_set_provision_state( self.fake_baremetal_node['uuid'], 'active', configdrive='http://host/file', ) self.assertEqual(self.fake_baremetal_node['uuid'], result['uuid']) self.assert_calls() def test_node_set_provision_state_with_retries(self): deploy_node = self.fake_baremetal_node.copy() deploy_node['provision_state'] = 'deploying' active_node = self.fake_baremetal_node.copy() active_node['provision_state'] = 'active' self.register_uris( [ dict( method='PUT', status_code=409, uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict( json={ 'target': 'active', 'configdrive': 'http://host/file', } ), ), dict( method='PUT', status_code=503, uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict( json={ 'target': 'active', 'configdrive': 'http://host/file', } ), ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict( json={ 'target': 'active', 'configdrive': 'http://host/file', } ), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.cloud.node_set_provision_state( self.fake_baremetal_node['uuid'], 'active', configdrive='http://host/file', ) self.assert_calls() def 
test_node_set_provision_state_wait_timeout(self): deploy_node = self.fake_baremetal_node.copy() deploy_node['provision_state'] = 'deploying' active_node = self.fake_baremetal_node.copy() active_node['provision_state'] = 'active' self.fake_baremetal_node['provision_state'] = 'available' self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'active'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=deploy_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=active_node, ), ] ) return_value = self.cloud.node_set_provision_state( self.fake_baremetal_node['uuid'], 'active', wait=True ) self.assertSubdict(active_node, return_value) self.assert_calls() def test_node_set_provision_state_wait_timeout_fails(self): # Intentionally time out. 
self.fake_baremetal_node['provision_state'] = 'deploy wait' self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'active'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.node_set_provision_state, self.fake_baremetal_node['uuid'], 'active', wait=True, timeout=0.001, ) self.assert_calls() def test_node_set_provision_state_wait_success(self): self.fake_baremetal_node['provision_state'] = 'active' self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'active'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) return_value = self.cloud.node_set_provision_state( self.fake_baremetal_node['uuid'], 'active', wait=True ) self.assertSubdict(self.fake_baremetal_node, return_value) self.assert_calls() def test_node_set_provision_state_wait_failure_cases(self): self.fake_baremetal_node['provision_state'] = 'foo failed' self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'active'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.node_set_provision_state, self.fake_baremetal_node['uuid'], 'active', wait=True, timeout=300, ) self.assert_calls() def test_node_set_provision_state_wait_provide(self): self.fake_baremetal_node['provision_state'] = 'manageable' available_node = 
self.fake_baremetal_node.copy() available_node['provision_state'] = 'available' self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'provide'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=available_node, ), ] ) return_value = self.cloud.node_set_provision_state( self.fake_baremetal_node['uuid'], 'provide', wait=True ) self.assertSubdict(available_node, return_value) self.assert_calls() def test_wait_for_baremetal_node_lock_locked(self): self.fake_baremetal_node['reservation'] = 'conductor0' unlocked_node = self.fake_baremetal_node.copy() unlocked_node['reservation'] = None self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=unlocked_node, ), ] ) self.assertIsNone( self.cloud.wait_for_baremetal_node_lock( self.fake_baremetal_node, timeout=1 ) ) self.assert_calls() def test_wait_for_baremetal_node_lock_not_locked(self): self.fake_baremetal_node['reservation'] = None self.assertIsNone( self.cloud.wait_for_baremetal_node_lock( self.fake_baremetal_node, timeout=1 ) ) # NOTE(dtantsur): service discovery apparently requires 3 calls self.assertEqual(3, len(self.adapter.request_history)) def test_wait_for_baremetal_node_lock_timeout(self): self.fake_baremetal_node['reservation'] = 'conductor0' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.assertRaises( exceptions.SDKException, 
self.cloud.wait_for_baremetal_node_lock, self.fake_baremetal_node, timeout=0.001, ) self.assert_calls() def test_activate_node(self): self.fake_baremetal_node['provision_state'] = 'active' self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict( json={ 'target': 'active', 'configdrive': 'http://host/file', } ), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) return_value = self.cloud.activate_node( self.fake_baremetal_node['uuid'], configdrive='http://host/file', wait=True, ) self.assertIsNone(return_value) self.assert_calls() def test_deactivate_node(self): self.fake_baremetal_node['provision_state'] = 'available' self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'deleted'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) return_value = self.cloud.deactivate_node( self.fake_baremetal_node['uuid'], wait=True ) self.assertIsNone(return_value) self.assert_calls() def test_register_machine(self): mac_address = '00:01:02:03:04:05' nics = [{'address': mac_address}] node_uuid = self.fake_baremetal_node['uuid'] # TODO(TheJulia): There is a lot of duplication # in testing creation. Surely this hsould be a helper # or something. We should fix this. 
node_to_post = { 'driver': None, 'driver_info': None, 'name': self.fake_baremetal_node['name'], 'properties': None, 'uuid': node_uuid, } self.fake_baremetal_node['provision_state'] = 'available' if 'provision_state' in node_to_post: node_to_post.pop('provision_state') self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='nodes'), json=self.fake_baremetal_node, validate=dict(json=node_to_post), ), dict( method='POST', uri=self.get_mock_url(resource='ports'), validate=dict( json={'address': mac_address, 'node_uuid': node_uuid} ), json=self.fake_baremetal_port, ), ] ) return_value = self.cloud.register_machine(nics, **node_to_post) self.assertEqual(self.uuid, return_value.id) self.assertSubdict(self.fake_baremetal_node, return_value) self.assert_calls() # TODO(TheJulia): We need to de-duplicate these tests. # Possibly a dedicated class, although we should do it # then as we may find differences that need to be # accounted for newer microversions. def test_register_machine_enroll(self): mac_address = '00:01:02:03:04:05' nics = [{'address': mac_address, 'pxe_enabled': False}] node_uuid = self.fake_baremetal_node['uuid'] node_to_post = { 'chassis_uuid': None, 'driver': None, 'driver_info': None, 'name': self.fake_baremetal_node['name'], 'properties': None, 'uuid': node_uuid, } self.fake_baremetal_node['provision_state'] = 'enroll' manageable_node = self.fake_baremetal_node.copy() manageable_node['provision_state'] = 'manageable' available_node = self.fake_baremetal_node.copy() available_node['provision_state'] = 'available' self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='nodes'), validate=dict(json=node_to_post), json=self.fake_baremetal_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'manage'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], 
), json=manageable_node, ), dict( method='POST', uri=self.get_mock_url(resource='ports'), validate=dict( json={ 'address': mac_address, 'node_uuid': node_uuid, 'pxe_enabled': False, } ), json=self.fake_baremetal_port, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'provide'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=available_node, ), ] ) return_value = self.cloud.register_machine(nics, **node_to_post) self.assertSubdict(available_node, return_value) self.assert_calls() def test_register_machine_enroll_wait(self): mac_address = self.fake_baremetal_port nics = [{'address': mac_address}] node_uuid = self.fake_baremetal_node['uuid'] node_to_post = { 'chassis_uuid': None, 'driver': None, 'driver_info': None, 'name': self.fake_baremetal_node['name'], 'properties': None, 'uuid': node_uuid, } self.fake_baremetal_node['provision_state'] = 'enroll' manageable_node = self.fake_baremetal_node.copy() manageable_node['provision_state'] = 'manageable' available_node = self.fake_baremetal_node.copy() available_node['provision_state'] = 'available' self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='nodes'), validate=dict(json=node_to_post), json=self.fake_baremetal_node, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'manage'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=manageable_node, ), dict( method='POST', uri=self.get_mock_url(resource='ports'), validate=dict( json={'address': mac_address, 'node_uuid': node_uuid} ), 
json=self.fake_baremetal_port, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'provide'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=manageable_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=available_node, ), ] ) return_value = self.cloud.register_machine( nics, wait=True, **node_to_post ) self.assertSubdict(available_node, return_value) self.assert_calls() def test_register_machine_enroll_failure(self): mac_address = '00:01:02:03:04:05' nics = [{'address': mac_address}] node_uuid = self.fake_baremetal_node['uuid'] node_to_post = { 'chassis_uuid': None, 'driver': None, 'driver_info': None, 'name': self.fake_baremetal_node['name'], 'properties': None, 'uuid': node_uuid, } self.fake_baremetal_node['provision_state'] = 'enroll' failed_node = self.fake_baremetal_node.copy() failed_node['reservation'] = 'conductor0' failed_node['provision_state'] = 'enroll' failed_node['last_error'] = 'kaboom!' 
self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='nodes'), json=self.fake_baremetal_node, validate=dict(json=node_to_post), ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'manage'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=failed_node, ), dict( method='DELETE', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.register_machine, nics, **node_to_post ) self.assert_calls() def test_register_machine_enroll_timeout(self): mac_address = '00:01:02:03:04:05' nics = [{'address': mac_address}] node_uuid = self.fake_baremetal_node['uuid'] node_to_post = { 'chassis_uuid': None, 'driver': None, 'driver_info': None, 'name': self.fake_baremetal_node['name'], 'properties': None, 'uuid': node_uuid, } self.fake_baremetal_node['provision_state'] = 'enroll' busy_node = self.fake_baremetal_node.copy() busy_node['reservation'] = 'conductor0' busy_node['provision_state'] = 'verifying' self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='nodes'), json=self.fake_baremetal_node, validate=dict(json=node_to_post), ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'manage'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=busy_node, ), dict( method='DELETE', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), ), ] ) # NOTE(TheJulia): This test shortcircuits the timeout loop # such that it executes only once. The very last returned # state to the API is essentially a busy state that we # want to block on until it has cleared. 
self.assertRaises( exceptions.SDKException, self.cloud.register_machine, nics, timeout=0.001, lock_timeout=0.001, **node_to_post ) self.assert_calls() def test_register_machine_enroll_timeout_wait(self): mac_address = '00:01:02:03:04:05' nics = [{'address': mac_address}] node_uuid = self.fake_baremetal_node['uuid'] node_to_post = { 'chassis_uuid': None, 'driver': None, 'driver_info': None, 'name': self.fake_baremetal_node['name'], 'properties': None, 'uuid': node_uuid, } self.fake_baremetal_node['provision_state'] = 'enroll' manageable_node = self.fake_baremetal_node.copy() manageable_node['provision_state'] = 'manageable' self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='nodes'), json=self.fake_baremetal_node, validate=dict(json=node_to_post), ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'manage'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=manageable_node, ), dict( method='POST', uri=self.get_mock_url(resource='ports'), validate=dict( json={'address': mac_address, 'node_uuid': node_uuid} ), json=self.fake_baremetal_port, ), dict( method='PUT', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'states', 'provision', ], ), validate=dict(json={'target': 'provide'}), ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='DELETE', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.register_machine, nics, wait=True, timeout=0.001, **node_to_post ) self.assert_calls() def test_register_machine_port_create_failed(self): mac_address = '00:01:02:03:04:05' nics = [{'address': mac_address}] node_uuid = 
self.fake_baremetal_node['uuid'] node_to_post = { 'chassis_uuid': None, 'driver': None, 'driver_info': None, 'name': self.fake_baremetal_node['name'], 'properties': None, 'uuid': node_uuid, } self.fake_baremetal_node['provision_state'] = 'available' self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='nodes'), json=self.fake_baremetal_node, validate=dict(json=node_to_post), ), dict( method='POST', uri=self.get_mock_url(resource='ports'), status_code=400, json={'error': 'no ports for you'}, validate=dict( json={'address': mac_address, 'node_uuid': node_uuid} ), ), dict( method='DELETE', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), ), ] ) self.assertRaisesRegex( exceptions.SDKException, 'no ports for you', self.cloud.register_machine, nics, **node_to_post ) self.assert_calls() def test_register_machine_several_ports_create_failed(self): mac_address = '00:01:02:03:04:05' mac_address2 = mac_address[::-1] # Verify a couple of ways to provide MACs nics = [mac_address, {'mac': mac_address2}] node_uuid = self.fake_baremetal_node['uuid'] node_to_post = { 'chassis_uuid': None, 'driver': None, 'driver_info': None, 'name': self.fake_baremetal_node['name'], 'properties': None, 'uuid': node_uuid, } self.fake_baremetal_node['provision_state'] = 'available' self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='nodes'), json=self.fake_baremetal_node, validate=dict(json=node_to_post), ), dict( method='POST', uri=self.get_mock_url(resource='ports'), validate=dict( json={'address': mac_address, 'node_uuid': node_uuid} ), json=self.fake_baremetal_port, ), dict( method='POST', uri=self.get_mock_url(resource='ports'), status_code=400, json={'error': 'no ports for you'}, validate=dict( json={'address': mac_address2, 'node_uuid': node_uuid} ), ), dict( method='DELETE', uri=self.get_mock_url( resource='ports', append=[self.fake_baremetal_port['uuid']], ), ), dict( method='DELETE', uri=self.get_mock_url( 
resource='nodes', append=[self.fake_baremetal_node['uuid']], ), ), ] ) self.assertRaisesRegex( exceptions.SDKException, 'no ports for you', self.cloud.register_machine, nics, **node_to_post ) self.assert_calls() def test_unregister_machine(self): mac_address = self.fake_baremetal_port['address'] nics = [{'mac': mac_address}] port_uuid = self.fake_baremetal_port['uuid'] # NOTE(TheJulia): The two values below should be the same. port_node_uuid = self.fake_baremetal_port['node_uuid'] self.fake_baremetal_node['provision_state'] = 'available' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='GET', uri=self.get_mock_url( resource='ports', qs_elements=['address=%s' % mac_address], ), json={ 'ports': [ { 'address': mac_address, 'node_uuid': port_node_uuid, 'uuid': port_uuid, } ] }, ), dict( method='DELETE', uri=self.get_mock_url( resource='ports', append=[self.fake_baremetal_port['uuid']], ), ), dict( method='DELETE', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), ), ] ) self.cloud.unregister_machine(nics, self.fake_baremetal_node['uuid']) self.assert_calls() def test_unregister_machine_locked_timeout(self): mac_address = self.fake_baremetal_port['address'] nics = [{'mac': mac_address}] self.fake_baremetal_node['provision_state'] = 'available' self.fake_baremetal_node['reservation'] = 'conductor99' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.unregister_machine, nics, self.fake_baremetal_node['uuid'], timeout=0.001, ) self.assert_calls() def test_unregister_machine_retries(self): mac_address = 
self.fake_baremetal_port['address'] nics = [{'mac': mac_address}] port_uuid = self.fake_baremetal_port['uuid'] # NOTE(TheJulia): The two values below should be the same. port_node_uuid = self.fake_baremetal_port['node_uuid'] self.fake_baremetal_node['provision_state'] = 'available' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='GET', uri=self.get_mock_url( resource='ports', qs_elements=['address=%s' % mac_address], ), json={ 'ports': [ { 'address': mac_address, 'node_uuid': port_node_uuid, 'uuid': port_uuid, } ] }, ), dict( method='DELETE', status_code=503, uri=self.get_mock_url( resource='ports', append=[self.fake_baremetal_port['uuid']], ), ), dict( method='DELETE', status_code=409, uri=self.get_mock_url( resource='ports', append=[self.fake_baremetal_port['uuid']], ), ), dict( method='DELETE', uri=self.get_mock_url( resource='ports', append=[self.fake_baremetal_port['uuid']], ), ), dict( method='DELETE', status_code=409, uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), ), dict( method='DELETE', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), ), ] ) self.cloud.unregister_machine(nics, self.fake_baremetal_node['uuid']) self.assert_calls() def test_unregister_machine_unavailable(self): # This is a list of invalid states that the method # should fail on. 
invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed'] mac_address = self.fake_baremetal_port['address'] nics = [{'mac': mac_address}] url_list = [] for state in invalid_states: self.fake_baremetal_node['provision_state'] = state url_list.append( dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ) ) self.register_uris(url_list) for state in invalid_states: self.assertRaises( exceptions.SDKException, self.cloud.unregister_machine, nics, self.fake_baremetal_node['uuid'], ) self.assert_calls() def test_update_machine_patch_no_action(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), ] ) # NOTE(TheJulia): This is just testing mechanics. update_dict = self.cloud.update_machine( self.fake_baremetal_node['uuid'] ) self.assertIsNone(update_dict['changes']) self.assertSubdict(self.fake_baremetal_node, update_dict['node']) self.assert_calls() def test_attach_port_to_machine(self): vif_id = '953ccbee-e854-450f-95fe-fe5e40d611ec' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='GET', uri=self.get_mock_url( service_type='network', resource='ports', base_url_append='v2.0', append=[vif_id], ), json={'id': vif_id}, ), dict( method='POST', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid'], 'vifs'], ), ), ] ) self.cloud.attach_port_to_machine( self.fake_baremetal_node['uuid'], vif_id ) self.assert_calls() def test_detach_port_from_machine(self): vif_id = '953ccbee-e854-450f-95fe-fe5e40d611ec' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='GET', uri=self.get_mock_url( 
service_type='network', resource='ports', base_url_append='v2.0', append=[vif_id], ), json={'id': vif_id}, ), dict( method='DELETE', uri=self.get_mock_url( resource='nodes', append=[ self.fake_baremetal_node['uuid'], 'vifs', vif_id, ], ), ), ] ) self.cloud.detach_port_from_machine( self.fake_baremetal_node['uuid'], vif_id ) self.assert_calls() def test_list_ports_attached_to_machine(self): vif_id = '953ccbee-e854-450f-95fe-fe5e40d611ec' fake_port = {'id': vif_id, 'name': 'test'} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, ), dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid'], 'vifs'], ), json={'vifs': [{'id': vif_id}]}, ), dict( method='GET', uri=self.get_mock_url( service_type='network', resource='ports', base_url_append='v2.0', append=[vif_id], ), json=fake_port, ), ] ) res = self.cloud.list_ports_attached_to_machine( self.fake_baremetal_node['uuid'] ) self.assert_calls() self.assertEqual( [_port.Port(**fake_port).to_dict(computed=False)], [i.to_dict(computed=False) for i in res], ) class TestUpdateMachinePatch(base.IronicTestCase): # NOTE(TheJulia): As appears, and mordred describes, # this class utilizes black magic, which ultimately # results in additional test runs being executed with # the scenario name appended. Useful for lots of # variables that need to be tested. def setUp(self): super().setUp() self.fake_baremetal_node = fakes.make_fake_machine( self.name, self.uuid ) def test_update_machine_patch(self): # The model has evolved over time, create the field if # we don't already have it. 
if self.field_name not in self.fake_baremetal_node: self.fake_baremetal_node[self.field_name] = None value_to_send = self.fake_baremetal_node[self.field_name] if self.changed: value_to_send = self.new_value uris = [ dict( method='GET', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']] ), json=self.fake_baremetal_node, ), ] if self.changed: test_patch = [ { 'op': 'replace', 'path': '/' + self.field_name, 'value': value_to_send, } ] uris.append( dict( method='PATCH', uri=self.get_mock_url( resource='nodes', append=[self.fake_baremetal_node['uuid']], ), json=self.fake_baremetal_node, validate=dict(json=test_patch), ) ) self.register_uris(uris) call_args = {self.field_name: value_to_send} update_dict = self.cloud.update_machine( self.fake_baremetal_node['uuid'], **call_args ) if self.changed: self.assertEqual(['/' + self.field_name], update_dict['changes']) else: self.assertIsNone(update_dict['changes']) self.assertSubdict(self.fake_baremetal_node, update_dict['node']) self.assert_calls() scenarios = [ ('chassis_uuid', dict(field_name='chassis_uuid', changed=False)), ( 'chassis_uuid_changed', dict(field_name='chassis_uuid', changed=True, new_value='meow'), ), ('driver', dict(field_name='driver', changed=False)), ( 'driver_changed', dict(field_name='driver', changed=True, new_value='meow'), ), ('driver_info', dict(field_name='driver_info', changed=False)), ( 'driver_info_changed', dict( field_name='driver_info', changed=True, new_value={'cat': 'meow'}, ), ), ('instance_info', dict(field_name='instance_info', changed=False)), ( 'instance_info_changed', dict( field_name='instance_info', changed=True, new_value={'cat': 'meow'}, ), ), ('instance_uuid', dict(field_name='instance_uuid', changed=False)), ( 'instance_uuid_changed', dict(field_name='instance_uuid', changed=True, new_value='meow'), ), ('name', dict(field_name='name', changed=False)), ( 'name_changed', dict(field_name='name', changed=True, new_value='meow'), ), ('properties', 
dict(field_name='properties', changed=False)), ( 'properties_changed', dict( field_name='properties', changed=True, new_value={'cat': 'meow'}, ), ), ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_baremetal_ports.py0000664000175000017500000001245000000000000026243 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_baremetal_ports ---------------------------------- Tests for baremetal port related operations """ from testscenarios import load_tests_apply_scenarios as load_tests # noqa from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestBaremetalPort(base.IronicTestCase): def setUp(self): super().setUp() self.fake_baremetal_node = fakes.make_fake_machine( self.name, self.uuid ) # TODO(TheJulia): Some tests below have fake ports, # since they are required in some processes. Lets refactor # them at some point to use self.fake_baremetal_port. 
self.fake_baremetal_port = fakes.make_fake_port( '00:01:02:03:04:05', node_id=self.uuid ) self.fake_baremetal_port2 = fakes.make_fake_port( '0a:0b:0c:0d:0e:0f', node_id=self.uuid ) def test_list_nics(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='ports', append=['detail']), json={ 'ports': [ self.fake_baremetal_port, self.fake_baremetal_port2, ] }, ), ] ) return_value = self.cloud.list_nics() self.assertEqual(2, len(return_value)) self.assertSubdict(self.fake_baremetal_port, return_value[0]) self.assert_calls() def test_list_nics_failure(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='ports', append=['detail']), status_code=400, ) ] ) self.assertRaises(exceptions.SDKException, self.cloud.list_nics) self.assert_calls() def test_list_nics_for_machine(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='ports', append=['detail'], qs_elements=[ 'node_uuid=%s' % self.fake_baremetal_node['uuid'] ], ), json={ 'ports': [ self.fake_baremetal_port, self.fake_baremetal_port2, ] }, ), ] ) return_value = self.cloud.list_nics_for_machine( self.fake_baremetal_node['uuid'] ) self.assertEqual(2, len(return_value)) self.assertSubdict(self.fake_baremetal_port, return_value[0]) self.assert_calls() def test_list_nics_for_machine_failure(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='ports', append=['detail'], qs_elements=[ 'node_uuid=%s' % self.fake_baremetal_node['uuid'] ], ), status_code=400, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.list_nics_for_machine, self.fake_baremetal_node['uuid'], ) self.assert_calls() def test_get_nic_by_mac(self): mac = self.fake_baremetal_port['address'] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='ports', append=['detail'], qs_elements=['address=%s' % mac], ), json={'ports': [self.fake_baremetal_port]}, ), ] ) return_value = self.cloud.get_nic_by_mac(mac) 
self.assertSubdict(self.fake_baremetal_port, return_value) self.assert_calls() def test_get_nic_by_mac_failure(self): mac = self.fake_baremetal_port['address'] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='ports', append=['detail'], qs_elements=['address=%s' % mac], ), json={'ports': []}, ), ] ) self.assertIsNone(self.cloud.get_nic_by_mac(mac)) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_cloud.py0000664000175000017500000001627700000000000024201 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import uuid import testtools from openstack import connection from openstack import exceptions from openstack.tests.unit import base from openstack import utils RANGE_DATA = [ dict(id=1, key1=1, key2=5), dict(id=2, key1=1, key2=20), dict(id=3, key1=2, key2=10), dict(id=4, key1=2, key2=30), dict(id=5, key1=3, key2=40), dict(id=6, key1=3, key2=40), ] class TestCloud(base.TestCase): def test_openstack_cloud(self): self.assertIsInstance(self.cloud, connection.Connection) def test_endpoint_for(self): dns_override = 'https://override.dns.example.com' self.cloud.config.config['dns_endpoint_override'] = dns_override self.assertEqual( 'https://compute.example.com/v2.1/', self.cloud.endpoint_for('compute'), ) self.assertEqual( 'https://internal.compute.example.com/v2.1/', self.cloud.endpoint_for('compute', interface='internal'), ) self.assertIsNone( self.cloud.endpoint_for('compute', region_name='unknown-region') ) self.assertEqual(dns_override, self.cloud.endpoint_for('dns')) def test_connect_as(self): # Do initial auth/catalog steps # This should authenticate a second time, but should not # need a second identity discovery project_name = 'test_project' self.register_uris( [ self.get_keystone_v3_token(project_name=project_name), self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': []}, ), ] ) c2 = self.cloud.connect_as(project_name=project_name) self.assertEqual(c2.list_servers(), []) self.assert_calls() def test_connect_as_context(self): # Do initial auth/catalog steps # This should authenticate a second time, but should not # need a second identity discovery project_name = 'test_project' self.register_uris( [ self.get_keystone_v3_token(project_name=project_name), self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': []}, ), ] ) with 
self.cloud.connect_as(project_name=project_name) as c2: self.assertEqual(c2.list_servers(), []) self.assert_calls() def test_global_request_id(self): request_id = uuid.uuid4().hex self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': []}, validate=dict( headers={'X-Openstack-Request-Id': request_id} ), ), ] ) cloud2 = self.cloud.global_request(request_id) self.assertEqual([], cloud2.list_servers()) self.assert_calls() def test_global_request_id_context(self): request_id = uuid.uuid4().hex self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': []}, validate=dict( headers={'X-Openstack-Request-Id': request_id} ), ), ] ) with self.cloud.global_request(request_id) as c2: self.assertEqual([], c2.list_servers()) self.assert_calls() def test_iterate_timeout_bad_wait(self): with testtools.ExpectedException( exceptions.SDKException, "Wait value must be an int or float value.", ): for count in utils.iterate_timeout( 1, "test_iterate_timeout_bad_wait", wait="timeishard" ): pass @mock.patch('time.sleep') def test_iterate_timeout_str_wait(self, mock_sleep): iter = utils.iterate_timeout( 10, "test_iterate_timeout_str_wait", wait="1.6" ) next(iter) next(iter) mock_sleep.assert_called_with(1.6) @mock.patch('time.sleep') def test_iterate_timeout_int_wait(self, mock_sleep): iter = utils.iterate_timeout( 10, "test_iterate_timeout_int_wait", wait=1 ) next(iter) next(iter) mock_sleep.assert_called_with(1.0) @mock.patch('time.sleep') def test_iterate_timeout_timeout(self, mock_sleep): message = "timeout test" with testtools.ExpectedException(exceptions.ResourceTimeout, message): for count in utils.iterate_timeout(0.1, message, wait=1): pass mock_sleep.assert_called_with(1.0) def test_range_search(self): filters = {"key1": "min", "key2": "20"} retval = 
self.cloud.range_search(RANGE_DATA, filters) self.assertIsInstance(retval, list) self.assertEqual(1, len(retval)) self.assertEqual([RANGE_DATA[1]], retval) def test_range_search_2(self): filters = {"key1": "<=2", "key2": ">10"} retval = self.cloud.range_search(RANGE_DATA, filters) self.assertIsInstance(retval, list) self.assertEqual(2, len(retval)) self.assertEqual([RANGE_DATA[1], RANGE_DATA[3]], retval) def test_range_search_3(self): filters = {"key1": "2", "key2": "min"} retval = self.cloud.range_search(RANGE_DATA, filters) self.assertIsInstance(retval, list) self.assertEqual(0, len(retval)) def test_range_search_4(self): filters = {"key1": "max", "key2": "min"} retval = self.cloud.range_search(RANGE_DATA, filters) self.assertIsInstance(retval, list) self.assertEqual(0, len(retval)) def test_range_search_5(self): filters = {"key1": "min", "key2": "min"} retval = self.cloud.range_search(RANGE_DATA, filters) self.assertIsInstance(retval, list) self.assertEqual(1, len(retval)) self.assertEqual([RANGE_DATA[0]], retval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_cluster_templates.py0000664000175000017500000002222200000000000026615 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import testtools from openstack.container_infrastructure_management.v1 import cluster_template from openstack import exceptions from openstack.tests.unit import base cluster_template_obj = dict( apiserver_port=12345, cluster_distro='fake-distro', coe='fake-coe', created_at='fake-date', dns_nameserver='8.8.8.8', docker_volume_size=1, external_network_id='public', fixed_network=None, flavor_id='fake-flavor', https_proxy=None, human_id=None, image_id='fake-image', insecure_registry='https://192.168.0.10', keypair_id='fake-key', labels={}, links={}, master_flavor_id=None, name='fake-cluster-template', network_driver='fake-driver', no_proxy=None, public=False, registry_enabled=False, server_type='vm', tls_disabled=False, updated_at=None, uuid='fake-uuid', volume_driver=None, ) class TestClusterTemplates(base.TestCase): def _compare_clustertemplates(self, exp, real): self.assertDictEqual( cluster_template.ClusterTemplate(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def get_mock_url( self, service_type='container-infrastructure-management', base_url_append=None, append=None, resource=None, ): return super().get_mock_url( service_type=service_type, resource=resource, append=append, base_url_append=base_url_append, ) def test_list_cluster_templates_without_detail(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='clustertemplates'), json=dict(clustertemplates=[cluster_template_obj]), ) ] ) cluster_templates_list = self.cloud.list_cluster_templates() self._compare_clustertemplates( cluster_template_obj, cluster_templates_list[0], ) self.assert_calls() def test_list_cluster_templates_with_detail(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='clustertemplates'), json=dict(clustertemplates=[cluster_template_obj]), ) ] ) cluster_templates_list = self.cloud.list_cluster_templates(detail=True) self._compare_clustertemplates( cluster_template_obj, cluster_templates_list[0], ) self.assert_calls() def 
test_search_cluster_templates_by_name(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='clustertemplates'), json=dict(clustertemplates=[cluster_template_obj]), ) ] ) cluster_templates = self.cloud.search_cluster_templates( name_or_id='fake-cluster-template' ) self.assertEqual(1, len(cluster_templates)) self.assertEqual('fake-uuid', cluster_templates[0]['uuid']) self.assert_calls() def test_search_cluster_templates_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='clustertemplates'), json=dict(clustertemplates=[cluster_template_obj]), ) ] ) cluster_templates = self.cloud.search_cluster_templates( name_or_id='non-existent' ) self.assertEqual(0, len(cluster_templates)) self.assert_calls() def test_get_cluster_template(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='clustertemplates'), json=dict(clustertemplates=[cluster_template_obj]), ) ] ) r = self.cloud.get_cluster_template('fake-cluster-template') self.assertIsNotNone(r) self._compare_clustertemplates( cluster_template_obj, r, ) self.assert_calls() def test_get_cluster_template_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='clustertemplates'), json=dict(clustertemplates=[]), ) ] ) r = self.cloud.get_cluster_template('doesNotExist') self.assertIsNone(r) self.assert_calls() def test_create_cluster_template(self): json_response = cluster_template_obj.copy() kwargs = dict( name=cluster_template_obj['name'], image_id=cluster_template_obj['image_id'], keypair_id=cluster_template_obj['keypair_id'], coe=cluster_template_obj['coe'], ) self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='clustertemplates'), json=json_response, validate=dict(json=kwargs), ) ] ) response = self.cloud.create_cluster_template(**kwargs) self._compare_clustertemplates(json_response, response) self.assert_calls() def test_create_cluster_template_exception(self): 
self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='clustertemplates'), status_code=403, ) ] ) # TODO(mordred) requests here doens't give us a great story # for matching the old error message text. Investigate plumbing # an error message in to the adapter call so that we can give a # more informative error. Also, the test was originally catching # SDKException - but for some reason testtools will not # match the more specific HTTPError, even though it's a subclass # of SDKException. with testtools.ExpectedException(exceptions.ForbiddenException): self.cloud.create_cluster_template('fake-cluster-template') self.assert_calls() def test_delete_cluster_template(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='clustertemplates'), json=dict(clustertemplates=[cluster_template_obj]), ), dict( method='DELETE', uri=self.get_mock_url( resource='clustertemplates/fake-uuid' ), ), ] ) self.cloud.delete_cluster_template('fake-uuid') self.assert_calls() def test_update_cluster_template(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='clustertemplates'), json=dict(clustertemplates=[cluster_template_obj]), ), dict( method='PATCH', uri=self.get_mock_url( resource='clustertemplates/fake-uuid' ), status_code=200, validate=dict( json=[ { 'op': 'replace', 'path': '/name', 'value': 'new-cluster-template', } ] ), ), ] ) new_name = 'new-cluster-template' updated = self.cloud.update_cluster_template( 'fake-uuid', name=new_name ) self.assertEqual(new_name, updated.name) self.assert_calls() def test_coe_get_cluster_template(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='clustertemplates'), json=dict(clustertemplates=[cluster_template_obj]), ) ] ) r = self.cloud.get_cluster_template('fake-cluster-template') self.assertIsNotNone(r) self._compare_clustertemplates( cluster_template_obj, r, ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_clustering.py0000664000175000017500000006344600000000000025252 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from openstack.clustering.v1 import cluster from openstack.tests.unit import base CLUSTERING_DICT = { 'name': 'fake-name', 'profile_id': '1', 'desired_capacity': 1, 'config': {'a': 'b'}, 'max_size': 1, 'min_size': 1, 'timeout': 100, 'metadata': {}, } PROFILE_DICT = {'name': 'fake-profile-name', 'spec': {}, 'metadata': {}} POLICY_DICT = { 'name': 'fake-profile-name', 'spec': {}, } RECEIVER_DICT = { 'action': 'FAKE_CLUSTER_ACTION', 'cluster_id': 'fake-cluster-id', 'name': 'fake-receiver-name', 'params': {}, 'type': 'webhook', } NEW_CLUSTERING_DICT = copy.copy(CLUSTERING_DICT) NEW_CLUSTERING_DICT['id'] = '1' NEW_PROFILE_DICT = copy.copy(PROFILE_DICT) NEW_PROFILE_DICT['id'] = '1' NEW_POLICY_DICT = copy.copy(POLICY_DICT) NEW_POLICY_DICT['id'] = '1' NEW_RECEIVER_DICT = copy.copy(RECEIVER_DICT) NEW_RECEIVER_DICT['id'] = '1' class TestClustering(base.TestCase): def assertAreInstances(self, elements, elem_type): for e in elements: self.assertIsInstance(e, elem_type) def _compare_clusters(self, exp, real): self.assertEqual( cluster.Cluster(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def setUp(self): super().setUp() self.use_senlin() # def test_create_cluster(self): # self.register_uris([ # dict(method='GET', # 
uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles', '1']), # json={ # "profiles": [NEW_PROFILE_DICT]}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles']), # json={ # "profiles": [NEW_PROFILE_DICT]}), # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters']), # json=NEW_CLUSTERING_DICT) # ]) # profile = self.cloud.get_cluster_profile_by_id(NEW_PROFILE_DICT['id']) # c = self.cloud.create_cluster( # name=CLUSTERING_DICT['name'], # desired_capacity=CLUSTERING_DICT['desired_capacity'], # profile=profile, # config=CLUSTERING_DICT['config'], # max_size=CLUSTERING_DICT['max_size'], # min_size=CLUSTERING_DICT['min_size'], # metadata=CLUSTERING_DICT['metadata'], # timeout=CLUSTERING_DICT['timeout']) # # self._compare_clusters(NEW_CLUSTERING_DICT, c) # self.assert_calls() # # def test_create_cluster_exception(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles', '1']), # json={ # "profiles": [NEW_PROFILE_DICT]}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles']), # json={ # "profiles": [NEW_PROFILE_DICT]}), # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters']), # status_code=500) # ]) # profile = self.cloud.get_cluster_profile_by_id(NEW_PROFILE_DICT['id']) # with testtools.ExpectedException( # exc.OpenStackCloudHTTPError): # self.cloud.create_cluster(name='fake-name', profile=profile) # self.assert_calls() # # def test_get_cluster_by_id(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1']), # json={ # "cluster": NEW_CLUSTERING_DICT}) # ]) # cluster = self.cloud.get_cluster_by_id('1') # self.assertEqual(cluster['id'], '1') # self.assert_calls() # # def test_get_cluster_not_found_returns_false(self): # 
self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', # 'no-cluster']), # status_code=404) # ]) # c = self.cloud.get_cluster_by_id('no-cluster') # self.assertFalse(c) # self.assert_calls() # # def test_update_cluster(self): # new_max_size = 5 # updated_cluster = copy.copy(NEW_CLUSTERING_DICT) # updated_cluster['max_size'] = new_max_size # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1']), # json={ # "cluster": NEW_CLUSTERING_DICT}), # dict(method='PATCH', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1']), # json=updated_cluster, # ) # ]) # cluster = self.cloud.get_cluster_by_id('1') # c = self.cloud.update_cluster(cluster, new_max_size) # self.assertEqual(updated_cluster, c) # self.assert_calls() # # def test_delete_cluster(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters']), # json={ # "clusters": [NEW_CLUSTERING_DICT]}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1', # 'policies']), # json={"cluster_policies": []}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers']), # json={"receivers": []}), # dict(method='DELETE', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1']), # json=NEW_CLUSTERING_DICT) # ]) # self.assertTrue(self.cloud.delete_cluster('1')) # self.assert_calls() # # def test_list_clusters(self): # clusters = {'clusters': [NEW_CLUSTERING_DICT]} # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters']), # json=clusters) # ]) # c = self.cloud.list_clusters() # # self.assertIsInstance(c, list) # self.assertAreInstances(c, dict) # # self.assert_calls() # # def test_attach_policy_to_cluster(self): # policy = { # 
'policy_id': '1', # 'enabled': 'true' # } # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1']), # json={ # "cluster": NEW_CLUSTERING_DICT}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies', '1']), # json={ # "policy": NEW_POLICY_DICT}), # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1', # 'actions']), # json={'policy_attach': policy}) # ]) # cluster = self.cloud.get_cluster_by_id('1') # policy = self.cloud.get_cluster_policy_by_id('1') # p = self.cloud.attach_policy_to_cluster(cluster, policy, 'true') # self.assertTrue(p) # self.assert_calls() # # def test_detach_policy_from_cluster(self): # updated_cluster = copy.copy(NEW_CLUSTERING_DICT) # updated_cluster['policies'] = ['1'] # detached_cluster = copy.copy(NEW_CLUSTERING_DICT) # detached_cluster['policies'] = [] # # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1']), # json={ # "cluster": NEW_CLUSTERING_DICT}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies', '1']), # json={ # "policy": NEW_POLICY_DICT}), # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1', # 'actions']), # json={'policy_detach': {'policy_id': '1'}}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1']), # json={ # "cluster": updated_cluster}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1']), # json={ # "cluster": detached_cluster}), # ]) # cluster = self.cloud.get_cluster_by_id('1') # policy = self.cloud.get_cluster_policy_by_id('1') # p = self.cloud.detach_policy_from_cluster(cluster, policy, wait=True) # self.assertTrue(p) # self.assert_calls() # # def test_get_policy_on_cluster_by_id(self): # 
cluster_policy = { # "cluster_id": "1", # "cluster_name": "cluster1", # "enabled": True, # "id": "1", # "policy_id": "1", # "policy_name": "policy1", # "policy_type": "senlin.policy.deletion-1.0" # } # # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1', # 'policies', '1']), # json={ # "cluster_policy": cluster_policy}) # ]) # policy = self.cloud.get_policy_on_cluster('1', '1') # self.assertEqual(policy['cluster_id'], '1') # self.assert_calls() # # def test_get_policy_on_cluster_not_found_returns_false(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1', # 'policies', # 'no-policy']), # status_code=404) # ]) # p = self.cloud.get_policy_on_cluster('1', 'no-policy') # self.assertFalse(p) # self.assert_calls() # # def test_update_policy_on_cluster(self): # policy = { # 'policy_id': '1', # 'enabled': 'true' # } # updated_cluster = copy.copy(NEW_CLUSTERING_DICT) # updated_cluster['policies'] = policy # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1']), # json={ # "cluster": NEW_CLUSTERING_DICT}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies', # '1']), # json={ # "policy": NEW_POLICY_DICT}), # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1', # 'actions']), # json={'policies': []}) # ]) # cluster = self.cloud.get_cluster_by_id('1') # policy = self.cloud.get_cluster_policy_by_id('1') # p = self.cloud.update_policy_on_cluster(cluster, policy, True) # self.assertTrue(p) # self.assert_calls() # # def test_get_policy_on_cluster(self): # cluster_policy = { # 'cluster_id': '1', # 'cluster_name': 'cluster1', # 'enabled': 'true', # 'id': '1', # 'policy_id': '1', # 'policy_name': 'policy1', # 'policy_type': 'type' # } # # self.register_uris([ # 
dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters', '1', # 'policies', '1']), # json={ # "cluster_policy": cluster_policy}) # ]) # get_policy = self.cloud.get_policy_on_cluster('1', '1') # self.assertEqual(get_policy, cluster_policy) # self.assert_calls() # # def test_create_cluster_profile(self): # self.register_uris([ # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles']), # json={'profile': NEW_PROFILE_DICT}) # ]) # p = self.cloud.create_cluster_profile('fake-profile-name', {}) # # self.assertEqual(NEW_PROFILE_DICT, p) # self.assert_calls() # # def test_create_cluster_profile_exception(self): # self.register_uris([ # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles']), # status_code=500) # ]) # with testtools.ExpectedException( # exc.OpenStackCloudHTTPError, # "Error creating profile fake-profile-name.*"): # self.cloud.create_cluster_profile('fake-profile-name', {}) # self.assert_calls() # # def test_list_cluster_profiles(self): # profiles = {'profiles': [NEW_PROFILE_DICT]} # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles']), # json=profiles) # ]) # p = self.cloud.list_cluster_profiles() # # self.assertIsInstance(p, list) # self.assertAreInstances(p, dict) # # self.assert_calls() # # def test_get_cluster_profile_by_id(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles', '1']), # json={ # "profile": NEW_PROFILE_DICT}) # ]) # p = self.cloud.get_cluster_profile_by_id('1') # self.assertEqual(p['id'], '1') # self.assert_calls() # # def test_get_cluster_profile_not_found_returns_false(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles', # 'no-profile']), # status_code=404) # ]) # p = 
self.cloud.get_cluster_profile_by_id('no-profile') # self.assertFalse(p) # self.assert_calls() # # def test_update_cluster_profile(self): # new_name = "new-name" # updated_profile = copy.copy(NEW_PROFILE_DICT) # updated_profile['name'] = new_name # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles']), # json={ # "profiles": [NEW_PROFILE_DICT]}), # dict(method='PATCH', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles', '1']), # json=updated_profile, # ) # ]) # p = self.cloud.update_cluster_profile('1', new_name=new_name) # self.assertEqual(updated_profile, p) # self.assert_calls() # # def test_delete_cluster_profile(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles', '1']), # json={ # "profile": NEW_PROFILE_DICT}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters']), # json={'clusters': [{'cluster': CLUSTERING_DICT}]}), # dict(method='DELETE', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'profiles', '1']), # json=NEW_PROFILE_DICT) # ]) # profile = self.cloud.get_cluster_profile_by_id('1') # self.assertTrue(self.cloud.delete_cluster_profile(profile)) # self.assert_calls() # # def test_create_cluster_policy(self): # self.register_uris([ # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies']), # json={'policy': NEW_POLICY_DICT}) # ]) # p = self.cloud.create_cluster_policy('fake-policy-name', {}) # # self.assertEqual(NEW_POLICY_DICT, p) # self.assert_calls() # # def test_create_cluster_policy_exception(self): # self.register_uris([ # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies']), # status_code=500) # ]) # with testtools.ExpectedException( # exc.OpenStackCloudHTTPError, # "Error creating policy fake-policy-name.*"): # 
self.cloud.create_cluster_policy('fake-policy-name', {}) # self.assert_calls() # # def test_list_cluster_policies(self): # policies = {'policies': [NEW_POLICY_DICT]} # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies']), # json=policies) # ]) # p = self.cloud.list_cluster_policies() # # self.assertIsInstance(p, list) # self.assertAreInstances(p, dict) # # self.assert_calls() # # def test_get_cluster_policy_by_id(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies', '1']), # json={ # "policy": NEW_POLICY_DICT}) # ]) # p = self.cloud.get_cluster_policy_by_id('1') # self.assertEqual(p['id'], '1') # self.assert_calls() # # def test_get_cluster_policy_not_found_returns_false(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies', # 'no-policy']), # status_code=404) # ]) # p = self.cloud.get_cluster_policy_by_id('no-policy') # self.assertFalse(p) # self.assert_calls() # # def test_update_cluster_policy(self): # new_name = "new-name" # updated_policy = copy.copy(NEW_POLICY_DICT) # updated_policy['name'] = new_name # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies']), # json={ # "policies": [NEW_POLICY_DICT]}), # dict(method='PATCH', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies', '1']), # json=updated_policy, # ) # ]) # p = self.cloud.update_cluster_policy('1', new_name=new_name) # self.assertEqual(updated_policy, p) # self.assert_calls() # # def test_delete_cluster_policy(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies', '1']), # json={ # "policy": NEW_POLICY_DICT}), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters']), # json={}), # 
dict(method='DELETE', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'policies', '1']), # json=NEW_POLICY_DICT) # ]) # self.assertTrue(self.cloud.delete_cluster_policy('1')) # self.assert_calls() # # def test_create_cluster_receiver(self): # clusters = {'clusters': [NEW_CLUSTERING_DICT]} # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters']), # json=clusters), # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers']), # json={'receiver': NEW_RECEIVER_DICT}) # ]) # r = self.cloud.create_cluster_receiver('fake-receiver-name', {}) # # self.assertEqual(NEW_RECEIVER_DICT, r) # self.assert_calls() # # def test_create_cluster_receiver_exception(self): # clusters = {'clusters': [NEW_CLUSTERING_DICT]} # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'clusters']), # json=clusters), # dict(method='POST', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers']), # status_code=500), # ]) # with testtools.ExpectedException( # exc.OpenStackCloudHTTPError, # "Error creating receiver fake-receiver-name.*"): # self.cloud.create_cluster_receiver('fake-receiver-name', {}) # self.assert_calls() # # def test_list_cluster_receivers(self): # receivers = {'receivers': [NEW_RECEIVER_DICT]} # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers']), # json=receivers) # ]) # r = self.cloud.list_cluster_receivers() # # self.assertIsInstance(r, list) # self.assertAreInstances(r, dict) # # self.assert_calls() # # def test_get_cluster_receiver_by_id(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers', '1']), # json={ # "receiver": NEW_RECEIVER_DICT}) # ]) # r = self.cloud.get_cluster_receiver_by_id('1') # self.assertEqual(r['id'], '1') # 
self.assert_calls() # # def test_get_cluster_receiver_not_found_returns_false(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers', # 'no-receiver']), # json={'receivers': []}) # ]) # p = self.cloud.get_cluster_receiver_by_id('no-receiver') # self.assertFalse(p) # self.assert_calls() # # def test_update_cluster_receiver(self): # new_name = "new-name" # updated_receiver = copy.copy(NEW_RECEIVER_DICT) # updated_receiver['name'] = new_name # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers']), # json={ # "receivers": [NEW_RECEIVER_DICT]}), # dict(method='PATCH', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers', '1']), # json=updated_receiver, # ) # ]) # r = self.cloud.update_cluster_receiver('1', new_name=new_name) # self.assertEqual(updated_receiver, r) # self.assert_calls() # # def test_delete_cluster_receiver(self): # self.register_uris([ # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers']), # json={ # "receivers": [NEW_RECEIVER_DICT]}), # dict(method='DELETE', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers', '1']), # json=NEW_RECEIVER_DICT), # dict(method='GET', # uri=self.get_mock_url( # 'clustering', 'public', append=['v1', 'receivers', '1']), # json={}), # ]) # self.assertTrue(self.cloud.delete_cluster_receiver('1', wait=True)) # self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_coe_clusters.py0000664000175000017500000001532400000000000025555 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.container_infrastructure_management.v1 import cluster from openstack.tests.unit import base coe_cluster_obj = dict( status="CREATE_IN_PROGRESS", cluster_template_id="0562d357-8641-4759-8fed-8173f02c9633", uuid="731387cf-a92b-4c36-981e-3271d63e5597", links=[{}, {}], stack_id="31c1ee6c-081e-4f39-9f0f-f1d87a7defa1", keypair="my_keypair", master_count=3, create_timeout=60, node_count=10, name="k8s", created_at="2016-08-29T06:51:31+00:00", api_address="https://172.24.4.6:6443", discovery_url="https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", updated_at="2016-08-29T06:53:24+00:00", coe_version="v1.2.0", master_addresses=["172.24.4.6"], node_addresses=["172.24.4.13"], status_reason="Stack CREATE completed successfully", ) class TestCOEClusters(base.TestCase): def _compare_clusters(self, exp, real): self.assertDictEqual( cluster.Cluster(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def get_mock_url( self, service_type="container-infrastructure-management", base_url_append=None, append=None, resource=None, ): return super().get_mock_url( service_type=service_type, resource=resource, append=append, base_url_append=base_url_append, ) def test_list_coe_clusters(self): self.register_uris( [ dict( method="GET", uri=self.get_mock_url(resource="clusters"), json=dict(clusters=[coe_cluster_obj]), ) ] ) cluster_list = self.cloud.list_coe_clusters() self._compare_clusters( coe_cluster_obj, cluster_list[0], ) self.assert_calls() def test_create_coe_cluster(self): json_response = dict(uuid=coe_cluster_obj.get("uuid")) kwargs = dict( 
name=coe_cluster_obj["name"], cluster_template_id=coe_cluster_obj["cluster_template_id"], master_count=coe_cluster_obj["master_count"], node_count=coe_cluster_obj["node_count"], ) self.register_uris( [ dict( method="POST", uri=self.get_mock_url(resource="clusters"), json=json_response, validate=dict(json=kwargs), ), ] ) response = self.cloud.create_coe_cluster(**kwargs) expected = kwargs.copy() expected.update(**json_response) self._compare_clusters(expected, response) self.assert_calls() def test_search_coe_cluster_by_name(self): self.register_uris( [ dict( method="GET", uri=self.get_mock_url(resource="clusters"), json=dict(clusters=[coe_cluster_obj]), ) ] ) coe_clusters = self.cloud.search_coe_clusters(name_or_id="k8s") self.assertEqual(1, len(coe_clusters)) self.assertEqual(coe_cluster_obj["uuid"], coe_clusters[0]["id"]) self.assert_calls() def test_search_coe_cluster_not_found(self): self.register_uris( [ dict( method="GET", uri=self.get_mock_url(resource="clusters"), json=dict(clusters=[coe_cluster_obj]), ) ] ) coe_clusters = self.cloud.search_coe_clusters( name_or_id="non-existent" ) self.assertEqual(0, len(coe_clusters)) self.assert_calls() def test_get_coe_cluster(self): self.register_uris( [ dict( method="GET", uri=self.get_mock_url(resource="clusters"), json=dict(clusters=[coe_cluster_obj]), ) ] ) r = self.cloud.get_coe_cluster(coe_cluster_obj["name"]) self.assertIsNotNone(r) self._compare_clusters( coe_cluster_obj, r, ) self.assert_calls() def test_get_coe_cluster_not_found(self): self.register_uris( [ dict( method="GET", uri=self.get_mock_url(resource="clusters"), json=dict(clusters=[]), ) ] ) r = self.cloud.get_coe_cluster("doesNotExist") self.assertIsNone(r) self.assert_calls() def test_delete_coe_cluster(self): self.register_uris( [ dict( method="GET", uri=self.get_mock_url(resource="clusters"), json=dict(clusters=[coe_cluster_obj]), ), dict( method="DELETE", uri=self.get_mock_url( resource="clusters", append=[coe_cluster_obj['uuid']] ), ), ] ) 
self.cloud.delete_coe_cluster(coe_cluster_obj["uuid"]) self.assert_calls() def test_update_coe_cluster(self): self.register_uris( [ dict( method="GET", uri=self.get_mock_url(resource="clusters"), json=dict(clusters=[coe_cluster_obj]), ), dict( method="PATCH", uri=self.get_mock_url( resource="clusters", append=[coe_cluster_obj["uuid"]] ), status_code=200, validate=dict( json=[ { "op": "replace", "path": "/node_count", "value": 3, } ] ), ), ] ) self.cloud.update_coe_cluster(coe_cluster_obj["uuid"], node_count=3) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_coe_clusters_certificate.py0000664000175000017500000000625700000000000030124 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.container_infrastructure_management.v1 import ( cluster_certificate, ) from openstack.tests.unit import base coe_cluster_ca_obj = dict( cluster_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c", pem="-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\n", bay_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c", links=[], ) coe_cluster_signed_cert_obj = dict( cluster_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c', pem='-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----', bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c', links=[], csr=( '-----BEGIN CERTIFICATE REQUEST-----\nMIICfz==' '\n-----END CERTIFICATE REQUEST-----\n' ), ) class TestCOEClusters(base.TestCase): def _compare_cluster_certs(self, exp, real): self.assertDictEqual( cluster_certificate.ClusterCertificate(**exp).to_dict( computed=False ), real.to_dict(computed=False), ) def get_mock_url( self, service_type='container-infrastructure-management', base_url_append=None, append=None, resource=None, ): return super().get_mock_url( service_type=service_type, resource=resource, append=append, base_url_append=base_url_append, ) def test_get_coe_cluster_certificate(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='certificates', append=[coe_cluster_ca_obj['cluster_uuid']], ), json=coe_cluster_ca_obj, ) ] ) ca_cert = self.cloud.get_coe_cluster_certificate( coe_cluster_ca_obj['cluster_uuid'] ) self._compare_cluster_certs(coe_cluster_ca_obj, ca_cert) self.assert_calls() def test_sign_coe_cluster_certificate(self): self.register_uris( [ dict( method='POST', uri=self.get_mock_url(resource='certificates'), json={ "cluster_uuid": coe_cluster_signed_cert_obj[ 'cluster_uuid' ], "csr": coe_cluster_signed_cert_obj['csr'], }, ) ] ) self.cloud.sign_coe_cluster_certificate( coe_cluster_signed_cert_obj['cluster_uuid'], coe_cluster_signed_cert_obj['csr'], ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_compute.py0000664000175000017500000003265000000000000024540 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestServers(base.TestCase): def test_get_server(self): server1 = fakes.make_fake_server('123', 'mickey') server2 = fakes.make_fake_server('345', 'mouse') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [server1, server2]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={"networks": []}, ), ] ) r = self.cloud.get_server('mickey') self.assertIsNotNone(r) self.assertEqual(server1['name'], r['name']) self.assert_calls() def test_get_server_not_found(self): self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': []}, ), ] ) r = self.cloud.get_server('doesNotExist') self.assertIsNone(r) self.assert_calls() def test_list_servers(self): server_id = str(uuid.uuid4()) server_name = self.getUniqueString('name') fake_server = fakes.make_fake_server(server_id, server_name) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 
'compute', 'public', append=['servers', 'detail'] ), json={'servers': [fake_server]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={"networks": []}, ), ] ) r = self.cloud.list_servers() self.assertEqual(1, len(r)) self.assertEqual(server_name, r[0]['name']) self.assert_calls() def test_list_server_private_ip(self): self.has_neutron = True fake_server = { "OS-EXT-STS:task_state": None, "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:b4:a3:07", "version": 4, "addr": "10.4.0.13", "OS-EXT-IPS:type": "fixed", }, { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:b4:a3:07", "version": 4, "addr": "89.40.216.229", "OS-EXT-IPS:type": "floating", }, ] }, "links": [ {"href": "http://example.com/images/95e4c4", "rel": "self"}, { "href": "http://example.com/images/95e4c4", "rel": "bookmark", }, ], "image": { "id": "95e4c449-8abf-486e-97d9-dc3f82417d2d", "links": [ { "href": "http://example.com/images/95e4c4", "rel": "bookmark", } ], }, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2018-03-01T02:44:50.000000", "flavor": { "id": "3bd99062-2fe8-4eac-93f0-9200cc0f97ae", "links": [ { "href": "http://example.com/flavors/95e4c4", "rel": "bookmark", } ], }, "id": "97fe35e9-756a-41a2-960a-1d057d2c9ee4", "security_groups": [{"name": "default"}], "user_id": "c17534835f8f42bf98fc367e0bf35e09", "OS-DCF:diskConfig": "MANUAL", "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "OS-EXT-AZ:availability_zone": "nova", "metadata": {}, "status": "ACTIVE", "updated": "2018-03-01T02:44:51Z", "hostId": "", "OS-SRV-USG:terminated_at": None, "key_name": None, "name": "mttest", "created": "2018-03-01T02:44:46Z", "tenant_id": "65222a4d09ea4c68934fa1028c77f394", "os-extended-volumes:volumes_attached": [], "config_drive": "", } fake_networks = { "networks": [ { "status": "ACTIVE", "router:external": True, "availability_zone_hints": [], "availability_zones": ["nova"], "description": None, "subnets": [ 
"df3e17fa-a4b2-47ae-9015-bc93eb076ba2", "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", ], "shared": False, "tenant_id": "a564613210ee43708b8a7fc6274ebd63", "tags": [], "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 "mtu": 1550, "is_default": False, "admin_state_up": True, "revision_number": 0, "ipv4_address_scope": None, "port_security_enabled": True, "project_id": "a564613210ee43708b8a7fc6274ebd63", "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", "name": "ext-net", }, { "status": "ACTIVE", "router:external": False, "availability_zone_hints": [], "availability_zones": ["nova"], "description": "", "subnets": ["f0ad1df5-53ee-473f-b86b-3604ea5591e9"], "shared": False, "tenant_id": "65222a4d09ea4c68934fa1028c77f394", "created_at": "2016-10-22T13:46:26Z", "tags": [], "ipv6_address_scope": None, "updated_at": "2016-10-22T13:46:26Z", "admin_state_up": True, "mtu": 1500, "revision_number": 0, "ipv4_address_scope": None, "port_security_enabled": True, "project_id": "65222a4d09ea4c68934fa1028c77f394", "id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", "name": "private", }, ] } fake_subnets = { "subnets": [ { "service_types": [], "description": "", "enable_dhcp": True, "tags": [], "network_id": "827c6bb6-492f-4168-9577-f3a131eb29e8", "tenant_id": "65222a4d09ea4c68934fa1028c77f394", "created_at": "2017-06-12T13:23:57Z", "dns_nameservers": [], "updated_at": "2017-06-12T13:23:57Z", "gateway_ip": "10.24.4.1", "ipv6_ra_mode": None, "allocation_pools": [ {"start": "10.24.4.2", "end": "10.24.4.254"} ], "host_routes": [], "revision_number": 0, "ip_version": 4, "ipv6_address_mode": None, "cidr": "10.24.4.0/24", "project_id": "65222a4d09ea4c68934fa1028c77f394", "id": "3f0642d9-4644-4dff-af25-bcf64f739698", "subnetpool_id": None, "name": "foo_subnet", }, { "service_types": [], "description": "", "enable_dhcp": True, "tags": [], "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", "tenant_id": "65222a4d09ea4c68934fa1028c77f394", 
"created_at": "2016-10-22T13:46:26Z", "dns_nameservers": ["89.36.90.101", "89.36.90.102"], "updated_at": "2016-10-22T13:46:26Z", "gateway_ip": "10.4.0.1", "ipv6_ra_mode": None, "allocation_pools": [ {"start": "10.4.0.2", "end": "10.4.0.200"} ], "host_routes": [], "revision_number": 0, "ip_version": 4, "ipv6_address_mode": None, "cidr": "10.4.0.0/24", "project_id": "65222a4d09ea4c68934fa1028c77f394", "id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9", "subnetpool_id": None, "name": "private-subnet-ipv4", }, ] } self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [fake_server]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json=fake_networks, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json=fake_subnets, ), ] ) r = self.cloud.get_server('97fe35e9-756a-41a2-960a-1d057d2c9ee4') self.assertEqual('10.4.0.13', r['private_v4']) self.assert_calls() def test_list_servers_all_projects(self): """This test verifies that when list_servers is called with `all_projects=True` that it passes `all_tenants=True` to nova.""" self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=['all_tenants=True'], ), complete_qs=True, json={'servers': []}, ), ] ) self.cloud.list_servers(all_projects=True) self.assert_calls() def test_list_servers_filters(self): """This test verifies that when list_servers is called with `filters` dict that it passes it to nova.""" self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=[ 'deleted=True', 'changes-since=2014-12-03T00:00:00Z', ], ), complete_qs=True, json={'servers': []}, ), ] ) self.cloud.list_servers( 
filters={'deleted': True, 'changes-since': '2014-12-03T00:00:00Z'} ) self.assert_calls() def test_list_servers_exception(self): self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), status_code=400, ), ] ) self.assertRaises(exceptions.SDKException, self.cloud.list_servers) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_create_server.py0000664000175000017500000016726500000000000025730 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_create_server ---------------------------------- Tests for the `create_server` command. """ import base64 from unittest import mock import uuid from openstack.compute.v2 import server from openstack import connection from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestCreateServer(base.TestCase): def _compare_servers(self, exp, real): self.assertDictEqual( server.Server(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def test_create_server_with_get_exception(self): """ Test that a bad status code when attempting to get the server instance raises an exception in create_server. 
""" build_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), status_code=404, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_server, 'server-name', {'id': 'image-id'}, {'id': 'flavor-id'}, ) self.assert_calls() def test_create_server_with_server_error(self): """ Test that a server error before we return or begin waiting for the server instance spawn raises an exception in create_server. """ build_server = fakes.make_fake_server('1234', '', 'BUILD') error_server = fakes.make_fake_server('1234', '', 'ERROR') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': error_server}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_server, 'server-name', {'id': 'image-id'}, {'id': 'flavor-id'}, ) self.assert_calls() def test_create_server_wait_server_error(self): """ Test that a server error while waiting for the server to spawn raises an exception in create_server. 
""" build_server = fakes.make_fake_server('1234', '', 'BUILD') error_server = fakes.make_fake_server('1234', '', 'ERROR') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [build_server]}, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [error_server]}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_server, 'server-name', dict(id='image-id'), dict(id='flavor-id'), wait=True, ) self.assert_calls() def test_create_server_with_timeout(self): """ Test that a timeout while waiting for the server to spawn raises an exception in create_server. 
""" fake_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [fake_server]}, ), ] ) self.assertRaises( exceptions.ResourceTimeout, self.cloud.create_server, 'server-name', dict(id='image-id'), dict(id='flavor-id'), wait=True, timeout=0.01, ) # We poll at the end, so we don't know real counts self.assert_calls(do_count=False) def test_create_server_no_wait(self): """ Test that create_server with no wait and no exception in the create call returns the server instance. 
""" fake_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': fake_server}, ), ] ) self.assertDictEqual( server.Server(**fake_server).to_dict(computed=False), self.cloud.create_server( name='server-name', image=dict(id='image-id'), flavor=dict(id='flavor-id'), ).to_dict(computed=False), ) self.assert_calls() def test_create_server_config_drive(self): """ Test that config_drive gets passed in properly """ fake_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'config_drive': True, 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': fake_server}, ), ] ) self.assertDictEqual( server.Server(**fake_server).to_dict(computed=False), self.cloud.create_server( name='server-name', image=dict(id='image-id'), flavor=dict(id='flavor-id'), config_drive=True, ).to_dict(computed=False), ) self.assert_calls() def test_create_server_config_drive_none(self): """ Test that config_drive gets not passed 
in properly """ fake_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': fake_server}, ), ] ) self.assertEqual( server.Server(**fake_server).to_dict(computed=False), self.cloud.create_server( name='server-name', image=dict(id='image-id'), flavor=dict(id='flavor-id'), config_drive=None, ).to_dict(computed=False), ) self.assert_calls() def test_create_server_with_admin_pass_no_wait(self): """ Test that a server with an admin_pass passed returns the password """ admin_pass = self.getUniqueString('password') fake_server = fakes.make_fake_server('1234', '', 'BUILD') fake_create_server = fakes.make_fake_server( '1234', '', 'BUILD', admin_pass=admin_pass ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_create_server}, validate=dict( json={ 'server': { 'adminPass': admin_pass, 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': fake_server}, ), ] ) self.assertEqual( admin_pass, self.cloud.create_server( name='server-name', image=dict(id='image-id'), flavor=dict(id='flavor-id'), 
admin_pass=admin_pass, )['admin_password'], ) self.assert_calls() @mock.patch.object(connection.Connection, "wait_for_server") def test_create_server_with_admin_pass_wait(self, mock_wait): """ Test that a server with an admin_pass passed returns the password """ admin_pass = self.getUniqueString('password') fake_server = fakes.make_fake_server('1234', '', 'BUILD') fake_server_with_pass = fakes.make_fake_server( '1234', '', 'BUILD', admin_pass=admin_pass ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_server_with_pass}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'adminPass': admin_pass, 'name': 'server-name', 'networks': 'auto', } } ), ), ] ) # The wait returns non-password server mock_wait.return_value = server.Server(**fake_server) new_server = self.cloud.create_server( name='server-name', image=dict(id='image-id'), flavor=dict(id='flavor-id'), admin_pass=admin_pass, wait=True, ) # Assert that we did wait self.assertTrue(mock_wait.called) # Even with the wait, we should still get back a passworded server self.assertEqual( new_server['admin_password'], fake_server_with_pass['adminPass'] ) self.assert_calls() def test_create_server_user_data_base64(self): """ Test that a server passed user-data sends it base64 encoded. 
""" user_data = self.getUniqueString('user_data') user_data_b64 = base64.b64encode(user_data.encode('utf-8')).decode( 'utf-8' ) fake_server = fakes.make_fake_server('1234', '', 'BUILD') fake_server['user_data'] = user_data self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'user_data': user_data_b64, 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': fake_server}, ), ] ) self.cloud.create_server( name='server-name', image=dict(id='image-id'), flavor=dict(id='flavor-id'), userdata=user_data, wait=False, ) self.assert_calls() @mock.patch.object(connection.Connection, "get_active_server") @mock.patch.object(connection.Connection, "get_server") def test_wait_for_server(self, mock_get_server, mock_get_active_server): """ Test that waiting for a server returns the server instance when its status changes to "ACTIVE". 
""" # TODO(mordred) Rework this to not mock methods building_server = {'id': 'fake_server_id', 'status': 'BUILDING'} active_server = {'id': 'fake_server_id', 'status': 'ACTIVE'} mock_get_server.side_effect = iter([building_server, active_server]) mock_get_active_server.side_effect = iter( [building_server, active_server] ) server = self.cloud.wait_for_server(building_server) self.assertEqual(2, mock_get_server.call_count) mock_get_server.assert_has_calls( [ mock.call(building_server['id']), mock.call(active_server['id']), ] ) self.assertEqual(2, mock_get_active_server.call_count) mock_get_active_server.assert_has_calls( [ mock.call( server=building_server, reuse=True, auto_ip=True, ips=None, ip_pool=None, wait=True, timeout=mock.ANY, nat_destination=None, ), mock.call( server=active_server, reuse=True, auto_ip=True, ips=None, ip_pool=None, wait=True, timeout=mock.ANY, nat_destination=None, ), ] ) self.assertEqual('ACTIVE', server['status']) @mock.patch.object(connection.Connection, 'wait_for_server') def test_create_server_wait(self, mock_wait): """ Test that create_server with a wait actually does the wait. 
""" # TODO(mordred) Make this a full proper response fake_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', } } ), ), ] ) self.cloud.create_server( 'server-name', dict(id='image-id'), dict(id='flavor-id'), wait=True, ), # This is a pretty dirty hack to ensure we in principle use object with # expected properties srv = server.Server.existing( connection=self.cloud, min_count=1, max_count=1, networks='auto', imageRef='image-id', flavorRef='flavor-id', **fake_server, ) mock_wait.assert_called_once_with( srv, auto_ip=True, ips=None, ip_pool=None, reuse=True, timeout=180, nat_destination=None, ) self.assert_calls() @mock.patch.object(connection.Connection, 'add_ips_to_server') def test_create_server_no_addresses(self, mock_add_ips_to_server): """ Test that create_server with a wait throws an exception if the server doesn't have addresses. 
""" build_server = fakes.make_fake_server('1234', '', 'BUILD') fake_server = fakes.make_fake_server( '1234', '', 'ACTIVE', addresses={} ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [build_server]}, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [fake_server]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=['device_id=1234'], ), json={'ports': []}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), status_code=404, ), ] ) mock_add_ips_to_server.return_value = fake_server self.assertRaises( exceptions.SDKException, self.cloud.create_server, 'server-name', {'id': 'image-id'}, {'id': 'flavor-id'}, wait=True, ) self.assert_calls() def test_create_server_network_with_no_nics(self): """ Verify that if 'network' is supplied, and 'nics' is not, that we attempt to get the network for the server. 
""" build_server = fakes.make_fake_server('1234', '', 'BUILD') network = {'id': 'network-id', 'name': 'network-name'} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', 'network-name'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=network-name'], ), json={'networks': [network]}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'networks': [{'uuid': 'network-id'}], 'name': 'server-name', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': build_server}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': [network]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnets': []}, ), ] ) self.cloud.create_server( 'server-name', dict(id='image-id'), dict(id='flavor-id'), network='network-name', ) self.assert_calls() def test_create_server_network_with_empty_nics(self): """ Verify that if 'network' is supplied, along with an empty 'nics' list, it's treated the same as if 'nics' were not included. 
""" network = {'id': 'network-id', 'name': 'network-name'} build_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', 'network-name'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=network-name'], ), json={'networks': [network]}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'networks': [{'uuid': 'network-id'}], 'name': 'server-name', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': build_server}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': [network]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnets': []}, ), ] ) self.cloud.create_server( 'server-name', dict(id='image-id'), dict(id='flavor-id'), network='network-name', nics=[], ) self.assert_calls() def test_create_server_network_fixed_ip(self): """ Verify that if 'fixed_ip' is supplied in nics, we pass it to networks appropriately. 
""" network = {'id': 'network-id', 'name': 'network-name'} fixed_ip = '10.0.0.1' build_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'networks': [{'fixed_ip': fixed_ip}], 'name': 'server-name', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': build_server}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': [network]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnets': []}, ), ] ) self.cloud.create_server( 'server-name', dict(id='image-id'), dict(id='flavor-id'), nics=[{'fixed_ip': fixed_ip}], ) self.assert_calls() def test_create_server_network_v4_fixed_ip(self): """ Verify that if 'v4-fixed-ip' is supplied in nics, we pass it to networks appropriately. 
""" network = {'id': 'network-id', 'name': 'network-name'} fixed_ip = '10.0.0.1' build_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'networks': [{'fixed_ip': fixed_ip}], 'name': 'server-name', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': build_server}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': [network]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnets': []}, ), ] ) self.cloud.create_server( 'server-name', dict(id='image-id'), dict(id='flavor-id'), nics=[{'fixed_ip': fixed_ip}], ) self.assert_calls() def test_create_server_network_v6_fixed_ip(self): """ Verify that if 'v6-fixed-ip' is supplied in nics, we pass it to networks appropriately. """ network = {'id': 'network-id', 'name': 'network-name'} # Note - it doesn't actually have to be a v6 address - it's just # an alias. 
fixed_ip = 'fe80::28da:5fff:fe57:13ed' build_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'networks': [{'fixed_ip': fixed_ip}], 'name': 'server-name', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': build_server}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': [network]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnets': []}, ), ] ) self.cloud.create_server( 'server-name', dict(id='image-id'), dict(id='flavor-id'), nics=[{'fixed_ip': fixed_ip}], ) self.assert_calls() def test_create_server_network_fixed_ip_conflicts(self): """ Verify that if 'fixed_ip' and 'v4-fixed-ip' are both supplied in nics, we throw an exception. """ # Note - it doesn't actually have to be a v6 address - it's just # an alias. 
self.use_nothing() fixed_ip = '10.0.0.1' self.assertRaises( exceptions.SDKException, self.cloud.create_server, 'server-name', dict(id='image-id'), dict(id='flavor-id'), nics=[{'fixed_ip': fixed_ip, 'v4-fixed-ip': fixed_ip}], ) self.assert_calls() def test_create_server_get_flavor_image(self): self.use_glance() image_id = str(uuid.uuid4()) fake_image_dict = fakes.make_fake_image(image_id=image_id) fake_image_search_return = {'images': [fake_image_dict]} build_server = fakes.make_fake_server('1234', '', 'BUILD') active_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ dict( method='GET', uri=f'https://image.example.com/v2/images/{image_id}', json=fake_image_search_return, ), self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['flavors', 'vanilla'], qs_elements=[], ), json=fakes.FAKE_FLAVOR, ), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': fakes.FLAVOR_ID, 'imageRef': image_id, 'max_count': 1, 'min_count': 1, 'networks': [{'uuid': 'some-network'}], 'name': 'server-name', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': active_server}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), ] ) self.cloud.create_server( 'server-name', image_id, 'vanilla', nics=[{'net-id': 'some-network'}], wait=False, ) self.assert_calls() def test_create_server_nics_port_id(self): '''Verify port-id in nics input turns into port in REST.''' build_server = fakes.make_fake_server('1234', '', 'BUILD') active_server = fakes.make_fake_server('1234', '', 'BUILD') image_id = uuid.uuid4().hex port_id = uuid.uuid4().hex self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] 
), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': fakes.FLAVOR_ID, 'imageRef': image_id, 'max_count': 1, 'min_count': 1, 'networks': [{'port': port_id}], 'name': 'server-name', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': active_server}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), ] ) self.cloud.create_server( 'server-name', dict(id=image_id), dict(id=fakes.FLAVOR_ID), nics=[{'port-id': port_id}], wait=False, ) self.assert_calls() def test_create_boot_attach_volume(self): build_server = fakes.make_fake_server('1234', '', 'BUILD') active_server = fakes.make_fake_server('1234', '', 'BUILD') volume_id = '20e82d93-14fa-475b-bfcc-f5e6246dd194' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume_id] ), json={ 'volume': { 'id': volume_id, 'status': 'available', 'size': 1, 'availability_zone': 'cinder', 'name': '', 'description': None, 'volume_type': 'lvmdriver-1', } }, ), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'block_device_mapping_v2': [ { 'boot_index': 0, 'delete_on_termination': True, 'destination_type': 'local', 'source_type': 'image', 'uuid': 'image-id', }, { 'boot_index': '-1', 'delete_on_termination': False, 'destination_type': 'volume', 'source_type': 'volume', 'uuid': volume_id, }, ], 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), 
json={'server': active_server}, ), ] ) self.cloud.create_server( name='server-name', image=dict(id='image-id'), flavor=dict(id='flavor-id'), boot_from_volume=False, volumes=[volume_id], wait=False, ) self.assert_calls() def test_create_boot_from_volume_image_terminate(self): build_server = fakes.make_fake_server('1234', '', 'BUILD') active_server = fakes.make_fake_server('1234', '', 'BUILD') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': build_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': '', 'max_count': 1, 'min_count': 1, 'block_device_mapping_v2': [ { 'boot_index': '0', 'delete_on_termination': True, 'destination_type': 'volume', 'source_type': 'image', 'uuid': 'image-id', 'volume_size': '1', } ], 'name': 'server-name', 'networks': 'auto', } } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': active_server}, ), ] ) self.cloud.create_server( name='server-name', image=dict(id='image-id'), flavor=dict(id='flavor-id'), boot_from_volume=True, terminate_volume=True, volume_size=1, wait=False, ) self.assert_calls() def test_create_server_scheduler_hints(self): """ Test that setting scheduler_hints will include them in POST request """ scheduler_hints = { 'group': self.getUniqueString('group'), } fake_server = fakes.make_fake_server('1234', '', 'BUILD') fake_server['scheduler_hints'] = scheduler_hints self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_server}, validate=dict( json={ 'server': { 'flavorRef': 
'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', }, 'OS-SCH-HNT:scheduler_hints': scheduler_hints, } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': fake_server}, ), ] ) self.cloud.create_server( name='server-name', image=dict(id='image-id'), flavor=dict(id='flavor-id'), scheduler_hints=scheduler_hints, wait=False, ) self.assert_calls() def test_create_server_scheduler_hints_group_merge(self): """ Test that setting both scheduler_hints and group results in merged hints in POST request """ group_id = uuid.uuid4().hex group_name = self.getUniqueString('server-group') policies = ['affinity'] fake_group = fakes.make_fake_server_group( group_id, group_name, policies ) # The scheduler hints we pass in scheduler_hints = { 'different_host': [], } # The scheduler hints we expect to be in POST request scheduler_hints_merged = { 'different_host': [], 'group': group_id, } fake_server = fakes.make_fake_server('1234', '', 'BUILD') fake_server['scheduler_hints'] = scheduler_hints_merged self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-server-groups', group_id], ), json={'server_groups': [fake_group]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', }, 'OS-SCH-HNT:scheduler_hints': scheduler_hints_merged, # noqa: E501 } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': fake_server}, ), ] ) self.cloud.create_server( name='server-name', 
image=dict(id='image-id'), flavor=dict(id='flavor-id'), scheduler_hints=dict(scheduler_hints), group=group_id, wait=False, ) self.assert_calls() def test_create_server_scheduler_hints_group_override(self): """ Test that setting group in both scheduler_hints and group param prefers param """ group_id_scheduler_hints = uuid.uuid4().hex group_id = uuid.uuid4().hex group_name = self.getUniqueString('server-group') policies = ['affinity'] fake_group = fakes.make_fake_server_group( group_id, group_name, policies ) # The scheduler hints we pass in that are expected to be ignored in # POST call scheduler_hints = { 'group': group_id_scheduler_hints, } # The scheduler hints we expect to be in POST request group_scheduler_hints = { 'group': group_id, } fake_server = fakes.make_fake_server('1234', '', 'BUILD') fake_server['scheduler_hints'] = group_scheduler_hints self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-server-groups', group_id], ), json={'server_groups': [fake_group]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers'] ), json={'server': fake_server}, validate=dict( json={ 'server': { 'flavorRef': 'flavor-id', 'imageRef': 'image-id', 'max_count': 1, 'min_count': 1, 'name': 'server-name', 'networks': 'auto', }, 'OS-SCH-HNT:scheduler_hints': group_scheduler_hints, # noqa: E501 } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), json={'server': fake_server}, ), ] ) self.cloud.create_server( name='server-name', image=dict(id='image-id'), flavor=dict(id='flavor-id'), scheduler_hints=dict(scheduler_hints), group=group_id, wait=False, ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/cloud/test_create_volume_snapshot.py0000664000175000017500000001460500000000000027635 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_create_volume_snapshot ---------------------------------- Tests for the `create_volume_snapshot` command. """ from openstack.block_storage.v3 import snapshot from openstack.cloud import meta from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestCreateVolumeSnapshot(base.TestCase): def setUp(self): super().setUp() self.use_cinder() def _compare_snapshots(self, exp, real): self.assertDictEqual( snapshot.Snapshot(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def test_create_volume_snapshot_wait(self): """ Test that create_volume_snapshot with a wait returns the volume snapshot when its status changes to "available". 
""" snapshot_id = '5678' volume_id = '1234' build_snapshot = fakes.FakeVolumeSnapshot( snapshot_id, 'creating', 'foo', 'derpysnapshot' ) build_snapshot_dict = meta.obj_to_munch(build_snapshot) fake_snapshot = fakes.FakeVolumeSnapshot( snapshot_id, 'available', 'foo', 'derpysnapshot' ) fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots'] ), json={'snapshot': build_snapshot_dict}, validate=dict( json={ 'snapshot': {'volume_id': '1234', 'force': False} } ), ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', snapshot_id] ), json={'snapshot': build_snapshot_dict}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', snapshot_id] ), json={'snapshot': fake_snapshot_dict}, ), ] ) self._compare_snapshots( fake_snapshot_dict, self.cloud.create_volume_snapshot(volume_id=volume_id, wait=True), ) self.assert_calls() def test_create_volume_snapshot_with_timeout(self): """ Test that a timeout while waiting for the volume snapshot to create raises an exception in create_volume_snapshot. 
""" snapshot_id = '5678' volume_id = '1234' build_snapshot = fakes.FakeVolumeSnapshot( snapshot_id, 'creating', 'foo', 'derpysnapshot' ) build_snapshot_dict = meta.obj_to_munch(build_snapshot) self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots'] ), json={'snapshot': build_snapshot_dict}, validate=dict( json={ 'snapshot': {'volume_id': '1234', 'force': False} } ), ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', snapshot_id] ), json={'snapshot': build_snapshot_dict}, ), ] ) self.assertRaises( exceptions.ResourceTimeout, self.cloud.create_volume_snapshot, volume_id=volume_id, wait=True, timeout=0.01, ) self.assert_calls(do_count=False) def test_create_volume_snapshot_with_error(self): """ Test that a error status while waiting for the volume snapshot to create raises an exception in create_volume_snapshot. """ snapshot_id = '5678' volume_id = '1234' build_snapshot = fakes.FakeVolumeSnapshot( snapshot_id, 'creating', 'bar', 'derpysnapshot' ) build_snapshot_dict = meta.obj_to_munch(build_snapshot) error_snapshot = fakes.FakeVolumeSnapshot( snapshot_id, 'error', 'blah', 'derpysnapshot' ) error_snapshot_dict = meta.obj_to_munch(error_snapshot) self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots'] ), json={'snapshot': build_snapshot_dict}, validate=dict( json={ 'snapshot': {'volume_id': '1234', 'force': False} } ), ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', snapshot_id] ), json={'snapshot': build_snapshot_dict}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', snapshot_id] ), json={'snapshot': error_snapshot_dict}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_volume_snapshot, volume_id=volume_id, wait=True, timeout=5, ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_delete_server.py0000664000175000017500000004023300000000000025710 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_delete_server ---------------------------------- Tests for the `delete_server` command. """ import uuid from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestDeleteServer(base.TestCase): def test_delete_server(self): """ Test that server delete is called when wait=False """ server = fakes.make_fake_server('1234', 'daffy', 'ACTIVE') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'daffy'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=['name=daffy'], ), json={'servers': [server]}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), ), ] ) self.assertTrue(self.cloud.delete_server('daffy', wait=False)) self.assert_calls() def test_delete_server_already_gone(self): """ Test that we return immediately when server is already gone """ self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'tweety'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 
'public', append=['servers', 'detail'], qs_elements=['name=tweety'], ), json={'servers': []}, ), ] ) self.assertFalse(self.cloud.delete_server('tweety', wait=False)) self.assert_calls() def test_delete_server_already_gone_wait(self): self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'speedy'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=['name=speedy'], ), json={'servers': []}, ), ] ) self.assertFalse(self.cloud.delete_server('speedy', wait=True)) self.assert_calls() def test_delete_server_wait_for_deleted(self): """ Test that delete_server waits for the server to be gone """ server = fakes.make_fake_server('9999', 'wily', 'ACTIVE') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'wily'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=['name=wily'], ), json={'servers': [server]}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['servers', '9999'] ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '9999'] ), json={'server': server}, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '9999'] ), status_code=404, ), ] ) self.assertTrue(self.cloud.delete_server('wily', wait=True)) self.assert_calls() def test_delete_server_fails(self): """ Test that delete_server raises non-404 exceptions """ server = fakes.make_fake_server('1212', 'speedy', 'ACTIVE') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'speedy'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], 
qs_elements=['name=speedy'], ), json={'servers': [server]}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1212'] ), status_code=400, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_server, 'speedy', wait=False, ) self.assert_calls() def test_delete_server_no_cinder(self): """ Test that deleting server works when cinder is not available """ orig_has_service = self.cloud.has_service def fake_has_service(service_type): if service_type == 'volume': return False return orig_has_service(service_type) self.cloud.has_service = fake_has_service server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'porky'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=['name=porky'], ), json={'servers': [server]}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), ), ] ) self.assertTrue(self.cloud.delete_server('porky', wait=False)) self.assert_calls() def test_delete_server_delete_ips(self): """ Test that deleting server and fips works """ server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') fip_id = uuid.uuid4().hex self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'porky'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=['name=porky'], ), json={'servers': [server]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'], qs_elements=['floating_ip_address=172.24.5.5'], ), complete_qs=True, json={ 'floatingips': [ { 'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f', # noqa: E501 'tenant_id': '4969c491a3c74ee4af974e6d800c62de', # 
noqa: E501 'floating_network_id': '376da547-b977-4cfe-9cba7', # noqa: E501 'fixed_ip_address': '10.0.0.4', 'floating_ip_address': '172.24.5.5', 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', # noqa: E501 'id': fip_id, 'status': 'ACTIVE', } ] }, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips', fip_id], ), ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), complete_qs=True, json={'floatingips': []}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), status_code=404, ), ] ) self.assertTrue( self.cloud.delete_server('porky', wait=True, delete_ips=True) ) self.assert_calls() def test_delete_server_delete_ips_bad_neutron(self): """ Test that deleting server with a borked neutron doesn't bork """ server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'porky'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=['name=porky'], ), json={'servers': [server]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'], qs_elements=['floating_ip_address=172.24.5.5'], ), complete_qs=True, status_code=404, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), status_code=404, ), ] ) self.assertTrue( self.cloud.delete_server('porky', wait=True, delete_ips=True) ) self.assert_calls() def test_delete_server_delete_fips_nova(self): """ Test that deleting server with a borked neutron doesn't bork """ self.cloud._floating_ip_source = 'nova' 
server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'porky'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=['name=porky'], ), json={'servers': [server]}, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-floating-ips'] ), json={ 'floating_ips': [ { 'fixed_ip': None, 'id': 1, 'instance_id': None, 'ip': '172.24.5.5', 'pool': 'nova', } ] }, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['os-floating-ips', '1'] ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-floating-ips'] ), json={'floating_ips': []}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', '1234'] ), status_code=404, ), ] ) self.assertTrue( self.cloud.delete_server('porky', wait=True, delete_ips=True) ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_delete_volume_snapshot.py0000664000175000017500000001132000000000000027623 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" test_delete_volume_snapshot ---------------------------------- Tests for the `delete_volume_snapshot` command. """ from openstack.cloud import meta from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestDeleteVolumeSnapshot(base.TestCase): def setUp(self): super().setUp() self.use_cinder() def test_delete_volume_snapshot(self): """ Test that delete_volume_snapshot without a wait returns True instance when the volume snapshot deletes. """ fake_snapshot = fakes.FakeVolumeSnapshot( '1234', 'available', 'foo', 'derpysnapshot' ) fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', 'detail'] ), json={'snapshots': [fake_snapshot_dict]}, ), dict( method='DELETE', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', fake_snapshot_dict['id']], ), ), ] ) self.assertTrue( self.cloud.delete_volume_snapshot(name_or_id='1234', wait=False) ) self.assert_calls() def test_delete_volume_snapshot_with_error(self): """ Test that a exception while deleting a volume snapshot will cause an SDKException. """ fake_snapshot = fakes.FakeVolumeSnapshot( '1234', 'available', 'foo', 'derpysnapshot' ) fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', 'detail'] ), json={'snapshots': [fake_snapshot_dict]}, ), dict( method='DELETE', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', fake_snapshot_dict['id']], ), status_code=404, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_volume_snapshot, name_or_id='1234', ) self.assert_calls() def test_delete_volume_snapshot_with_timeout(self): """ Test that a timeout while waiting for the volume snapshot to delete raises an exception in delete_volume_snapshot. 
""" fake_snapshot = fakes.FakeVolumeSnapshot( '1234', 'available', 'foo', 'derpysnapshot' ) fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', 'detail'] ), json={'snapshots': [fake_snapshot_dict]}, ), dict( method='DELETE', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', fake_snapshot_dict['id']], ), ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['snapshots', '1234'] ), json={'snapshot': fake_snapshot_dict}, ), ] ) self.assertRaises( exceptions.ResourceTimeout, self.cloud.delete_volume_snapshot, name_or_id='1234', wait=True, timeout=0.01, ) self.assert_calls(do_count=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_domain_params.py0000664000175000017500000000337000000000000025673 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack.tests.unit import base class TestDomainParams(base.TestCase): def test_identity_params_v3(self): project_data = self._get_project_data(v3=True) self.register_uris( [ dict( method='GET', uri='https://identity.example.com/v3/projects', json=dict( projects=[project_data.json_response['project']] ), ) ] ) ret = self.cloud._get_identity_params( domain_id='5678', project=project_data.project_name ) self.assertIn('default_project_id', ret) self.assertEqual(ret['default_project_id'], project_data.project_id) self.assertIn('domain_id', ret) self.assertEqual(ret['domain_id'], '5678') self.assert_calls() def test_identity_params_v3_no_domain(self): project_data = self._get_project_data(v3=True) self.assertRaises( exceptions.SDKException, self.cloud._get_identity_params, domain_id=None, project=project_data.project_name, ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_domains.py0000664000175000017500000002677600000000000024532 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import uuid import testtools from testtools import matchers from openstack import exceptions from openstack.tests.unit import base class TestDomains(base.TestCase): def get_mock_url( self, service_type='identity', resource='domains', append=None, base_url_append='v3', qs_elements=None, ): return super().get_mock_url( service_type=service_type, resource=resource, append=append, base_url_append=base_url_append, qs_elements=qs_elements, ) def test_list_domains(self): domain_data = self._get_domain_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={'domains': [domain_data.json_response['domain']]}, ) ] ) domains = self.cloud.list_domains() self.assertThat(len(domains), matchers.Equals(1)) self.assertThat( domains[0].name, matchers.Equals(domain_data.domain_name) ) self.assertThat(domains[0].id, matchers.Equals(domain_data.domain_id)) self.assert_calls() def test_get_domain(self): domain_data = self._get_domain_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(append=[domain_data.domain_id]), status_code=200, json=domain_data.json_response, ) ] ) domain = self.cloud.get_domain(domain_id=domain_data.domain_id) self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) self.assert_calls() def test_get_domain_with_name_or_id(self): domain_data = self._get_domain_data() response = {'domains': [domain_data.json_response['domain']]} self.register_uris( [ dict( method='GET', uri=self.get_mock_url(append=[domain_data.domain_id]), status_code=200, json=domain_data.json_response, ), dict( method='GET', uri=self.get_mock_url(append=[domain_data.domain_name]), status_code=404, ), dict( method='GET', uri=self.get_mock_url( qs_elements=['name=' + domain_data.domain_name] ), status_code=200, json=response, ), ] ) domain = self.cloud.get_domain(name_or_id=domain_data.domain_id) domain_by_name = self.cloud.get_domain( 
name_or_id=domain_data.domain_name ) self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) self.assertThat( domain_by_name.id, matchers.Equals(domain_data.domain_id) ) self.assertThat( domain_by_name.name, matchers.Equals(domain_data.domain_name) ) self.assert_calls() def test_create_domain(self): domain_data = self._get_domain_data( description=uuid.uuid4().hex, enabled=True ) self.register_uris( [ dict( method='POST', uri=self.get_mock_url(), status_code=200, json=domain_data.json_response, validate=dict(json=domain_data.json_request), ) ] ) domain = self.cloud.create_domain( domain_data.domain_name, domain_data.description ) self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) self.assertThat( domain.description, matchers.Equals(domain_data.description) ) self.assert_calls() def test_create_domain_exception(self): domain_data = self._get_domain_data( domain_name='domain_name', enabled=True ) with testtools.ExpectedException(exceptions.BadRequestException): self.register_uris( [ dict( method='POST', uri=self.get_mock_url(), status_code=400, json=domain_data.json_response, validate=dict(json=domain_data.json_request), ) ] ) self.cloud.create_domain('domain_name') self.assert_calls() def test_delete_domain(self): domain_data = self._get_domain_data() new_resp = domain_data.json_response.copy() new_resp['domain']['enabled'] = False domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) self.register_uris( [ dict( method='PATCH', uri=domain_resource_uri, status_code=200, json=new_resp, validate=dict(json={'domain': {'enabled': False}}), ), dict( method='DELETE', uri=domain_resource_uri, status_code=204 ), ] ) self.cloud.delete_domain(domain_data.domain_id) self.assert_calls() def test_delete_domain_name_or_id(self): domain_data = self._get_domain_data() new_resp = 
domain_data.json_response.copy() new_resp['domain']['enabled'] = False domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) self.register_uris( [ dict( method='GET', uri=self.get_mock_url(append=[domain_data.domain_id]), status_code=200, json={'domain': domain_data.json_response['domain']}, ), dict( method='PATCH', uri=domain_resource_uri, status_code=200, json=new_resp, validate=dict(json={'domain': {'enabled': False}}), ), dict( method='DELETE', uri=domain_resource_uri, status_code=204 ), ] ) self.cloud.delete_domain(name_or_id=domain_data.domain_id) self.assert_calls() def test_delete_domain_exception(self): # NOTE(notmorgan): This test does not reflect the case where the domain # cannot be updated to be disabled, Shade raises that as an unable # to update domain even though it is called via delete_domain. This # should be fixed in shade to catch either a failure on PATCH, # subsequent GET, or DELETE call(s). domain_data = self._get_domain_data() new_resp = domain_data.json_response.copy() new_resp['domain']['enabled'] = False domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) self.register_uris( [ dict( method='PATCH', uri=domain_resource_uri, status_code=200, json=new_resp, validate=dict(json={'domain': {'enabled': False}}), ), dict( method='DELETE', uri=domain_resource_uri, status_code=404 ), ] ) with testtools.ExpectedException(exceptions.NotFoundException): self.cloud.delete_domain(domain_data.domain_id) self.assert_calls() def test_update_domain(self): domain_data = self._get_domain_data( description=self.getUniqueString('domainDesc') ) domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) self.register_uris( [ dict( method='PATCH', uri=domain_resource_uri, status_code=200, json=domain_data.json_response, validate=dict(json=domain_data.json_request), ) ] ) domain = self.cloud.update_domain( domain_data.domain_id, name=domain_data.domain_name, description=domain_data.description, ) 
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) self.assertThat( domain.description, matchers.Equals(domain_data.description) ) self.assert_calls() def test_update_domain_name_or_id(self): domain_data = self._get_domain_data( description=self.getUniqueString('domainDesc') ) domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) self.register_uris( [ dict( method='GET', uri=self.get_mock_url(append=[domain_data.domain_id]), status_code=200, json={'domain': domain_data.json_response['domain']}, ), dict( method='PATCH', uri=domain_resource_uri, status_code=200, json=domain_data.json_response, validate=dict(json=domain_data.json_request), ), ] ) domain = self.cloud.update_domain( name_or_id=domain_data.domain_id, name=domain_data.domain_name, description=domain_data.description, ) self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) self.assertThat( domain.description, matchers.Equals(domain_data.description) ) self.assert_calls() def test_update_domain_exception(self): domain_data = self._get_domain_data( description=self.getUniqueString('domainDesc') ) self.register_uris( [ dict( method='PATCH', uri=self.get_mock_url(append=[domain_data.domain_id]), status_code=409, json=domain_data.json_response, validate=dict(json={'domain': {'enabled': False}}), ) ] ) with testtools.ExpectedException(exceptions.ConflictException): self.cloud.delete_domain(domain_data.domain_id) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_endpoints.py0000664000175000017500000003161200000000000025064 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_cloud_endpoints ---------------------------------- Tests Keystone endpoints commands. """ import uuid from testtools import matchers from openstack.tests.unit import base class TestCloudEndpoints(base.TestCase): def get_mock_url( self, service_type='identity', interface='public', resource='endpoints', append=None, base_url_append='v3', ): return super().get_mock_url( service_type, interface, resource, append, base_url_append ) def _dummy_url(self): return 'https://%s.example.com/' % uuid.uuid4().hex def test_create_endpoint_v3(self): service_data = self._get_service_data() public_endpoint_data = self._get_endpoint_v3_data( service_id=service_data.service_id, interface='public', url=self._dummy_url(), ) public_endpoint_data_disabled = self._get_endpoint_v3_data( service_id=service_data.service_id, interface='public', url=self._dummy_url(), enabled=False, ) admin_endpoint_data = self._get_endpoint_v3_data( service_id=service_data.service_id, interface='admin', url=self._dummy_url(), region=public_endpoint_data.region_id, ) internal_endpoint_data = self._get_endpoint_v3_data( service_id=service_data.service_id, interface='internal', url=self._dummy_url(), region=public_endpoint_data.region_id, ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='services'), status_code=200, json={ 'services': [service_data.json_response_v3['service']] }, ), dict( method='POST', uri=self.get_mock_url(), status_code=200, 
json=public_endpoint_data_disabled.json_response, validate=dict( json=public_endpoint_data_disabled.json_request ), ), dict( method='GET', uri=self.get_mock_url(resource='services'), status_code=200, json={ 'services': [service_data.json_response_v3['service']] }, ), dict( method='POST', uri=self.get_mock_url(), status_code=200, json=public_endpoint_data.json_response, validate=dict(json=public_endpoint_data.json_request), ), dict( method='POST', uri=self.get_mock_url(), status_code=200, json=internal_endpoint_data.json_response, validate=dict(json=internal_endpoint_data.json_request), ), dict( method='POST', uri=self.get_mock_url(), status_code=200, json=admin_endpoint_data.json_response, validate=dict(json=admin_endpoint_data.json_request), ), ] ) endpoints = self.cloud.create_endpoint( service_name_or_id=service_data.service_id, region=public_endpoint_data_disabled.region_id, url=public_endpoint_data_disabled.url, interface=public_endpoint_data_disabled.interface, enabled=False, ) # Test endpoint values self.assertThat( endpoints[0].id, matchers.Equals(public_endpoint_data_disabled.endpoint_id), ) self.assertThat( endpoints[0].url, matchers.Equals(public_endpoint_data_disabled.url), ) self.assertThat( endpoints[0].interface, matchers.Equals(public_endpoint_data_disabled.interface), ) self.assertThat( endpoints[0].region_id, matchers.Equals(public_endpoint_data_disabled.region_id), ) self.assertThat( endpoints[0].region_id, matchers.Equals(public_endpoint_data_disabled.region_id), ) self.assertThat( endpoints[0].is_enabled, matchers.Equals(public_endpoint_data_disabled.enabled), ) endpoints_2on3 = self.cloud.create_endpoint( service_name_or_id=service_data.service_id, region=public_endpoint_data.region_id, public_url=public_endpoint_data.url, internal_url=internal_endpoint_data.url, admin_url=admin_endpoint_data.url, ) # Three endpoints should be returned, public, internal, and admin self.assertThat(len(endpoints_2on3), matchers.Equals(3)) # test keys and values 
are correct for each endpoint created for result, reference in zip( endpoints_2on3, [ public_endpoint_data, internal_endpoint_data, admin_endpoint_data, ], ): self.assertThat(result.id, matchers.Equals(reference.endpoint_id)) self.assertThat(result.url, matchers.Equals(reference.url)) self.assertThat( result.interface, matchers.Equals(reference.interface) ) self.assertThat( result.region_id, matchers.Equals(reference.region_id) ) self.assertThat( result.is_enabled, matchers.Equals(reference.enabled) ) self.assert_calls() def test_update_endpoint_v3(self): service_data = self._get_service_data() dummy_url = self._dummy_url() endpoint_data = self._get_endpoint_v3_data( service_id=service_data.service_id, interface='admin', enabled=False, ) reference_request = endpoint_data.json_request.copy() reference_request['endpoint']['url'] = dummy_url self.register_uris( [ dict( method='PATCH', uri=self.get_mock_url(append=[endpoint_data.endpoint_id]), status_code=200, json=endpoint_data.json_response, validate=dict(json=reference_request), ) ] ) endpoint = self.cloud.update_endpoint( endpoint_data.endpoint_id, service_name_or_id=service_data.service_id, region=endpoint_data.region_id, url=dummy_url, interface=endpoint_data.interface, enabled=False, ) # test keys and values are correct self.assertThat( endpoint.id, matchers.Equals(endpoint_data.endpoint_id) ) self.assertThat( endpoint.service_id, matchers.Equals(service_data.service_id) ) self.assertThat(endpoint.url, matchers.Equals(endpoint_data.url)) self.assertThat( endpoint.interface, matchers.Equals(endpoint_data.interface) ) self.assert_calls() def test_list_endpoints(self): endpoints_data = [self._get_endpoint_v3_data() for e in range(1, 10)] self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'endpoints': [ e.json_response['endpoint'] for e in endpoints_data ] }, ) ] ) endpoints = self.cloud.list_endpoints() # test we are getting exactly len(self.mock_endpoints) elements 
self.assertThat(len(endpoints), matchers.Equals(len(endpoints_data))) # test keys and values are correct for i, ep in enumerate(endpoints_data): self.assertThat(endpoints[i].id, matchers.Equals(ep.endpoint_id)) self.assertThat( endpoints[i].service_id, matchers.Equals(ep.service_id) ) self.assertThat(endpoints[i].url, matchers.Equals(ep.url)) self.assertThat( endpoints[i].interface, matchers.Equals(ep.interface) ) self.assert_calls() def test_search_endpoints(self): endpoints_data = [ self._get_endpoint_v3_data(region='region1') for e in range(0, 2) ] endpoints_data.extend( [self._get_endpoint_v3_data() for e in range(1, 8)] ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'endpoints': [ e.json_response['endpoint'] for e in endpoints_data ] }, ), dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'endpoints': [ e.json_response['endpoint'] for e in endpoints_data ] }, ), dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'endpoints': [ e.json_response['endpoint'] for e in endpoints_data ] }, ), dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'endpoints': [ e.json_response['endpoint'] for e in endpoints_data ] }, ), ] ) # Search by id endpoints = self.cloud.search_endpoints( id=endpoints_data[-1].endpoint_id ) # # test we are getting exactly 1 element self.assertEqual(1, len(endpoints)) self.assertThat( endpoints[0].id, matchers.Equals(endpoints_data[-1].endpoint_id) ) self.assertThat( endpoints[0].service_id, matchers.Equals(endpoints_data[-1].service_id), ) self.assertThat( endpoints[0].url, matchers.Equals(endpoints_data[-1].url) ) self.assertThat( endpoints[0].interface, matchers.Equals(endpoints_data[-1].interface), ) # Not found endpoints = self.cloud.search_endpoints(id='!invalid!') self.assertEqual(0, len(endpoints)) # Multiple matches endpoints = self.cloud.search_endpoints( filters={'region_id': 'region1'} ) # # test we are getting exactly 2 elements 
self.assertEqual(2, len(endpoints)) # test we are getting the correct response for region/region_id compat endpoints = self.cloud.search_endpoints( filters={'region_id': 'region1'} ) # # test we are getting exactly 2 elements, this is v3 self.assertEqual(2, len(endpoints)) self.assert_calls() def test_delete_endpoint(self): endpoint_data = self._get_endpoint_v3_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'endpoints': [endpoint_data.json_response['endpoint']] }, ), dict( method='DELETE', uri=self.get_mock_url(append=[endpoint_data.endpoint_id]), status_code=204, ), ] ) # Delete by id self.cloud.delete_endpoint(id=endpoint_data.endpoint_id) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_flavors.py0000664000175000017500000003550200000000000024537 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestFlavors(base.TestCase): def setUp(self): super().setUp() # self.use_compute_discovery() def test_create_flavor(self): self.use_compute_discovery() self.register_uris( [ dict( method='POST', uri='{endpoint}/flavors'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'flavor': fakes.FAKE_FLAVOR}, validate=dict( json={ 'flavor': { "name": "vanilla", "description": None, "ram": 65536, "vcpus": 24, "swap": 0, "os-flavor-access:is_public": True, "rxtx_factor": 1.0, "OS-FLV-EXT-DATA:ephemeral": 0, "disk": 1600, "id": None, } } ), ) ] ) self.cloud.create_flavor( 'vanilla', ram=65536, disk=1600, vcpus=24, ) self.assert_calls() def test_delete_flavor(self): self.use_compute_discovery() self.register_uris( [ dict( method='GET', uri='{endpoint}/flavors/vanilla'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json=fakes.FAKE_FLAVOR, ), dict( method='DELETE', uri='{endpoint}/flavors/{id}'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID ), ), ] ) self.assertTrue(self.cloud.delete_flavor('vanilla')) self.assert_calls() def test_delete_flavor_not_found(self): self.use_compute_discovery() self.register_uris( [ dict( method='GET', uri='{endpoint}/flavors/invalid'.format( endpoint=fakes.COMPUTE_ENDPOINT ), status_code=404, ), dict( method='GET', uri='{endpoint}/flavors/detail?is_public=None'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), ] ) self.assertFalse(self.cloud.delete_flavor('invalid')) self.assert_calls() def test_delete_flavor_exception(self): self.use_compute_discovery() self.register_uris( [ dict( method='GET', uri='{endpoint}/flavors/vanilla'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json=fakes.FAKE_FLAVOR, ), dict( method='GET', uri='{endpoint}/flavors/detail?is_public=None'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), dict( method='DELETE', 
uri='{endpoint}/flavors/{id}'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID ), status_code=503, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_flavor, 'vanilla', ) def test_list_flavors(self): self.use_compute_discovery() uris_to_mock = [ dict( method='GET', uri='{endpoint}/flavors/detail?is_public=None'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), ] self.register_uris(uris_to_mock) flavors = self.cloud.list_flavors() # test that new flavor is created correctly found = False for flavor in flavors: if flavor['name'] == 'vanilla': found = True break self.assertTrue(found) needed_keys = {'name', 'ram', 'vcpus', 'id', 'is_public', 'disk'} if found: # check flavor content self.assertTrue(needed_keys.issubset(flavor.keys())) self.assert_calls() def test_list_flavors_with_extra(self): self.use_compute_discovery() uris_to_mock = [ dict( method='GET', uri='{endpoint}/flavors/detail?is_public=None'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), ] uris_to_mock.extend( [ dict( method='GET', uri='{endpoint}/flavors/{id}/os-extra_specs'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id'] ), json={'extra_specs': {}}, ) for flavor in fakes.FAKE_FLAVOR_LIST ] ) self.register_uris(uris_to_mock) flavors = self.cloud.list_flavors(get_extra=True) # test that new flavor is created correctly found = False for flavor in flavors: if flavor['name'] == 'vanilla': found = True break self.assertTrue(found) needed_keys = {'name', 'ram', 'vcpus', 'id', 'is_public', 'disk'} if found: # check flavor content self.assertTrue(needed_keys.issubset(flavor.keys())) self.assert_calls() def test_get_flavor_by_ram(self): self.use_compute_discovery() uris_to_mock = [ dict( method='GET', uri='{endpoint}/flavors/detail?is_public=None'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), ] uris_to_mock.extend( [ dict( method='GET', 
uri='{endpoint}/flavors/{id}/os-extra_specs'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id'] ), json={'extra_specs': {}}, ) for flavor in fakes.FAKE_FLAVOR_LIST ] ) self.register_uris(uris_to_mock) flavor = self.cloud.get_flavor_by_ram(ram=250) self.assertEqual(fakes.STRAWBERRY_FLAVOR_ID, flavor['id']) def test_get_flavor_by_ram_and_include(self): self.use_compute_discovery() uris_to_mock = [ dict( method='GET', uri='{endpoint}/flavors/detail?is_public=None'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), ] uris_to_mock.extend( [ dict( method='GET', uri='{endpoint}/flavors/{id}/os-extra_specs'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id'] ), json={'extra_specs': {}}, ) for flavor in fakes.FAKE_FLAVOR_LIST ] ) self.register_uris(uris_to_mock) flavor = self.cloud.get_flavor_by_ram(ram=150, include='strawberry') self.assertEqual(fakes.STRAWBERRY_FLAVOR_ID, flavor['id']) def test_get_flavor_by_ram_not_found(self): self.use_compute_discovery() self.register_uris( [ dict( method='GET', uri='{endpoint}/flavors/detail?is_public=None'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'flavors': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.get_flavor_by_ram, ram=100, ) def test_get_flavor_string_and_int(self): self.use_compute_discovery() flavor_resource_uri = '{endpoint}/flavors/1/os-extra_specs'.format( endpoint=fakes.COMPUTE_ENDPOINT ) flavor = fakes.make_fake_flavor('1', 'vanilla') flavor_json = {'extra_specs': {}} self.register_uris( [ dict( method='GET', uri='{endpoint}/flavors/1'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json=flavor, ), dict(method='GET', uri=flavor_resource_uri, json=flavor_json), ] ) flavor1 = self.cloud.get_flavor('1') self.assertEqual('1', flavor1['id']) flavor2 = self.cloud.get_flavor(1) self.assertEqual('1', flavor2['id']) def test_set_flavor_specs(self): self.use_compute_discovery() extra_specs = dict(key1='value1') self.register_uris( [ dict( method='POST', 
uri='{endpoint}/flavors/{id}/os-extra_specs'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=1 ), json=dict(extra_specs=extra_specs), ) ] ) self.cloud.set_flavor_specs(1, extra_specs) self.assert_calls() def test_unset_flavor_specs(self): self.use_compute_discovery() keys = ['key1', 'key2'] self.register_uris( [ dict( method='DELETE', uri='{endpoint}/flavors/{id}/os-extra_specs/{key}'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=1, key=key ), ) for key in keys ] ) self.cloud.unset_flavor_specs(1, keys) self.assert_calls() def test_add_flavor_access(self): self.register_uris( [ dict( method='POST', uri='{endpoint}/flavors/{id}/action'.format( endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id' ), json={ 'flavor_access': [ { 'flavor_id': 'flavor_id', 'tenant_id': 'tenant_id', } ] }, validate=dict( json={'addTenantAccess': {'tenant': 'tenant_id'}} ), ) ] ) self.cloud.add_flavor_access('flavor_id', 'tenant_id') self.assert_calls() def test_remove_flavor_access(self): self.register_uris( [ dict( method='POST', uri='{endpoint}/flavors/{id}/action'.format( endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id' ), json={'flavor_access': []}, validate=dict( json={'removeTenantAccess': {'tenant': 'tenant_id'}} ), ) ] ) self.cloud.remove_flavor_access('flavor_id', 'tenant_id') self.assert_calls() def test_list_flavor_access(self): self.register_uris( [ dict( method='GET', uri='{endpoint}/flavors/vanilla/os-flavor-access'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={ 'flavor_access': [ {'flavor_id': 'vanilla', 'tenant_id': 'tenant_id'} ] }, ) ] ) self.cloud.list_flavor_access('vanilla') self.assert_calls() def test_get_flavor_by_id(self): self.use_compute_discovery() flavor_uri = '{endpoint}/flavors/1'.format( endpoint=fakes.COMPUTE_ENDPOINT ) flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')} self.register_uris( [ dict(method='GET', uri=flavor_uri, json=flavor_json), ] ) flavor1 = self.cloud.get_flavor_by_id('1') self.assertEqual('1', flavor1['id']) self.assertEqual({}, 
flavor1.extra_specs) flavor2 = self.cloud.get_flavor_by_id('1') self.assertEqual('1', flavor2['id']) self.assertEqual({}, flavor2.extra_specs) def test_get_flavor_with_extra_specs(self): self.use_compute_discovery() flavor_uri = '{endpoint}/flavors/1'.format( endpoint=fakes.COMPUTE_ENDPOINT ) flavor_extra_uri = '{endpoint}/flavors/1/os-extra_specs'.format( endpoint=fakes.COMPUTE_ENDPOINT ) flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')} flavor_extra_json = {'extra_specs': {'name': 'test'}} self.register_uris( [ dict(method='GET', uri=flavor_uri, json=flavor_json), dict( method='GET', uri=flavor_extra_uri, json=flavor_extra_json ), ] ) flavor1 = self.cloud.get_flavor_by_id('1', get_extra=True) self.assertEqual('1', flavor1['id']) self.assertEqual({'name': 'test'}, flavor1.extra_specs) flavor2 = self.cloud.get_flavor_by_id('1', get_extra=False) self.assertEqual('1', flavor2['id']) self.assertEqual({}, flavor2.extra_specs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_floating_ip_common.py0000664000175000017500000002105300000000000026722 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_floating_ip_common ---------------------------------- Tests floating IP resource methods for Neutron and Nova-network. 
""" from unittest.mock import patch from openstack.cloud import meta from openstack.compute.v2 import server as _server from openstack import connection from openstack.tests import fakes from openstack.tests.unit import base class TestFloatingIP(base.TestCase): @patch.object(connection.Connection, 'get_floating_ip') @patch.object(connection.Connection, '_attach_ip_to_server') @patch.object(connection.Connection, 'available_floating_ip') def test_add_auto_ip( self, mock_available_floating_ip, mock_attach_ip_to_server, mock_get_floating_ip, ): server_dict = fakes.make_fake_server( server_id='server-id', name='test-server', status="ACTIVE", addresses={}, ) floating_ip_dict = { "id": "this-is-a-floating-ip-id", "fixed_ip_address": None, "internal_network": None, "floating_ip_address": "203.0.113.29", "network": "this-is-a-net-or-pool-id", "attached": False, "status": "ACTIVE", } mock_available_floating_ip.return_value = floating_ip_dict self.cloud.add_auto_ip(server=server_dict) mock_attach_ip_to_server.assert_called_with( timeout=60, wait=False, server=server_dict, floating_ip=floating_ip_dict, skip_attach=False, ) @patch.object(connection.Connection, '_add_ip_from_pool') def test_add_ips_to_server_pool(self, mock_add_ip_from_pool): server_dict = fakes.make_fake_server( server_id='romeo', name='test-server', status="ACTIVE", addresses={}, ) pool = 'nova' self.cloud.add_ips_to_server(server_dict, ip_pool=pool) mock_add_ip_from_pool.assert_called_with( server_dict, pool, reuse=True, wait=False, timeout=60, fixed_address=None, nat_destination=None, ) @patch.object(connection.Connection, 'has_service') @patch.object(connection.Connection, 'get_floating_ip') @patch.object(connection.Connection, '_add_auto_ip') def test_add_ips_to_server_ipv6_only( self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service ): self.cloud._floating_ip_source = None self.cloud.force_ipv4 = False self.cloud._local_ipv6 = True mock_has_service.return_value = False server = 
fakes.make_fake_server( server_id='server-id', name='test-server', status="ACTIVE", addresses={ 'private': [{'addr': "10.223.160.141", 'version': 4}], 'public': [ { 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', 'OS-EXT-IPS:type': 'fixed', 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", 'version': 6, } ], }, ) server_dict = meta.add_server_interfaces( self.cloud, _server.Server(**server) ) new_server = self.cloud.add_ips_to_server(server=server_dict) mock_get_floating_ip.assert_not_called() mock_add_auto_ip.assert_not_called() self.assertEqual( new_server['interface_ip'], '2001:4800:7819:103:be76:4eff:fe05:8525', ) self.assertEqual(new_server['private_v4'], '10.223.160.141') self.assertEqual(new_server['public_v4'], '') self.assertEqual( new_server['public_v6'], '2001:4800:7819:103:be76:4eff:fe05:8525' ) @patch.object(connection.Connection, 'has_service') @patch.object(connection.Connection, 'get_floating_ip') @patch.object(connection.Connection, '_add_auto_ip') def test_add_ips_to_server_rackspace( self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service ): self.cloud._floating_ip_source = None self.cloud.force_ipv4 = False self.cloud._local_ipv6 = True mock_has_service.return_value = False server = fakes.make_fake_server( server_id='server-id', name='test-server', status="ACTIVE", addresses={ 'private': [{'addr': "10.223.160.141", 'version': 4}], 'public': [ {'addr': "104.130.246.91", 'version': 4}, { 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", 'version': 6, }, ], }, ) server_dict = meta.add_server_interfaces( self.cloud, _server.Server(**server) ) new_server = self.cloud.add_ips_to_server(server=server_dict) mock_get_floating_ip.assert_not_called() mock_add_auto_ip.assert_not_called() self.assertEqual( new_server['interface_ip'], '2001:4800:7819:103:be76:4eff:fe05:8525', ) @patch.object(connection.Connection, 'has_service') @patch.object(connection.Connection, 'get_floating_ip') @patch.object(connection.Connection, '_add_auto_ip') def 
test_add_ips_to_server_rackspace_local_ipv4( self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service ): self.cloud._floating_ip_source = None self.cloud.force_ipv4 = False self.cloud._local_ipv6 = False mock_has_service.return_value = False server = fakes.make_fake_server( server_id='server-id', name='test-server', status="ACTIVE", addresses={ 'private': [{'addr': "10.223.160.141", 'version': 4}], 'public': [ {'addr': "104.130.246.91", 'version': 4}, { 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", 'version': 6, }, ], }, ) server_dict = meta.add_server_interfaces( self.cloud, _server.Server(**server) ) new_server = self.cloud.add_ips_to_server(server=server_dict) mock_get_floating_ip.assert_not_called() mock_add_auto_ip.assert_not_called() self.assertEqual(new_server['interface_ip'], '104.130.246.91') @patch.object(connection.Connection, 'add_ip_list') def test_add_ips_to_server_ip_list(self, mock_add_ip_list): server_dict = fakes.make_fake_server( server_id='server-id', name='test-server', status="ACTIVE", addresses={}, ) ips = ['203.0.113.29', '172.24.4.229'] self.cloud.add_ips_to_server(server_dict, ips=ips) mock_add_ip_list.assert_called_with( server_dict, ips, wait=False, timeout=60, fixed_address=None, nat_destination=None, ) @patch.object(connection.Connection, '_needs_floating_ip') @patch.object(connection.Connection, '_add_auto_ip') def test_add_ips_to_server_auto_ip( self, mock_add_auto_ip, mock_needs_floating_ip ): server_dict = fakes.make_fake_server( server_id='server-id', name='test-server', status="ACTIVE", addresses={}, ) # TODO(mordred) REMOVE THIS MOCK WHEN THE NEXT PATCH LANDS # SERIOUSLY THIS TIME. NEXT PATCH - WHICH SHOULD ADD MOCKS FOR # list_ports AND list_networks AND list_subnets. BUT THAT WOULD # BE NOT ACTUALLY RELATED TO THIS PATCH. 
SO DO IT NEXT PATCH mock_needs_floating_ip.return_value = True self.cloud.add_ips_to_server(server_dict) mock_add_auto_ip.assert_called_with( server_dict, wait=False, timeout=60, reuse=True ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_floating_ip_neutron.py0000664000175000017500000015656500000000000027145 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" test_floating_ip_neutron ---------------------------------- Tests Floating IP resource methods for Neutron """ import copy import datetime from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base from openstack import utils class TestFloatingIP(base.TestCase): mock_floating_ip_list_rep = { 'floatingips': [ { 'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f', 'tenant_id': '4969c491a3c74ee4af974e6d800c62de', 'floating_network_id': '376da547-b977-4cfe-9cba-275c80debf57', 'fixed_ip_address': '10.0.0.4', 'floating_ip_address': '172.24.4.229', 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', 'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda7', 'status': 'ACTIVE', }, { 'router_id': None, 'tenant_id': '4969c491a3c74ee4af974e6d800c62de', 'floating_network_id': '376da547-b977-4cfe-9cba-275c80debf57', 'fixed_ip_address': None, 'floating_ip_address': '203.0.113.30', 'port_id': None, 'id': '61cea855-49cb-4846-997d-801b70c71bdd', 'status': 'DOWN', }, ] } mock_floating_ip_new_rep = { 'floatingip': { 'fixed_ip_address': '10.0.0.4', 'floating_ip_address': '172.24.4.229', 'floating_network_id': 'my-network-id', 'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8', 'port_id': None, 'router_id': None, 'status': 'ACTIVE', 'tenant_id': '4969c491a3c74ee4af974e6d800c62df', } } mock_floating_ip_port_rep = { 'floatingip': { 'fixed_ip_address': '10.0.0.4', 'floating_ip_address': '172.24.4.229', 'floating_network_id': 'my-network-id', 'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8', 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', 'router_id': None, 'status': 'ACTIVE', 'tenant_id': '4969c491a3c74ee4af974e6d800c62df', } } mock_get_network_rep = { 'status': 'ACTIVE', 'subnets': ['54d6f61d-db07-451c-9ab3-b9609b6b6f0b'], 'name': 'my-network', 'provider:physical_network': None, 'admin_state_up': True, 'tenant_id': '4fd44f30292945e481c7b8a0c8908869', 'provider:network_type': 'local', 'router:external': True, 'shared': True, 'id': 'my-network-id', 
'provider:segmentation_id': None, } mock_search_ports_rep = [ { 'status': 'ACTIVE', 'binding:host_id': 'devstack', 'name': 'first-port', 'created_at': datetime.datetime.now().isoformat(), 'allowed_address_pairs': [], 'admin_state_up': True, 'network_id': '70c1db1f-b701-45bd-96e0-a313ee3430b3', 'tenant_id': '', 'extra_dhcp_opts': [], 'binding:vif_details': { 'port_filter': True, 'ovs_hybrid_plug': True, }, 'binding:vif_type': 'ovs', 'device_owner': 'compute:None', 'mac_address': 'fa:16:3e:58:42:ed', 'binding:profile': {}, 'binding:vnic_type': 'normal', 'fixed_ips': [ { 'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062', 'ip_address': '172.24.4.2', } ], 'id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', 'security_groups': [], 'device_id': 'server-id', } ] def assertAreInstances(self, elements, elem_type): for e in elements: self.assertIsInstance(e, elem_type) def setUp(self): super().setUp() self.fake_server = fakes.make_fake_server( 'server-id', '', 'ACTIVE', addresses={ 'test_pnztt_net': [ { 'OS-EXT-IPS:type': 'fixed', 'addr': '192.0.2.129', 'version': 4, 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', } ] }, ) self.floating_ip = self.mock_floating_ip_list_rep['floatingips'][0] def test_list_floating_ips(self): self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/floatingips', json=self.mock_floating_ip_list_rep, ) ] ) floating_ips = self.cloud.list_floating_ips() self.assertIsInstance(floating_ips, list) self.assertAreInstances(floating_ips, dict) self.assertEqual(2, len(floating_ips)) self.assert_calls() def test_list_floating_ips_with_filters(self): self.register_uris( [ dict( method='GET', uri=( 'https://network.example.com/v2.0/floatingips?' 
'description=42' ), json={'floatingips': []}, ) ] ) self.cloud.list_floating_ips(filters={'description': 42}) self.assert_calls() def test_search_floating_ips(self): self.register_uris( [ dict( method='GET', uri=('https://network.example.com/v2.0/floatingips'), json=self.mock_floating_ip_list_rep, ) ] ) floating_ips = self.cloud.search_floating_ips( filters={'updated_at': 'never'} ) self.assertIsInstance(floating_ips, list) self.assertAreInstances(floating_ips, dict) self.assertEqual(0, len(floating_ips)) self.assert_calls() def test_get_floating_ip(self): self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/floatingips', json=self.mock_floating_ip_list_rep, ) ] ) floating_ip = self.cloud.get_floating_ip( id='2f245a7b-796b-4f26-9cf9-9e82d248fda7' ) self.assertIsInstance(floating_ip, dict) self.assertEqual('172.24.4.229', floating_ip['floating_ip_address']) self.assertEqual( self.mock_floating_ip_list_rep['floatingips'][0]['tenant_id'], floating_ip['project_id'], ) self.assertEqual( self.mock_floating_ip_list_rep['floatingips'][0]['tenant_id'], floating_ip['tenant_id'], ) self.assertIn('location', floating_ip) self.assert_calls() def test_get_floating_ip_not_found(self): self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/floatingips', json=self.mock_floating_ip_list_rep, ) ] ) floating_ip = self.cloud.get_floating_ip(id='non-existent') self.assertIsNone(floating_ip) self.assert_calls() def test_get_floating_ip_by_id(self): fid = self.mock_floating_ip_new_rep['floatingip']['id'] self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/floatingips/' '{id}'.format(id=fid), json=self.mock_floating_ip_new_rep, ) ] ) floating_ip = self.cloud.get_floating_ip_by_id(id=fid) self.assertIsInstance(floating_ip, dict) self.assertEqual('172.24.4.229', floating_ip['floating_ip_address']) self.assertEqual( self.mock_floating_ip_new_rep['floatingip']['tenant_id'], floating_ip['project_id'], ) 
self.assertEqual( self.mock_floating_ip_new_rep['floatingip']['tenant_id'], floating_ip['tenant_id'], ) self.assertIn('location', floating_ip) self.assert_calls() def test_create_floating_ip(self): self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks/my-network', status_code=404, ), dict( method='GET', uri='https://network.example.com/v2.0/networks' '?name=my-network', json={'networks': [self.mock_get_network_rep]}, ), dict( method='POST', uri='https://network.example.com/v2.0/floatingips', json=self.mock_floating_ip_new_rep, validate=dict( json={ 'floatingip': { 'floating_network_id': 'my-network-id' } } ), ), ] ) ip = self.cloud.create_floating_ip(network='my-network') self.assertEqual( self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'], ip['floating_ip_address'], ) self.assert_calls() def test_create_floating_ip_port_bad_response(self): self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks/my-network', json=self.mock_get_network_rep, ), dict( method='POST', uri='https://network.example.com/v2.0/floatingips', json=self.mock_floating_ip_new_rep, validate=dict( json={ 'floatingip': { 'floating_network_id': 'my-network-id', 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ab', # noqa: E501 } } ), ), ] ) # Fails because we requested a port and the returned FIP has no port self.assertRaises( exceptions.SDKException, self.cloud.create_floating_ip, network='my-network', port='ce705c24-c1ef-408a-bda3-7bbd946164ab', ) self.assert_calls() def test_create_floating_ip_port(self): self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks/my-network', status_code=404, ), dict( method='GET', uri='https://network.example.com/v2.0/networks' '?name=my-network', json={'networks': [self.mock_get_network_rep]}, ), dict( method='POST', uri='https://network.example.com/v2.0/floatingips', json=self.mock_floating_ip_port_rep, validate=dict( json={ 'floatingip': { 
'floating_network_id': 'my-network-id', 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', # noqa: E501 } } ), ), ] ) ip = self.cloud.create_floating_ip( network='my-network', port='ce705c24-c1ef-408a-bda3-7bbd946164ac' ) self.assertEqual( self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'], ip['floating_ip_address'], ) self.assert_calls() def test_neutron_available_floating_ips(self): """ Test without specifying a network name. """ fips_mock_uri = 'https://network.example.com/v2.0/floatingips' self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={'networks': [self.mock_get_network_rep]}, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': []}, ), dict( method='GET', uri=fips_mock_uri, json={'floatingips': []} ), dict( method='POST', uri=fips_mock_uri, json=self.mock_floating_ip_new_rep, validate=dict( json={ 'floatingip': { 'floating_network_id': self.mock_get_network_rep[ # noqa: E501 'id' ] } } ), ), ] ) # Test if first network is selected if no network is given self.cloud._neutron_available_floating_ips() self.assert_calls() def test_neutron_available_floating_ips_network(self): """ Test with specifying a network name. 
""" fips_mock_uri = 'https://network.example.com/v2.0/floatingips' self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={'networks': [self.mock_get_network_rep]}, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': []}, ), dict( method='GET', uri=fips_mock_uri, json={'floatingips': []} ), dict( method='POST', uri=fips_mock_uri, json=self.mock_floating_ip_new_rep, validate=dict( json={ 'floatingip': { 'floating_network_id': self.mock_get_network_rep[ # noqa: E501 'id' ] } } ), ), ] ) # Test if first network is selected if no network is given self.cloud._neutron_available_floating_ips( network=self.mock_get_network_rep['name'] ) self.assert_calls() def test_neutron_available_floating_ips_invalid_network(self): """ Test with an invalid network name. """ self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={'networks': [self.mock_get_network_rep]}, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': []}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud._neutron_available_floating_ips, network='INVALID', ) self.assert_calls() def test_auto_ip_pool_no_reuse(self): server_id = 'f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7' # payloads taken from citycloud self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks/ext-net', status_code=404, ), dict( method='GET', uri='https://network.example.com/v2.0/networks?name=ext-net', # noqa: E501 json={ "networks": [ { "status": "ACTIVE", "subnets": [ "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", ], "availability_zone_hints": [], "availability_zones": ["nova"], "name": "ext-net", "admin_state_up": True, "tenant_id": "a564613210ee43708b8a7fc6274ebd63", # noqa: E501 "tags": [], "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 "mtu": 0, 
"is_default": False, "router:external": True, "ipv4_address_scope": None, "shared": False, "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", "description": None, } ] }, ), dict( method='GET', uri=f'https://network.example.com/v2.0/ports?device_id={server_id}', json={ "ports": [ { "status": "ACTIVE", "created_at": "2017-02-06T20:59:45", "description": "", "allowed_address_pairs": [], "admin_state_up": True, "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", # noqa: E501 "dns_name": None, "extra_dhcp_opts": [], "mac_address": "fa:16:3e:e8:7f:03", "updated_at": "2017-02-06T20:59:49", "name": "", "device_owner": "compute:None", "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 "binding:vnic_type": "normal", "fixed_ips": [ { "subnet_id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9", # noqa: E501 "ip_address": "10.4.0.16", } ], "id": "a767944e-057a-47d1-a669-824a21b8fb7b", "security_groups": [ "9fb5ba44-5c46-4357-8e60-8b55526cab54" ], "device_id": server_id, # noqa: E501 } ] }, ), dict( method='POST', uri='https://network.example.com/v2.0/floatingips', json={ "floatingip": { "router_id": "9de9c787-8f89-4a53-8468-a5533d6d7fd1", # noqa: E501 "status": "DOWN", "description": "", "dns_domain": "", "floating_network_id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", # noqa: E501 "fixed_ip_address": "10.4.0.16", "floating_ip_address": "89.40.216.153", "port_id": "a767944e-057a-47d1-a669-824a21b8fb7b", "id": "e69179dc-a904-4c9a-a4c9-891e2ecb984c", "dns_name": "", "tenant_id": "65222a4d09ea4c68934fa1028c77f394", } }, validate=dict( json={ "floatingip": { "floating_network_id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", # noqa: E501 "fixed_ip_address": "10.4.0.16", "port_id": "a767944e-057a-47d1-a669-824a21b8fb7b", # noqa: E501 } } ), ), self.get_nova_discovery_mock_dict(), dict( method='GET', uri=f'https://compute.example.com/v2.1/servers/{server_id}', json={ "server": { "status": "ACTIVE", "updated": "2017-02-06T20:59:49Z", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": 
"fa:16:3e:e8:7f:03", # noqa: E501 "version": 4, "addr": "10.4.0.16", "OS-EXT-IPS:type": "fixed", }, { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03", # noqa: E501 "version": 4, "addr": "89.40.216.153", "OS-EXT-IPS:type": "floating", }, ] }, "key_name": None, "image": { "id": "95e4c449-8abf-486e-97d9-dc3f82417d2d" # noqa: E501 }, "OS-EXT-STS:task_state": None, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2017-02-06T20:59:48.000000", # noqa: E501 "flavor": { "id": "2186bd79-a05e-4953-9dde-ddefb63c88d4" # noqa: E501 }, "id": server_id, "security_groups": [{"name": "default"}], "OS-SRV-USG:terminated_at": None, "OS-EXT-AZ:availability_zone": "nova", "user_id": "c17534835f8f42bf98fc367e0bf35e09", "name": "testmt", "created": "2017-02-06T20:59:44Z", "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 "OS-DCF:diskConfig": "MANUAL", "os-extended-volumes:volumes_attached": [], "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "config_drive": "", "metadata": {}, } }, ), ] ) self.cloud.add_ips_to_server( utils.Munch( id=server_id, addresses={ "private": [ { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03", "version": 4, "addr": "10.4.0.16", "OS-EXT-IPS:type": "fixed", } ] }, ), ip_pool='ext-net', reuse=False, ) self.assert_calls() def test_available_floating_ip_new(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': [self.mock_get_network_rep]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnets': []}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': []}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), validate=dict( json={ 'floatingip': { 'floating_network_id': 'my-network-id' } } ), json=self.mock_floating_ip_new_rep, ), ] ) ip = 
self.cloud.available_floating_ip(network='my-network') self.assertEqual( self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'], ip['floating_ip_address'], ) self.assert_calls() def test_delete_floating_ip_existing(self): fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7' fake_fip = { 'id': fip_id, 'floating_ip_address': '172.99.106.167', 'status': 'ACTIVE', } self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', f'floatingips/{fip_id}'], ), json={}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': [fake_fip]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', f'floatingips/{fip_id}'], ), json={}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': [fake_fip]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', f'floatingips/{fip_id}'], ), json={}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': []}, ), ] ) self.assertTrue( self.cloud.delete_floating_ip(floating_ip_id=fip_id, retry=2) ) self.assert_calls() def test_delete_floating_ip_existing_down(self): fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7' fake_fip = { 'id': fip_id, 'floating_ip_address': '172.99.106.167', 'status': 'ACTIVE', } down_fip = { 'id': fip_id, 'floating_ip_address': '172.99.106.167', 'status': 'DOWN', } self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', f'floatingips/{fip_id}'], ), json={}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': [fake_fip]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', f'floatingips/{fip_id}'], ), json={}, ), dict( method='GET', uri=self.get_mock_url( 
'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': [down_fip]}, ), ] ) self.assertTrue( self.cloud.delete_floating_ip(floating_ip_id=fip_id, retry=2) ) self.assert_calls() def test_delete_floating_ip_existing_no_delete(self): fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7' fake_fip = { 'id': fip_id, 'floating_ip_address': '172.99.106.167', 'status': 'ACTIVE', } self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', f'floatingips/{fip_id}'], ), json={}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': [fake_fip]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', f'floatingips/{fip_id}'], ), json={}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': [fake_fip]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', f'floatingips/{fip_id}'], ), json={}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': [fake_fip]}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_floating_ip, floating_ip_id=fip_id, retry=2, ) self.assert_calls() def test_delete_floating_ip_not_found(self): self.register_uris( [ dict( method='DELETE', uri=( 'https://network.example.com/v2.0/floatingips/' 'a-wild-id-appears' ), status_code=404, ) ] ) ret = self.cloud.delete_floating_ip(floating_ip_id='a-wild-id-appears') self.assertFalse(ret) self.assert_calls() def test_attach_ip_to_server(self): fip = self.mock_floating_ip_list_rep['floatingips'][0].copy() fip.update({'status': 'DOWN', 'port_id': None, 'router_id': None}) device_id = self.fake_server['id'] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=[f"device_id={device_id}"], ), json={'ports': 
self.mock_search_ports_rep}, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips/{}'.format(fip['id'])], ), json={ 'floatingip': self.mock_floating_ip_list_rep[ 'floatingips' ][0] }, validate=dict( json={ 'floatingip': { 'port_id': self.mock_search_ports_rep[0]['id'], 'fixed_ip_address': self.mock_search_ports_rep[ 0 ]['fixed_ips'][0]['ip_address'], } } ), ), ] ) self.cloud._attach_ip_to_server( server=self.fake_server, floating_ip=self.cloud._normalize_floating_ip(fip), ) self.assert_calls() def test_detach_ip_from_server(self): fip = self.mock_floating_ip_new_rep['floatingip'] attached_fip = copy.copy(fip) attached_fip['port_id'] = 'server-port-id' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': [attached_fip]}, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips/{}'.format(fip['id'])], ), json={'floatingip': fip}, validate=dict(json={'floatingip': {'port_id': None}}), ), ] ) self.cloud.detach_ip_from_server( server_id='server-id', floating_ip_id=fip['id'] ) self.assert_calls() def test_add_ip_from_pool(self): network = self.mock_get_network_rep fip = self.mock_floating_ip_new_rep['floatingip'] fixed_ip = self.mock_search_ports_rep[0]['fixed_ips'][0]['ip_address'] port_id = self.mock_search_ports_rep[0]['id'] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': [network]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnets': []}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': [fip]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingip': fip}, validate=dict( json={ 'floatingip': { 'floating_network_id': 
network['id'] } } ), ), dict( method="GET", uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=[ "device_id={}".format(self.fake_server['id']) ], ), json={'ports': self.mock_search_ports_rep}, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips/{}'.format(fip['id'])], ), json={'floatingip': fip}, validate=dict( json={ 'floatingip': { 'fixed_ip_address': fixed_ip, 'port_id': port_id, } } ), ), ] ) server = self.cloud._add_ip_from_pool( server=self.fake_server, network=network['id'], fixed_address=fixed_ip, ) self.assertEqual(server, self.fake_server) self.assert_calls() def test_cleanup_floating_ips(self): floating_ips = [ { "id": "this-is-a-floating-ip-id", "fixed_ip_address": None, "internal_network": None, "floating_ip_address": "203.0.113.29", "network": "this-is-a-net-or-pool-id", "port_id": None, "status": "ACTIVE", }, { "id": "this-is-a-second-floating-ip-id", "fixed_ip_address": None, "internal_network": None, "floating_ip_address": "203.0.113.30", "network": "this-is-a-net-or-pool-id", "port_id": None, "status": "ACTIVE", }, { "id": "this-is-an-attached-floating-ip-id", "fixed_ip_address": None, "internal_network": None, "floating_ip_address": "203.0.113.29", "network": "this-is-a-net-or-pool-id", "attached": True, "port_id": "this-is-id-of-port-with-fip", "status": "ACTIVE", }, ] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': floating_ips}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'floatingips/{}'.format(floating_ips[0]['id']), ], ), json={}, ), # First IP has been deleted now, return just the second dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': floating_ips[1:]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 
'floatingips/{}'.format(floating_ips[1]['id']), ], ), json={}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingips': [floating_ips[2]]}, ), ] ) cleaned_up = self.cloud.delete_unattached_floating_ips() self.assertEqual(cleaned_up, 2) self.assert_calls() def test_create_floating_ip_no_port(self): server_port = { "id": "port-id", "device_id": "some-server", 'created_at': datetime.datetime.now().isoformat(), 'fixed_ips': [ {'subnet_id': 'subnet-id', 'ip_address': '172.24.4.2'} ], } floating_ip = {"id": "floating-ip-id", "port_id": None} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': [self.mock_get_network_rep]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnets': []}, ), dict( method="GET", uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=['device_id=some-server'], ), json={'ports': [server_port]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'floatingips'] ), json={'floatingip': floating_ip}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud._neutron_create_floating_ip, server=dict(id='some-server'), ) self.assert_calls() def test_find_nat_source_inferred(self): # payloads contrived but based on ones from citycloud self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ "networks": [ { "status": "ACTIVE", "subnets": [ "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", ], "availability_zone_hints": [], "availability_zones": ["nova"], "name": "ext-net", "admin_state_up": True, "tenant_id": "a564613210ee43708b8a7fc6274ebd63", # noqa: E501 "tags": [], "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 "mtu": 0, "is_default": False, 
"router:external": True, "ipv4_address_scope": None, "shared": False, "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", "description": None, }, { "status": "ACTIVE", "subnets": [ "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", ], "availability_zone_hints": [], "availability_zones": ["nova"], "name": "my-network", "admin_state_up": True, "tenant_id": "a564613210ee43708b8a7fc6274ebd63", # noqa: E501 "tags": [], "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 "mtu": 0, "is_default": False, "router:external": True, "ipv4_address_scope": None, "shared": False, "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebg", "description": None, }, { "status": "ACTIVE", "subnets": [ "f0ad1df5-53ee-473f-b86b-3604ea5591e9" ], "availability_zone_hints": [], "availability_zones": ["nova"], "name": "private", "admin_state_up": True, "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 "created_at": "2016-10-22T13:46:26", "tags": [], "updated_at": "2016-10-22T13:46:26", "ipv6_address_scope": None, "router:external": False, "ipv4_address_scope": None, "shared": False, "mtu": 1450, "id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", "description": "", }, ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={ "subnets": [ { "description": "", "enable_dhcp": True, "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", # noqa: E501 "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 "created_at": "2016-10-22T13:46:26", "dns_nameservers": [ "89.36.90.101", "89.36.90.102", ], "updated_at": "2016-10-22T13:46:26", "gateway_ip": "10.4.0.1", "ipv6_ra_mode": None, "allocation_pools": [ {"start": "10.4.0.2", "end": "10.4.0.200"} ], "host_routes": [], "ip_version": 4, "ipv6_address_mode": None, "cidr": "10.4.0.0/24", "id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9", "subnetpool_id": None, "name": "private-subnet-ipv4", } ] }, ), ] ) self.assertEqual('ext-net', 
self.cloud.get_nat_source()['name']) self.assert_calls() def test_find_nat_source_config(self): self.cloud._nat_source = 'my-network' # payloads contrived but based on ones from citycloud self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ "networks": [ { "status": "ACTIVE", "subnets": [ "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", ], "availability_zone_hints": [], "availability_zones": ["nova"], "name": "ext-net", "admin_state_up": True, "tenant_id": "a564613210ee43708b8a7fc6274ebd63", # noqa: E501 "tags": [], "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 "mtu": 0, "is_default": False, "router:external": True, "ipv4_address_scope": None, "shared": False, "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", "description": None, }, { "status": "ACTIVE", "subnets": [ "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", ], "availability_zone_hints": [], "availability_zones": ["nova"], "name": "my-network", "admin_state_up": True, "tenant_id": "a564613210ee43708b8a7fc6274ebd63", # noqa: E501 "tags": [], "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 "mtu": 0, "is_default": False, "router:external": True, "ipv4_address_scope": None, "shared": False, "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebg", "description": None, }, { "status": "ACTIVE", "subnets": [ "f0ad1df5-53ee-473f-b86b-3604ea5591e9" ], "availability_zone_hints": [], "availability_zones": ["nova"], "name": "private", "admin_state_up": True, "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 "created_at": "2016-10-22T13:46:26", "tags": [], "updated_at": "2016-10-22T13:46:26", "ipv6_address_scope": None, "router:external": False, "ipv4_address_scope": None, "shared": False, "mtu": 1450, "id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", "description": "", }, ] }, ), dict( 
method='GET', uri='https://network.example.com/v2.0/subnets', json={ "subnets": [ { "description": "", "enable_dhcp": True, "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", # noqa: E501 "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 "created_at": "2016-10-22T13:46:26", "dns_nameservers": [ "89.36.90.101", "89.36.90.102", ], "updated_at": "2016-10-22T13:46:26", "gateway_ip": "10.4.0.1", "ipv6_ra_mode": None, "allocation_pools": [ {"start": "10.4.0.2", "end": "10.4.0.200"} ], "host_routes": [], "ip_version": 4, "ipv6_address_mode": None, "cidr": "10.4.0.0/24", "id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9", "subnetpool_id": None, "name": "private-subnet-ipv4", } ] }, ), ] ) self.assertEqual('my-network', self.cloud.get_nat_source()['name']) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_floating_ip_nova.py0000664000175000017500000003245400000000000026404 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" test_floating_ip_nova ---------------------------------- Tests Floating IP resource methods for nova-network """ from openstack.tests import fakes from openstack.tests.unit import base def get_fake_has_service(has_service): def fake_has_service(s): if s == 'network': return False return has_service(s) return fake_has_service class TestFloatingIP(base.TestCase): mock_floating_ip_list_rep = [ { 'fixed_ip': None, 'id': 1, 'instance_id': None, 'ip': '203.0.113.1', 'pool': 'nova', }, { 'fixed_ip': None, 'id': 2, 'instance_id': None, 'ip': '203.0.113.2', 'pool': 'nova', }, { 'fixed_ip': '192.0.2.3', 'id': 29, 'instance_id': 'myself', 'ip': '198.51.100.29', 'pool': 'black_hole', }, ] mock_floating_ip_pools = [ {'id': 'pool1_id', 'name': 'nova'}, {'id': 'pool2_id', 'name': 'pool2'}, ] def assertAreInstances(self, elements, elem_type): for e in elements: self.assertIsInstance(e, elem_type) def setUp(self): super().setUp() self.fake_server = fakes.make_fake_server( 'server-id', '', 'ACTIVE', addresses={ 'test_pnztt_net': [ { 'OS-EXT-IPS:type': 'fixed', 'addr': '192.0.2.129', 'version': 4, 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', } ] }, ) self.cloud.has_service = get_fake_has_service(self.cloud.has_service) def test_list_floating_ips(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ips': self.mock_floating_ip_list_rep}, ), ] ) floating_ips = self.cloud.list_floating_ips() self.assertIsInstance(floating_ips, list) self.assertEqual(3, len(floating_ips)) self.assertAreInstances(floating_ips, dict) self.assert_calls() def test_list_floating_ips_with_filters(self): self.assertRaisesRegex( ValueError, "nova-network doesn't support server-side floating IPs filtering. 
" "Use the 'search_floating_ips' method instead", self.cloud.list_floating_ips, filters={'Foo': 42}, ) def test_search_floating_ips(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ips': self.mock_floating_ip_list_rep}, ), ] ) floating_ips = self.cloud.search_floating_ips( filters={'attached': False} ) self.assertIsInstance(floating_ips, list) self.assertEqual(2, len(floating_ips)) self.assertAreInstances(floating_ips, dict) self.assert_calls() def test_get_floating_ip(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ips': self.mock_floating_ip_list_rep}, ), ] ) floating_ip = self.cloud.get_floating_ip(id='29') self.assertIsInstance(floating_ip, dict) self.assertEqual('198.51.100.29', floating_ip['floating_ip_address']) self.assert_calls() def test_get_floating_ip_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ips': self.mock_floating_ip_list_rep}, ), ] ) floating_ip = self.cloud.get_floating_ip(id='666') self.assertIsNone(floating_ip) self.assert_calls() def test_get_floating_ip_by_id(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips', '1'] ), json={'floating_ip': self.mock_floating_ip_list_rep[0]}, ), ] ) floating_ip = self.cloud.get_floating_ip_by_id(id='1') self.assertIsInstance(floating_ip, dict) self.assertEqual('203.0.113.1', floating_ip['floating_ip_address']) self.assert_calls() def test_create_floating_ip(self): self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ip': self.mock_floating_ip_list_rep[1]}, validate=dict(json={'pool': 'nova'}), ), dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips', '2'] ), json={'floating_ip': 
self.mock_floating_ip_list_rep[1]}, ), ] ) self.cloud.create_floating_ip(network='nova') self.assert_calls() def test_available_floating_ip_existing(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ips': self.mock_floating_ip_list_rep[:1]}, ), ] ) ip = self.cloud.available_floating_ip(network='nova') self.assertEqual( self.mock_floating_ip_list_rep[0]['ip'], ip['floating_ip_address'] ) self.assert_calls() def test_available_floating_ip_new(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ips': []}, ), dict( method='POST', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ip': self.mock_floating_ip_list_rep[0]}, validate=dict(json={'pool': 'nova'}), ), dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips', '1'] ), json={'floating_ip': self.mock_floating_ip_list_rep[0]}, ), ] ) ip = self.cloud.available_floating_ip(network='nova') self.assertEqual( self.mock_floating_ip_list_rep[0]['ip'], ip['floating_ip_address'] ) self.assert_calls() def test_delete_floating_ip_existing(self): self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( 'compute', append=['os-floating-ips', 'a-wild-id-appears'], ), ), dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ips': []}, ), ] ) ret = self.cloud.delete_floating_ip(floating_ip_id='a-wild-id-appears') self.assertTrue(ret) self.assert_calls() def test_delete_floating_ip_not_found(self): self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( 'compute', append=['os-floating-ips', 'a-wild-id-appears'], ), status_code=404, ), ] ) ret = self.cloud.delete_floating_ip(floating_ip_id='a-wild-id-appears') self.assertFalse(ret) self.assert_calls() def test_attach_ip_to_server(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 
append=['os-floating-ips'] ), json={'floating_ips': self.mock_floating_ip_list_rep}, ), dict( method='POST', uri=self.get_mock_url( 'compute', append=['servers', self.fake_server['id'], 'action'], ), validate=dict( json={ "addFloatingIp": { "address": "203.0.113.1", "fixed_address": "192.0.2.129", } } ), ), ] ) self.cloud._attach_ip_to_server( server=self.fake_server, floating_ip=self.cloud._normalize_floating_ip( self.mock_floating_ip_list_rep[0] ), fixed_address='192.0.2.129', ) self.assert_calls() def test_detach_ip_from_server(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ips': self.mock_floating_ip_list_rep}, ), dict( method='POST', uri=self.get_mock_url( 'compute', append=['servers', self.fake_server['id'], 'action'], ), validate=dict( json={ "removeFloatingIp": { "address": "203.0.113.1", } } ), ), ] ) self.cloud.detach_ip_from_server( server_id='server-id', floating_ip_id=1 ) self.assert_calls() def test_add_ip_from_pool(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ips': self.mock_floating_ip_list_rep}, ), dict( method='GET', uri=self.get_mock_url( 'compute', append=['os-floating-ips'] ), json={'floating_ips': self.mock_floating_ip_list_rep}, ), dict( method='POST', uri=self.get_mock_url( 'compute', append=['servers', self.fake_server['id'], 'action'], ), validate=dict( json={ "addFloatingIp": { "address": "203.0.113.1", "fixed_address": "192.0.2.129", } } ), ), ] ) server = self.cloud._add_ip_from_pool( server=self.fake_server, network='nova', fixed_address='192.0.2.129', ) self.assertEqual(server, self.fake_server) self.assert_calls() def test_cleanup_floating_ips(self): # This should not call anything because it's unsafe on nova. 
self.assertFalse(self.cloud.delete_unattached_floating_ips()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_floating_ip_pool.py0000664000175000017500000000262200000000000026404 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_floating_ip_pool ---------------------------------- Test floating IP pool resource (managed by nova) """ from openstack.tests import fakes from openstack.tests.unit import base class TestFloatingIPPool(base.TestCase): pools = [{'name': 'public'}] def test_list_floating_ip_pools(self): self.register_uris( [ dict( method='GET', uri='{endpoint}/os-floating-ip-pools'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={"floating_ip_pools": [{"name": "public"}]}, ), ] ) floating_ip_pools = self.cloud.list_floating_ip_pools() self.assertCountEqual(floating_ip_pools, self.pools) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_fwaas.py0000664000175000017500000017141600000000000024171 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy from unittest import mock from openstack import exceptions from openstack.network.v2.firewall_group import FirewallGroup from openstack.network.v2.firewall_policy import FirewallPolicy from openstack.network.v2.firewall_rule import FirewallRule from openstack.tests.unit import base class FirewallTestCase(base.TestCase): def _make_mock_url(self, *args, **params): params_list = ['='.join([k, v]) for k, v in params.items()] return self.get_mock_url( 'network', 'public', append=['v2.0', 'fwaas'] + list(args), qs_elements=params_list or None, ) class TestFirewallRule(FirewallTestCase): firewall_rule_name = 'deny_ssh' firewall_rule_id = 'd525a9b2-ab28-493d-b988-b824c8c033b1' _mock_firewall_rule_attrs = { 'action': 'deny', 'description': 'Deny SSH access', 'destination_ip_address': None, 'destination_port': 22, 'enabled': True, 'id': firewall_rule_id, 'ip_version': 4, 'name': firewall_rule_name, 'project_id': 'ef44f1efcb9548d9a441cdc252a979a6', 'protocol': 'tcp', 'shared': False, 'source_ip_address': None, 'source_port': None, } mock_firewall_rule = None def setUp(self, cloud_config_fixture='clouds.yaml'): super().setUp() self.mock_firewall_rule = FirewallRule( connection=self.cloud, **self._mock_firewall_rule_attrs ).to_dict() def test_create_firewall_rule(self): # attributes that are passed to the tested function passed_attrs = self._mock_firewall_rule_attrs.copy() del passed_attrs['id'] self.register_uris( [ # no validate due to added location key dict( method='POST', uri=self._make_mock_url('firewall_rules'), json={'firewall_rule': 
self.mock_firewall_rule.copy()}, ) ] ) r = self.cloud.create_firewall_rule(**passed_attrs) self.assertDictEqual(self.mock_firewall_rule, r.to_dict()) self.assert_calls() def test_create_firewall_rule_bad_protocol(self): bad_rule = self._mock_firewall_rule_attrs.copy() del bad_rule['id'] # id not allowed bad_rule['ip_version'] = 5 self.register_uris( [ # no validate due to added location key dict( method='POST', uri=self._make_mock_url('firewall_rules'), status_code=400, json={}, ) ] ) self.assertRaises( exceptions.BadRequestException, self.cloud.create_firewall_rule, **bad_rule ) self.assert_calls() def test_delete_firewall_rule(self): self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', name=self.firewall_rule_name ), json={'firewall_rules': [self.mock_firewall_rule]}, ), dict( method='DELETE', uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_id ), json={}, status_code=204, ), ] ) self.assertTrue( self.cloud.delete_firewall_rule(self.firewall_rule_name) ) self.assert_calls() def test_delete_firewall_rule_filters(self): filters = {'project_id': self.mock_firewall_rule['project_id']} self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_name, **filters ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', name=self.firewall_rule_name, **filters ), json={'firewall_rules': [self.mock_firewall_rule]}, ), dict( method='DELETE', uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_id ), json={}, status_code=204, ), ] ) self.assertTrue( self.cloud.delete_firewall_rule(self.firewall_rule_name, filters) ) self.assert_calls() def test_delete_firewall_rule_not_found(self): self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_name ), 
status_code=404, ), dict( method='GET', uri=self._make_mock_url('firewall_rules'), json={'firewall_rules': []}, ), ] ) with mock.patch.object( self.cloud.network, 'delete_firewall_rule' ), mock.patch.object(self.cloud.log, 'debug'): self.assertFalse( self.cloud.delete_firewall_rule(self.firewall_rule_name) ) self.cloud.network.delete_firewall_rule.assert_not_called() self.cloud.log.debug.assert_called_once() def test_delete_firewall_multiple_matches(self): self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', name=self.firewall_rule_name ), json={ 'firewall_rules': [ self.mock_firewall_rule, self.mock_firewall_rule, ] }, ), ] ) self.assertRaises( exceptions.DuplicateResource, self.cloud.delete_firewall_rule, self.firewall_rule_name, ) self.assert_calls() def test_get_firewall_rule(self): self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', name=self.firewall_rule_name ), json={'firewall_rules': [self.mock_firewall_rule]}, ), ] ) r = self.cloud.get_firewall_rule(self.firewall_rule_name) self.assertDictEqual(self.mock_firewall_rule, r) self.assert_calls() def test_get_firewall_rule_not_found(self): name = 'not_found' self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_rules', name), status_code=404, ), dict( method='GET', uri=self._make_mock_url('firewall_rules', name=name), json={'firewall_rules': []}, ), ] ) self.assertIsNone(self.cloud.get_firewall_rule(name)) self.assert_calls() def test_list_firewall_rules(self): self.register_uris( [ dict( method='GET', uri=self._make_mock_url('firewall_rules'), json={'firewall_rules': [self.mock_firewall_rule]}, ) ] ) self.assertDictEqual( self.mock_firewall_rule, 
self.cloud.list_firewall_rules()[0] ) self.assert_calls() def test_update_firewall_rule(self): params = {'description': 'UpdatedDescription'} updated = self.mock_firewall_rule.copy() updated.update(params) self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', name=self.firewall_rule_name ), json={'firewall_rules': [self.mock_firewall_rule]}, ), dict( method='PUT', uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_id ), json={'firewall_rule': updated}, validate=dict(json={'firewall_rule': params}), ), ] ) self.assertDictEqual( updated, self.cloud.update_firewall_rule(self.firewall_rule_name, **params), ) self.assert_calls() def test_update_firewall_rule_filters(self): params = {'description': 'Updated!'} filters = {'project_id': self.mock_firewall_rule['project_id']} updated = self.mock_firewall_rule.copy() updated.update(params) updated_dict = self._mock_firewall_rule_attrs.copy() updated_dict.update(params) self.register_uris( [ dict( method='GET', uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_name, **filters ), json={'firewall_rule': self._mock_firewall_rule_attrs}, ), dict( method='PUT', uri=self._make_mock_url( 'firewall_rules', self.firewall_rule_id ), json={'firewall_rule': updated_dict}, validate={ 'json': {'firewall_rule': params}, }, ), ] ) updated_rule = self.cloud.update_firewall_rule( self.firewall_rule_name, filters, **params ) self.assertDictEqual(updated, updated_rule) self.assert_calls() class TestFirewallPolicy(FirewallTestCase): firewall_policy_id = '78d05d20-d406-41ec-819d-06b65c2684e4' firewall_policy_name = 'block_popular_services' _mock_firewall_policy_attrs = { 'audited': True, 'description': 'block ports of well-known services', 'firewall_rules': ['deny_ssh'], 'id': firewall_policy_id, 'name': firewall_policy_name, 'project_id': 
'b64238cb-a25d-41af-9ee1-42deb4587d20', 'shared': False, } mock_firewall_policy = None def setUp(self, cloud_config_fixture='clouds.yaml'): super().setUp() self.mock_firewall_policy = FirewallPolicy( connection=self.cloud, **self._mock_firewall_policy_attrs ).to_dict() def test_create_firewall_policy(self): # attributes that are passed to the tested method passed_attrs = deepcopy(self._mock_firewall_policy_attrs) del passed_attrs['id'] # policy that is returned by the POST request created_attrs = deepcopy(self._mock_firewall_policy_attrs) created_attrs['firewall_rules'][0] = TestFirewallRule.firewall_rule_id created_policy = FirewallPolicy(connection=self.cloud, **created_attrs) # attributes used to validate the request inside register_uris() validate_attrs = deepcopy(created_attrs) del validate_attrs['id'] self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_rules', TestFirewallRule.firewall_rule_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', name=TestFirewallRule.firewall_rule_name, ), json={ 'firewall_rules': [ TestFirewallRule._mock_firewall_rule_attrs ] }, ), dict( method='POST', uri=self._make_mock_url('firewall_policies'), json={'firewall_policy': created_attrs}, validate=dict(json={'firewall_policy': validate_attrs}), ), ] ) res = self.cloud.create_firewall_policy(**passed_attrs) self.assertDictEqual(created_policy, res.to_dict()) self.assert_calls() def test_create_firewall_policy_rule_not_found(self): posted_policy = deepcopy(self._mock_firewall_policy_attrs) del posted_policy['id'] self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_rules', posted_policy['firewall_rules'][0] ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', name=posted_policy['firewall_rules'][0], ), json={'firewall_rules': []}, ), ] ) with mock.patch.object(self.cloud.network, 'create_firewall_policy'): self.assertRaises( 
exceptions.NotFoundException, self.cloud.create_firewall_policy, **posted_policy ) self.cloud.network.create_firewall_policy.assert_not_called() self.assert_calls() def test_delete_firewall_policy(self): self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.firewall_policy_name ), json={'firewall_policies': [self.mock_firewall_policy]}, ), dict( method='DELETE', uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_id ), json={}, status_code=204, ), ] ) with mock.patch.object(self.cloud.log, 'debug'): self.assertTrue( self.cloud.delete_firewall_policy(self.firewall_policy_name) ) self.assert_calls() self.cloud.log.debug.assert_not_called() def test_delete_firewall_policy_filters(self): filters = {'project_id': self.mock_firewall_policy['project_id']} self.register_uris( [ dict( method='DELETE', uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_id ), json={}, status_code=204, ) ] ) with mock.patch.object( self.cloud.network, 'find_firewall_policy', return_value=self.mock_firewall_policy, ), mock.patch.object(self.cloud.log, 'debug'): self.assertTrue( self.cloud.delete_firewall_policy( self.firewall_policy_name, filters ) ) self.assert_calls() self.cloud.network.find_firewall_policy.assert_called_once_with( self.firewall_policy_name, ignore_missing=False, **filters ) self.cloud.log.debug.assert_not_called() def test_delete_firewall_policy_not_found(self): self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.firewall_policy_name ), json={'firewall_policies': []}, ), ] ) with mock.patch.object(self.cloud.log, 'debug'): self.assertFalse( 
self.cloud.delete_firewall_policy(self.firewall_policy_name) ) self.assert_calls() self.cloud.log.debug.assert_called_once() def test_get_firewall_policy(self): self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.firewall_policy_name ), json={'firewall_policies': [self.mock_firewall_policy]}, ), ] ) self.assertDictEqual( self.mock_firewall_policy, self.cloud.get_firewall_policy(self.firewall_policy_name), ) self.assert_calls() def test_get_firewall_policy_not_found(self): name = 'not_found' self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_policies', name), status_code=404, ), dict( method='GET', uri=self._make_mock_url('firewall_policies', name=name), json={'firewall_policies': []}, ), ] ) self.assertIsNone(self.cloud.get_firewall_policy(name)) self.assert_calls() def test_list_firewall_policies(self): self.register_uris( [ dict( method='GET', uri=self._make_mock_url('firewall_policies'), json={ 'firewall_policies': [ self.mock_firewall_policy.copy(), self.mock_firewall_policy.copy(), ] }, ) ] ) policy = FirewallPolicy( connection=self.cloud, **self.mock_firewall_policy ) self.assertListEqual( self.cloud.list_firewall_policies(), [policy, policy] ) self.assert_calls() def test_list_firewall_policies_filters(self): filters = {'project_id': self.mock_firewall_policy['project_id']} self.register_uris( [ dict( method='GET', uri=self._make_mock_url('firewall_policies', **filters), json={'firewall_policies': [self.mock_firewall_policy]}, ) ] ) self.assertListEqual( self.cloud.list_firewall_policies(filters), [ FirewallPolicy( connection=self.cloud, **self.mock_firewall_policy ) ], ) self.assert_calls() def test_update_firewall_policy(self): lookup_rule = FirewallRule( connection=self.cloud, **TestFirewallRule._mock_firewall_rule_attrs ).to_dict() params = 
{ 'firewall_rules': [lookup_rule['id']], 'description': 'updated!', } retrieved_policy = deepcopy(self.mock_firewall_policy) del retrieved_policy['firewall_rules'][0] updated_policy = deepcopy(self.mock_firewall_policy) updated_policy.update(params) self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.firewall_policy_name ), json={'firewall_policies': [retrieved_policy]}, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', lookup_rule['id'] ), json={'firewall_rule': lookup_rule}, ), dict( method='PUT', uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_id ), json={'firewall_policy': updated_policy}, validate=dict(json={'firewall_policy': params}), ), ] ) self.assertDictEqual( updated_policy, self.cloud.update_firewall_policy( self.firewall_policy_name, **params ), ) self.assert_calls() def test_update_firewall_policy_no_rules(self): params = {'description': 'updated!'} updated_policy = deepcopy(self.mock_firewall_policy) updated_policy.update(params) self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.firewall_policy_name ), json={ 'firewall_policies': [ deepcopy(self.mock_firewall_policy) ] }, ), dict( method='PUT', uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_id ), json={'firewall_policy': updated_policy}, validate=dict(json={'firewall_policy': params}), ), ] ) self.assertDictEqual( updated_policy, self.cloud.update_firewall_policy( self.firewall_policy_name, **params ), ) self.assert_calls() def test_update_firewall_policy_filters(self): filters = {'project_id': self.mock_firewall_policy['project_id']} params = {'description': 'updated!'} updated_policy = 
deepcopy(self.mock_firewall_policy) updated_policy.update(params) self.register_uris( [ dict( method='PUT', uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_id ), json={'firewall_policy': updated_policy}, validate=dict(json={'firewall_policy': params}), ), ] ) with mock.patch.object( self.cloud.network, 'find_firewall_policy', return_value=deepcopy(self.mock_firewall_policy), ): self.assertDictEqual( updated_policy, self.cloud.update_firewall_policy( self.firewall_policy_name, filters, **params ), ) self.assert_calls() self.cloud.network.find_firewall_policy.assert_called_once_with( self.firewall_policy_name, ignore_missing=False, **filters ) def test_insert_rule_into_policy(self): rule0 = FirewallRule( connection=self.cloud, **TestFirewallRule._mock_firewall_rule_attrs ) _rule1_attrs = deepcopy(TestFirewallRule._mock_firewall_rule_attrs) _rule1_attrs.update( id='8068fc06-0e72-43f2-a76f-a51a33b46e08', name='after_rule' ) rule1 = FirewallRule(**_rule1_attrs) _rule2_attrs = deepcopy(TestFirewallRule._mock_firewall_rule_attrs) _rule2_attrs.update( id='c716382d-183b-475d-b500-dcc762f45ce3', name='before_rule' ) rule2 = FirewallRule(**_rule2_attrs) retrieved_policy = deepcopy(self.mock_firewall_policy) retrieved_policy['firewall_rules'] = [rule1['id'], rule2['id']] updated_policy = deepcopy(self.mock_firewall_policy) updated_policy['firewall_rules'] = [ rule0['id'], rule1['id'], rule2['id'], ] self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_name ), status_code=404, ), dict( method='GET', # get policy uri=self._make_mock_url( 'firewall_policies', name=self.firewall_policy_name ), json={'firewall_policies': [retrieved_policy]}, ), dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_rules', rule0['name']), status_code=404, ), dict( method='GET', # get rule to add uri=self._make_mock_url( 'firewall_rules', name=rule0['name'] ), json={'firewall_rules': [rule0]}, 
), dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_rules', rule1['name']), status_code=404, ), dict( method='GET', # get after rule uri=self._make_mock_url( 'firewall_rules', name=rule1['name'] ), json={'firewall_rules': [rule1]}, ), dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_rules', rule2['name']), status_code=404, ), dict( method='GET', # get before rule uri=self._make_mock_url( 'firewall_rules', name=rule2['name'] ), json={'firewall_rules': [rule2]}, ), dict( method='PUT', # add rule uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_id, 'insert_rule', ), json=updated_policy, validate=dict( json={ 'firewall_rule_id': rule0['id'], 'insert_after': rule1['id'], 'insert_before': rule2['id'], } ), ), ] ) r = self.cloud.insert_rule_into_policy( name_or_id=self.firewall_policy_name, rule_name_or_id=rule0['name'], insert_after=rule1['name'], insert_before=rule2['name'], ) self.assertDictEqual(updated_policy, r.to_dict()) self.assert_calls() def test_insert_rule_into_policy_compact(self): """ Tests without insert_after and insert_before """ rule = FirewallRule(**TestFirewallRule._mock_firewall_rule_attrs) retrieved_policy = deepcopy(self.mock_firewall_policy) retrieved_policy['firewall_rules'] = [] updated_policy = deepcopy(retrieved_policy) updated_policy['firewall_rules'].append(rule['id']) self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.firewall_policy_name ), json={'firewall_policies': [retrieved_policy]}, ), dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_rules', rule['name']), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', name=rule['name'] ), json={'firewall_rules': [rule]}, ), dict( method='PUT', uri=self._make_mock_url( 'firewall_policies', retrieved_policy['id'], 
'insert_rule', ), json=updated_policy, validate=dict( json={ 'firewall_rule_id': rule['id'], 'insert_after': None, 'insert_before': None, } ), ), ] ) r = self.cloud.insert_rule_into_policy( self.firewall_policy_name, rule['name'] ) self.assertDictEqual(updated_policy, r.to_dict()) self.assert_calls() def test_insert_rule_into_policy_not_found(self): policy_name = 'bogus_policy' self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_policies', policy_name), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=policy_name ), json={'firewall_policies': []}, ), ] ) with mock.patch.object(self.cloud.network, 'find_firewall_rule'): self.assertRaises( exceptions.NotFoundException, self.cloud.insert_rule_into_policy, policy_name, 'bogus_rule', ) self.assert_calls() self.cloud.network.find_firewall_rule.assert_not_called() def test_insert_rule_into_policy_rule_not_found(self): rule_name = 'unknown_rule' self.register_uris( [ dict( method='GET', uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_id ), json={'firewall_policy': self.mock_firewall_policy}, ), dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_rules', rule_name), status_code=404, ), dict( method='GET', uri=self._make_mock_url('firewall_rules', name=rule_name), json={'firewall_rules': []}, ), ] ) self.assertRaises( exceptions.NotFoundException, self.cloud.insert_rule_into_policy, self.firewall_policy_id, rule_name, ) self.assert_calls() def test_insert_rule_into_policy_already_associated(self): rule = FirewallRule( **TestFirewallRule._mock_firewall_rule_attrs ).to_dict() policy = deepcopy(self.mock_firewall_policy) policy['firewall_rules'] = [rule['id']] self.register_uris( [ dict( method='GET', uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_id ), json={'firewall_policy': policy}, ), dict( method='GET', uri=self._make_mock_url('firewall_rules', rule['id']), json={'firewall_rule': 
rule}, ), ] ) with mock.patch.object(self.cloud.log, 'debug'): r = self.cloud.insert_rule_into_policy(policy['id'], rule['id']) self.assertDictEqual(policy, r.to_dict()) self.assert_calls() self.cloud.log.debug.assert_called() def test_remove_rule_from_policy(self): policy_name = self.firewall_policy_name rule = FirewallRule(**TestFirewallRule._mock_firewall_rule_attrs) retrieved_policy = deepcopy(self.mock_firewall_policy) retrieved_policy['firewall_rules'][0] = rule['id'] updated_policy = deepcopy(self.mock_firewall_policy) del updated_policy['firewall_rules'][0] self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_policies', policy_name), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=policy_name ), json={'firewall_policies': [retrieved_policy]}, ), dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_rules', rule['name']), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', name=rule['name'] ), json={'firewall_rules': [rule]}, ), dict( method='PUT', uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_id, 'remove_rule', ), json=updated_policy, validate=dict(json={'firewall_rule_id': rule['id']}), ), ] ) r = self.cloud.remove_rule_from_policy(policy_name, rule['name']) self.assertDictEqual(updated_policy, r.to_dict()) self.assert_calls() def test_remove_rule_from_policy_not_found(self): self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.firewall_policy_name ), json={'firewall_policies': []}, ), ] ) with mock.patch.object(self.cloud.network, 'find_firewall_rule'): self.assertRaises( exceptions.NotFoundException, self.cloud.remove_rule_from_policy, self.firewall_policy_name, TestFirewallRule.firewall_rule_name, ) self.assert_calls() 
self.cloud.network.find_firewall_rule.assert_not_called() def test_remove_rule_from_policy_rule_not_found(self): retrieved_policy = deepcopy(self.mock_firewall_policy) rule = FirewallRule(**TestFirewallRule._mock_firewall_rule_attrs) retrieved_policy['firewall_rules'][0] = rule['id'] self.register_uris( [ dict( method='GET', uri=self._make_mock_url( 'firewall_policies', self.firewall_policy_id ), json={'firewall_policy': retrieved_policy}, ), dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_rules', rule['name']), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_rules', name=rule['name'] ), json={'firewall_rules': []}, ), ] ) r = self.cloud.remove_rule_from_policy( self.firewall_policy_id, rule['name'] ) self.assertDictEqual(retrieved_policy, r.to_dict()) self.assert_calls() def test_remove_rule_from_policy_not_associated(self): rule = FirewallRule( **TestFirewallRule._mock_firewall_rule_attrs ).to_dict() policy = deepcopy(self.mock_firewall_policy) del policy['firewall_rules'][0] self.register_uris( [ dict( method='GET', uri=self._make_mock_url('firewall_policies', policy['id']), json={'firewall_policy': policy}, ), dict( method='GET', uri=self._make_mock_url('firewall_rules', rule['id']), json={'firewall_rule': rule}, ), ] ) with mock.patch.object( self.cloud.network, 'remove_rule_from_policy' ), mock.patch.object(self.cloud.log, 'debug'): r = self.cloud.remove_rule_from_policy(policy['id'], rule['id']) self.assertDictEqual(policy, r.to_dict()) self.assert_calls() self.cloud.log.debug.assert_called_once() self.cloud.network.remove_rule_from_policy.assert_not_called() class TestFirewallGroup(FirewallTestCase): firewall_group_id = '700eed7a-b979-4b80-a06d-14f000d0f645' firewall_group_name = 'max_security_group' mock_port = { 'name': 'mock_port', 'id': '7d90977c-45ec-467e-a16d-dcaed772a161', } _mock_egress_policy_attrs = { 'id': '34335e5b-44af-4ffd-9dcf-518133f897c7', 'name': 'safe_outgoing_data', } 
_mock_ingress_policy_attrs = { 'id': 'cd28fb50-85d0-4f36-89af-50fac08ac174', 'name': 'bad_incoming_data', } _mock_firewall_group_attrs = { 'admin_state_up': True, 'description': 'Providing max security!', 'egress_firewall_policy': _mock_egress_policy_attrs['name'], 'ingress_firewall_policy': _mock_ingress_policy_attrs['name'], 'id': firewall_group_id, 'name': firewall_group_name, 'ports': [mock_port['name']], 'project_id': 'da347b09-0b4f-4994-a3ef-05d13eaecb2c', 'shared': False, } _mock_returned_firewall_group_attrs = { 'admin_state_up': True, 'description': 'Providing max security!', 'egress_firewall_policy': _mock_egress_policy_attrs['name'], 'egress_firewall_policy_id': _mock_egress_policy_attrs['id'], 'ingress_firewall_policy': _mock_ingress_policy_attrs['name'], 'ingress_firewall_policy_id': _mock_ingress_policy_attrs['id'], 'id': firewall_group_id, 'name': firewall_group_name, 'ports': [mock_port['id']], 'project_id': 'da347b09-0b4f-4994-a3ef-05d13eaecb2c', 'shared': False, } mock_egress_policy = None mock_ingress_policy = None mock_firewall_rule = None mock_returned_firewall_rule = None def setUp(self, cloud_config_fixture='clouds.yaml'): super().setUp() self.mock_egress_policy = FirewallPolicy( connection=self.cloud, **self._mock_egress_policy_attrs ).to_dict() self.mock_ingress_policy = FirewallPolicy( connection=self.cloud, **self._mock_ingress_policy_attrs ).to_dict() self.mock_firewall_group = FirewallGroup( connection=self.cloud, **self._mock_firewall_group_attrs ).to_dict() self.mock_returned_firewall_group = FirewallGroup( connection=self.cloud, **self._mock_returned_firewall_group_attrs ).to_dict() def test_create_firewall_group(self): create_group_attrs = self._mock_firewall_group_attrs.copy() del create_group_attrs['id'] posted_group_attrs = self._mock_returned_firewall_group_attrs.copy() del posted_group_attrs['egress_firewall_policy'] del posted_group_attrs['ingress_firewall_policy'] del posted_group_attrs['id'] self.register_uris( [ dict( 
method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.mock_egress_policy['name'] ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.mock_egress_policy['name'], ), json={'firewall_policies': [self.mock_egress_policy]}, ), dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.mock_ingress_policy['name'] ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.mock_ingress_policy['name'], ), json={'firewall_policies': [self.mock_ingress_policy]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', self.mock_port['name']], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=['name=%s' % self.mock_port['name']], ), json={'ports': [self.mock_port]}, ), dict( method='POST', uri=self._make_mock_url('firewall_groups'), json={ 'firewall_group': deepcopy( self.mock_returned_firewall_group ) }, validate=dict(json={'firewall_group': posted_group_attrs}), ), ] ) r = self.cloud.create_firewall_group(**create_group_attrs) self.assertDictEqual(self.mock_returned_firewall_group, r.to_dict()) self.assert_calls() def test_create_firewall_group_compact(self): """ Tests firewall group creation without policies or ports """ firewall_group = deepcopy(self._mock_firewall_group_attrs) del firewall_group['ports'] del firewall_group['egress_firewall_policy'] del firewall_group['ingress_firewall_policy'] created_firewall = deepcopy(firewall_group) created_firewall.update( egress_firewall_policy_id=None, ingress_firewall_policy_id=None, ports=[], ) del firewall_group['id'] self.register_uris( [ dict( method='POST', uri=self._make_mock_url('firewall_groups'), json={'firewall_group': created_firewall}, validate=dict(json={'firewall_group': firewall_group}), ) ] ) r = self.cloud.create_firewall_group(**firewall_group) self.assertDictEqual( 
FirewallGroup(connection=self.cloud, **created_firewall).to_dict(), r.to_dict(), ) self.assert_calls() def test_delete_firewall_group(self): self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_groups', self.firewall_group_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_groups', name=self.firewall_group_name ), json={ 'firewall_groups': [ deepcopy(self.mock_returned_firewall_group) ] }, ), dict( method='DELETE', uri=self._make_mock_url( 'firewall_groups', self.firewall_group_id ), status_code=204, ), ] ) self.assertTrue( self.cloud.delete_firewall_group(self.firewall_group_name) ) self.assert_calls() def test_delete_firewall_group_filters(self): filters = {'project_id': self.mock_firewall_group['project_id']} self.register_uris( [ dict( method='DELETE', uri=self._make_mock_url( 'firewall_groups', self.firewall_group_id ), status_code=204, ) ] ) with mock.patch.object( self.cloud.network, 'find_firewall_group', return_value=deepcopy(self.mock_firewall_group), ): self.assertTrue( self.cloud.delete_firewall_group( self.firewall_group_name, filters ) ) self.assert_calls() self.cloud.network.find_firewall_group.assert_called_once_with( self.firewall_group_name, ignore_missing=False, **filters ) def test_delete_firewall_group_not_found(self): self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_groups', self.firewall_group_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_groups', name=self.firewall_group_name ), json={'firewall_groups': []}, ), ] ) with mock.patch.object(self.cloud.log, 'debug'): self.assertFalse( self.cloud.delete_firewall_group(self.firewall_group_name) ) self.assert_calls() self.cloud.log.debug.assert_called_once() def test_get_firewall_group(self): returned_group = deepcopy(self.mock_returned_firewall_group) self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 
'firewall_groups', self.firewall_group_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_groups', name=self.firewall_group_name ), json={'firewall_groups': [returned_group]}, ), ] ) self.assertDictEqual( returned_group, self.cloud.get_firewall_group(self.firewall_group_name), ) self.assert_calls() def test_get_firewall_group_not_found(self): name = 'not_found' self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url('firewall_groups', name), status_code=404, ), dict( method='GET', uri=self._make_mock_url('firewall_groups', name=name), json={'firewall_groups': []}, ), ] ) self.assertIsNone(self.cloud.get_firewall_group(name)) self.assert_calls() def test_get_firewall_group_by_id(self): returned_group = deepcopy(self.mock_returned_firewall_group) self.register_uris( [ dict( method='GET', uri=self._make_mock_url( 'firewall_groups', self.firewall_group_id ), json={'firewall_group': returned_group}, ) ] ) r = self.cloud.get_firewall_group(self.firewall_group_id) self.assertDictEqual(returned_group, r.to_dict()) self.assert_calls() def test_list_firewall_groups(self): returned_attrs = deepcopy(self.mock_returned_firewall_group) self.register_uris( [ dict( method='GET', uri=self._make_mock_url('firewall_groups'), json={'firewall_groups': [returned_attrs, returned_attrs]}, ) ] ) group = FirewallGroup(connection=self.cloud, **returned_attrs) self.assertListEqual([group, group], self.cloud.list_firewall_groups()) self.assert_calls() def test_update_firewall_group(self): params = { 'description': 'updated!', 'egress_firewall_policy': self.mock_egress_policy['name'], 'ingress_firewall_policy': self.mock_ingress_policy['name'], 'ports': [self.mock_port['name']], } updated_group = deepcopy(self.mock_returned_firewall_group) updated_group['description'] = params['description'] returned_group = deepcopy(self.mock_returned_firewall_group) # unset attributes that will be updated! 
returned_group.update( ingress_firewall_policy_id=None, egress_firewall_policy_id=None, ports=[], ) self.register_uris( [ dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_groups', self.firewall_group_name ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_groups', name=self.firewall_group_name ), json={'firewall_groups': [returned_group]}, ), dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.mock_egress_policy['name'] ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.mock_egress_policy['name'], ), json={ 'firewall_policies': [ deepcopy(self.mock_egress_policy) ] }, ), dict( method='GET', # short-circuit uri=self._make_mock_url( 'firewall_policies', self.mock_ingress_policy['name'] ), status_code=404, ), dict( method='GET', uri=self._make_mock_url( 'firewall_policies', name=self.mock_ingress_policy['name'], ), json={ 'firewall_policies': [ deepcopy(self.mock_ingress_policy) ] }, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', self.mock_port['name']], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=['name=%s' % self.mock_port['name']], ), json={'ports': [self.mock_port]}, ), dict( method='PUT', uri=self._make_mock_url( 'firewall_groups', self.firewall_group_id ), json={'firewall_group': updated_group}, validate=dict( json={ 'firewall_group': { 'description': params['description'], 'egress_firewall_policy_id': self.mock_egress_policy[ # noqa: E501 'id' ], 'ingress_firewall_policy_id': self.mock_ingress_policy[ # noqa: E501 'id' ], 'ports': [self.mock_port['id']], } } ), ), ] ) self.assertDictEqual( updated_group, self.cloud.update_firewall_group( self.firewall_group_name, **params ), ) self.assert_calls() def test_update_firewall_group_compact(self): params = {'description': 'updated again!'} updated_group = 
deepcopy(self.mock_returned_firewall_group) updated_group.update(params) self.register_uris( [ dict( method='GET', uri=self._make_mock_url( 'firewall_groups', self.firewall_group_id ), json={ 'firewall_group': deepcopy( self.mock_returned_firewall_group ) }, ), dict( method='PUT', uri=self._make_mock_url( 'firewall_groups', self.firewall_group_id ), json={'firewall_group': updated_group}, validate=dict(json={'firewall_group': params}), ), ] ) self.assertDictEqual( updated_group, self.cloud.update_firewall_group(self.firewall_group_id, **params), ) self.assert_calls() def test_update_firewall_group_filters(self): filters = {'project_id': self.mock_firewall_group['project_id']} params = {'description': 'updated again!'} updated_group = deepcopy(self.mock_returned_firewall_group) self.register_uris( [ dict( method='PUT', uri=self._make_mock_url( 'firewall_groups', self.firewall_group_id ), json={'firewall_group': updated_group}, validate=dict(json={'firewall_group': params}), ) ] ) with mock.patch.object( self.cloud.network, 'find_firewall_group', return_value=deepcopy(self.mock_firewall_group), ): r = self.cloud.update_firewall_group( self.firewall_group_name, filters, **params ) self.assertDictEqual(updated_group, r.to_dict()) self.assert_calls() self.cloud.network.find_firewall_group.assert_called_once_with( self.firewall_group_name, ignore_missing=False, **filters ) def test_update_firewall_group_unset_policies(self): transformed_params = { 'ingress_firewall_policy_id': None, 'egress_firewall_policy_id': None, } updated_group = deepcopy(self.mock_returned_firewall_group) updated_group.update(**transformed_params) returned_group = deepcopy(self.mock_returned_firewall_group) self.register_uris( [ dict( method='GET', uri=self._make_mock_url( 'firewall_groups', self.firewall_group_id ), json={'firewall_group': returned_group}, ), dict( method='PUT', uri=self._make_mock_url( 'firewall_groups', self.firewall_group_id ), json={'firewall_group': updated_group}, 
validate=dict(json={'firewall_group': transformed_params}), ), ] ) self.assertDictEqual( updated_group, self.cloud.update_firewall_group( self.firewall_group_id, ingress_firewall_policy=None, egress_firewall_policy=None, ), ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_groups.py0000664000175000017500000001125500000000000024401 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from openstack.tests.unit import base class TestGroups(base.TestCase): def setUp(self, cloud_config_fixture='clouds.yaml'): super().setUp(cloud_config_fixture=cloud_config_fixture) self.addCleanup(self.assert_calls) def get_mock_url( self, service_type='identity', interface='public', resource='groups', append=None, base_url_append='v3', ): return super().get_mock_url( service_type='identity', interface=interface, resource=resource, append=append, base_url_append=base_url_append, ) def test_list_groups(self): group_data = self._get_group_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={'groups': [group_data.json_response['group']]}, ) ] ) self.cloud.list_groups() def test_get_group(self): group_data = self._get_group_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={'groups': [group_data.json_response['group']]}, ), ] ) self.cloud.get_group(group_data.group_id) def test_delete_group(self): group_data = self._get_group_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(append=[group_data.group_id]), status_code=200, json={'group': group_data.json_response['group']}, ), dict( method='DELETE', uri=self.get_mock_url(append=[group_data.group_id]), status_code=204, ), ] ) self.assertTrue(self.cloud.delete_group(group_data.group_id)) def test_create_group(self): domain_data = self._get_domain_data() group_data = self._get_group_data(domain_id=domain_data.domain_id) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='domains', append=[domain_data.domain_id] ), status_code=200, json=domain_data.json_response, ), dict( method='POST', uri=self.get_mock_url(), status_code=200, json=group_data.json_response, validate=dict(json=group_data.json_request), ), ] ) self.cloud.create_group( name=group_data.group_name, description=group_data.description, domain=group_data.domain_id, ) def test_update_group(self): group_data = self._get_group_data() # 
Domain ID is not sent group_data.json_request['group'].pop('domain_id') self.register_uris( [ dict( method='GET', uri=self.get_mock_url(append=[group_data.group_id]), status_code=200, json={'group': group_data.json_response['group']}, ), dict( method='PATCH', uri=self.get_mock_url(append=[group_data.group_id]), status_code=200, json=group_data.json_response, validate=dict( json={ 'group': { 'name': 'new_name', 'description': 'new_description', } } ), ), ] ) self.cloud.update_group( group_data.group_id, 'new_name', 'new_description' ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_identity_roles.py0000664000175000017500000002543100000000000026120 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import testtools from testtools import matchers from openstack import exceptions from openstack.tests.unit import base RAW_ROLE_ASSIGNMENTS = [ { "links": {"assignment": "http://example"}, "role": {"id": "123456"}, "scope": {"domain": {"id": "161718"}}, "user": {"id": "313233"}, }, { "links": {"assignment": "http://example"}, "group": {"id": "101112"}, "role": {"id": "123456"}, "scope": {"project": {"id": "456789"}}, }, ] class TestIdentityRoles(base.TestCase): def get_mock_url( self, service_type='identity', interface='public', resource='roles', append=None, base_url_append='v3', qs_elements=None, ): return super().get_mock_url( service_type, interface, resource, append, base_url_append, qs_elements, ) def test_list_roles(self): role_data = self._get_role_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={'roles': [role_data.json_response['role']]}, ) ] ) self.cloud.list_roles() self.assert_calls() def test_list_role_by_name(self): role_data = self._get_role_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url( qs_elements=[f'name={role_data.role_name}'] ), status_code=200, json={'roles': [role_data.json_response['role']]}, ) ] ) role = self.cloud.list_roles(name=role_data.role_name)[0] self.assertIsNotNone(role) self.assertThat(role.id, matchers.Equals(role_data.role_id)) self.assertThat(role.name, matchers.Equals(role_data.role_name)) self.assert_calls() def test_get_role_by_name(self): role_data = self._get_role_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={'roles': [role_data.json_response['role']]}, ) ] ) role = self.cloud.get_role(role_data.role_name) self.assertIsNotNone(role) self.assertThat(role.id, matchers.Equals(role_data.role_id)) self.assertThat(role.name, matchers.Equals(role_data.role_name)) self.assert_calls() def test_get_role_by_id(self): role_data = self._get_role_data() self.register_uris( [ dict( method='GET', 
uri=self.get_mock_url(), status_code=200, json={'roles': [role_data.json_response['role']]}, ) ] ) role = self.cloud.get_role(role_data.role_id) self.assertIsNotNone(role) self.assertThat(role.id, matchers.Equals(role_data.role_id)) self.assertThat(role.name, matchers.Equals(role_data.role_name)) self.assert_calls() def test_create_role(self): role_data = self._get_role_data() self.register_uris( [ dict( method='POST', uri=self.get_mock_url(), status_code=200, json=role_data.json_response, validate=dict(json=role_data.json_request), ) ] ) role = self.cloud.create_role(role_data.role_name) self.assertIsNotNone(role) self.assertThat(role.name, matchers.Equals(role_data.role_name)) self.assertThat(role.id, matchers.Equals(role_data.role_id)) self.assert_calls() def test_update_role(self): role_data = self._get_role_data() req = {'role': {'name': 'new_name'}} self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={'roles': [role_data.json_response['role']]}, ), dict( method='PATCH', uri=self.get_mock_url(append=[role_data.role_id]), status_code=200, json=role_data.json_response, validate=dict(json=req), ), ] ) role = self.cloud.update_role(role_data.role_id, 'new_name') self.assertIsNotNone(role) self.assertThat(role.name, matchers.Equals(role_data.role_name)) self.assertThat(role.id, matchers.Equals(role_data.role_id)) self.assert_calls() def test_delete_role_by_id(self): role_data = self._get_role_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={'roles': [role_data.json_response['role']]}, ), dict( method='DELETE', uri=self.get_mock_url(append=[role_data.role_id]), status_code=204, ), ] ) role = self.cloud.delete_role(role_data.role_id) self.assertThat(role, matchers.Equals(True)) self.assert_calls() def test_delete_role_by_name(self): role_data = self._get_role_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={'roles': 
[role_data.json_response['role']]}, ), dict( method='DELETE', uri=self.get_mock_url(append=[role_data.role_id]), status_code=204, ), ] ) role = self.cloud.delete_role(role_data.role_name) self.assertThat(role, matchers.Equals(True)) self.assert_calls() def test_list_role_assignments(self): domain_data = self._get_domain_data() user_data = self._get_user_data(domain_id=domain_data.domain_id) group_data = self._get_group_data(domain_id=domain_data.domain_id) project_data = self._get_project_data(domain_id=domain_data.domain_id) role_data = self._get_role_data() response = [ { 'links': 'https://example.com', 'role': {'id': role_data.role_id}, 'scope': {'domain': {'id': domain_data.domain_id}}, 'user': {'id': user_data.user_id}, }, { 'links': 'https://example.com', 'role': {'id': role_data.role_id}, 'scope': {'project': {'id': project_data.project_id}}, 'group': {'id': group_data.group_id}, }, ] self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='role_assignments'), status_code=200, json={'role_assignments': response}, complete_qs=True, ) ] ) ret = self.cloud.list_role_assignments() self.assertThat(len(ret), matchers.Equals(2)) self.assertThat(ret[0].user['id'], matchers.Equals(user_data.user_id)) self.assertThat(ret[0].role['id'], matchers.Equals(role_data.role_id)) self.assertThat( ret[0].scope['domain']['id'], matchers.Equals(domain_data.domain_id), ) self.assertThat( ret[1].group['id'], matchers.Equals(group_data.group_id) ) self.assertThat(ret[1].role['id'], matchers.Equals(role_data.role_id)) self.assertThat( ret[1].scope['project']['id'], matchers.Equals(project_data.project_id), ) def test_list_role_assignments_filters(self): domain_data = self._get_domain_data() user_data = self._get_user_data(domain_id=domain_data.domain_id) role_data = self._get_role_data() response = [ { 'links': 'https://example.com', 'role': {'id': role_data.role_id}, 'scope': {'domain': {'id': domain_data.domain_id}}, 'user': {'id': user_data.user_id}, } ] 
self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='role_assignments', qs_elements=[ 'scope.domain.id=%s' % domain_data.domain_id, 'user.id=%s' % user_data.user_id, 'effective=True', ], ), status_code=200, json={'role_assignments': response}, complete_qs=True, ) ] ) params = dict( user=user_data.user_id, domain=domain_data.domain_id, effective=True, ) ret = self.cloud.list_role_assignments(filters=params) self.assertThat(len(ret), matchers.Equals(1)) self.assertThat(ret[0].user['id'], matchers.Equals(user_data.user_id)) self.assertThat(ret[0].role['id'], matchers.Equals(role_data.role_id)) self.assertThat( ret[0].scope['domain']['id'], matchers.Equals(domain_data.domain_id), ) def test_list_role_assignments_exception(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url(resource='role_assignments'), status_code=403, ) ] ) with testtools.ExpectedException(exceptions.ForbiddenException): self.cloud.list_role_assignments() self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_identity_users.py0000664000175000017500000000537600000000000026143 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from testtools import matchers from openstack.tests.unit import base class TestIdentityUsers(base.TestCase): def get_mock_url( self, service_type='identity', interface='public', resource='users', append=None, base_url_append='v3', qs_elements=None, ): return super().get_mock_url( service_type, interface, resource, append, base_url_append, qs_elements, ) def test_create_user(self): domain_data = self._get_domain_data() user_data = self._get_user_data( "myusername", "mypassword", domain_id=domain_data.domain_id ) self.register_uris( [ dict( method='POST', uri=self.get_mock_url(), status_code=200, json=user_data.json_response, validate=dict(json=user_data.json_request), ) ] ) user = self.cloud.create_user( user_data.name, password=user_data.password, domain_id=domain_data.domain_id, ) self.assertIsNotNone(user) self.assertThat(user.name, matchers.Equals(user_data.name)) self.assert_calls() def test_create_user_without_password(self): domain_data = self._get_domain_data() user_data = self._get_user_data( "myusername", domain_id=domain_data.domain_id ) user_data._replace( password=None, json_request=user_data.json_request["user"].pop("password"), ) self.register_uris( [ dict( method='POST', uri=self.get_mock_url(), status_code=200, json=user_data.json_response, validate=dict(json=user_data.json_request), ) ] ) user = self.cloud.create_user( user_data.name, domain_id=domain_data.domain_id ) self.assertIsNotNone(user) self.assertThat(user.name, matchers.Equals(user_data.name)) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_image.py0000664000175000017500000021340000000000000024140 0ustar00zuulzuul00000000000000# Copyright 2016 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import operator import tempfile from unittest import mock import uuid from openstack.cloud import meta from openstack import connection from openstack import exceptions from openstack.image.v1 import image as image_v1 from openstack.image.v2 import image from openstack.tests import fakes from openstack.tests.unit import base IMPORT_METHODS = 'glance-direct,web-download' class BaseTestImage(base.TestCase): def setUp(self): super().setUp() self.image_id = str(uuid.uuid4()) self.image_name = self.getUniqueString('image') self.object_name = f'images/{self.image_name}' self.imagefile = tempfile.NamedTemporaryFile(delete=False) data = b'\2\0' self.imagefile.write(data) self.imagefile.close() self.output = data self.fake_image_dict = fakes.make_fake_image( image_id=self.image_id, image_name=self.image_name, data=self.imagefile.name, ) self.fake_search_return = {'images': [self.fake_image_dict]} self.container_name = self.getUniqueString('container') def _compare_images(self, exp, real): self.assertDictEqual( image.Image(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def _compare_images_v1(self, exp, real): self.assertDictEqual( image_v1.Image(**exp).to_dict(computed=False), real.to_dict(computed=False), ) class TestImage(BaseTestImage): def setUp(self): super().setUp() self.use_glance() def test_download_image_no_output(self): self.assertRaises( exceptions.SDKException, self.cloud.download_image, self.image_name, ) def test_download_image_two_outputs(self): fake_fd = io.BytesIO() self.assertRaises( exceptions.SDKException, 
self.cloud.download_image, self.image_name, output_path='fake_path', output_file=fake_fd, ) def test_download_image_no_images_found(self): self.register_uris( [ dict( method='GET', uri='https://image.example.com/v2/images/{name}'.format( name=self.image_name ), status_code=404, ), dict( method='GET', uri='https://image.example.com/v2/images?name={name}'.format( # noqa: E501 name=self.image_name ), json=dict(images=[]), ), dict( method='GET', uri='https://image.example.com/v2/images?os_hidden=True', json=dict(images=[]), ), ] ) self.assertRaises( exceptions.NotFoundException, self.cloud.download_image, self.image_name, output_path='fake_path', ) self.assert_calls() def _register_image_mocks(self): self.register_uris( [ dict( method='GET', uri='https://image.example.com/v2/images/{name}'.format( name=self.image_name ), status_code=404, ), dict( method='GET', uri='https://image.example.com/v2/images?name={name}'.format( # noqa: E501 name=self.image_name ), json=self.fake_search_return, ), dict( method='GET', uri='https://image.example.com/v2/images/{id}/file'.format( id=self.image_id ), content=self.output, headers={ 'Content-Type': 'application/octet-stream', 'Content-MD5': self.fake_image_dict['checksum'], }, ), ] ) def test_download_image_with_fd(self): self._register_image_mocks() output_file = io.BytesIO() self.cloud.download_image(self.image_name, output_file=output_file) output_file.seek(0) self.assertEqual(output_file.read(), self.output) self.assert_calls() def test_download_image_with_path(self): self._register_image_mocks() output_file = tempfile.NamedTemporaryFile() self.cloud.download_image( self.image_name, output_path=output_file.name ) output_file.seek(0) self.assertEqual(output_file.read(), self.output) self.assert_calls() @mock.patch.object(connection.Connection, 'search_images') def test_get_images(self, mock_search): image1 = dict(id='123', name='mickey') mock_search.return_value = [image1] r = self.cloud.get_image('mickey') self.assertIsNotNone(r) 
self.assertDictEqual(image1, r) @mock.patch.object(connection.Connection, 'search_images') def test_get_image_not_found(self, mock_search): mock_search.return_value = [] r = self.cloud.get_image('doesNotExist') self.assertIsNone(r) def test_get_image_name(self, cloud=None): cloud = cloud or self.cloud self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_search_return, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_search_return, ), ] ) self.assertEqual(self.image_name, cloud.get_image_name(self.image_id)) self.assertEqual( self.image_name, cloud.get_image_name(self.image_name) ) self.assert_calls() def test_get_image_by_id(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.image_id], base_url_append='v2', ), json=self.fake_image_dict, ) ] ) self._compare_images( self.fake_image_dict, self.cloud.get_image_by_id(self.image_id) ) self.assert_calls() def test_get_image_id(self, cloud=None): cloud = cloud or self.cloud self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_search_return, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_search_return, ), ] ) self.assertEqual(self.image_id, cloud.get_image_id(self.image_id)) self.assertEqual(self.image_id, cloud.get_image_id(self.image_name)) self.assert_calls() def test_get_image_name_operator(self): # This should work the same as non-operator, just verifying it does. self.test_get_image_name(cloud=self.cloud) def test_get_image_id_operator(self): # This should work the same as the other test, just verifying it does. 
self.test_get_image_id(cloud=self.cloud) def test_empty_list_images(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json={'images': []}, ) ] ) self.assertEqual([], self.cloud.list_images()) self.assert_calls() def test_list_images(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_search_return, ) ] ) [ self._compare_images(a, b) for a, b in zip([self.fake_image_dict], self.cloud.list_images()) ] self.assert_calls() def test_list_images_show_all(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['member_status=all'], ), json=self.fake_search_return, ) ] ) [ self._compare_images(a, b) for a, b in zip( [self.fake_image_dict], self.cloud.list_images(show_all=True) ) ] self.assert_calls() def test_list_images_show_all_deleted(self): deleted_image = self.fake_image_dict.copy() deleted_image['status'] = 'deleted' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['member_status=all'], ), json={'images': [self.fake_image_dict, deleted_image]}, ) ] ) [ self._compare_images(a, b) for a, b in zip( [self.fake_image_dict], self.cloud.list_images(show_all=True) ) ] self.assert_calls() def test_list_images_no_filter_deleted(self): deleted_image = self.fake_image_dict.copy() deleted_image['status'] = 'deleted' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json={'images': [self.fake_image_dict, deleted_image]}, ) ] ) [ self._compare_images(a, b) for a, b in zip( [self.fake_image_dict], self.cloud.list_images(filter_deleted=False), ) ] self.assert_calls() def test_list_images_filter_deleted(self): deleted_image = self.fake_image_dict.copy() deleted_image['status'] = 'deleted' 
self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json={'images': [self.fake_image_dict, deleted_image]}, ) ] ) [ self._compare_images(a, b) for a, b in zip([self.fake_image_dict], self.cloud.list_images()) ] self.assert_calls() def test_list_images_string_properties(self): image_dict = self.fake_image_dict.copy() image_dict['properties'] = 'list,of,properties' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json={'images': [image_dict]}, ), ] ) images = self.cloud.list_images() [self._compare_images(a, b) for a, b in zip([image_dict], images)] self.assertEqual( images[0]['properties']['properties'], 'list,of,properties' ) self.assert_calls() def test_list_images_paginated(self): marker = str(uuid.uuid4()) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json={ 'images': [self.fake_image_dict], 'next': '/v2/images?marker={marker}'.format( marker=marker ), }, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=[f'marker={marker}'], ), json=self.fake_search_return, ), ] ) [ self._compare_images(a, b) for a, b in zip([self.fake_image_dict], self.cloud.list_images()) ] self.assert_calls() def test_create_image_put_v2_no_import(self): self.cloud.image_api_use_tasks = False self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.image_name], base_url_append='v2', ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['name=' + self.image_name], ), validate=dict(), json={'images': []}, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['os_hidden=True'], ), json={'images': []}, ), dict( method='POST', uri=self.get_mock_url( 'image', 
append=['images'], base_url_append='v2' ), json=self.fake_image_dict, validate=dict( json={ 'container_format': 'bare', 'disk_format': 'qcow2', 'name': self.image_name, 'owner_specified.openstack.md5': self.fake_image_dict[ # noqa: E501 'owner_specified.openstack.md5' ], 'owner_specified.openstack.object': self.object_name, # noqa: E501 'owner_specified.openstack.sha256': self.fake_image_dict[ # noqa: E501 'owner_specified.openstack.sha256' ], 'visibility': 'private', 'tags': ['tag1', 'tag2'], } ), ), dict( method='PUT', uri=self.get_mock_url( 'image', append=['images', self.image_id, 'file'], base_url_append='v2', ), request_headers={ 'Content-Type': 'application/octet-stream' }, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.fake_image_dict['id']], base_url_append='v2', ), json=self.fake_image_dict, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), complete_qs=True, json=self.fake_search_return, ), ] ) self.cloud.create_image( self.image_name, self.imagefile.name, wait=True, timeout=1, tags=['tag1', 'tag2'], is_public=False, validate_checksum=True, ) self.assert_calls() self.assertEqual( self.adapter.request_history[7].text.read(), self.output ) def test_create_image_put_v2_import_supported(self): self.cloud.image_api_use_tasks = False self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.image_name], base_url_append='v2', ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['name=' + self.image_name], ), validate=dict(), json={'images': []}, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['os_hidden=True'], ), json={'images': []}, ), dict( method='POST', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_image_dict, headers={ 'OpenStack-image-import-methods': 
IMPORT_METHODS, }, validate=dict( json={ 'container_format': 'bare', 'disk_format': 'qcow2', 'name': self.image_name, 'owner_specified.openstack.md5': self.fake_image_dict[ # noqa: E501 'owner_specified.openstack.md5' ], 'owner_specified.openstack.object': self.object_name, # noqa: E501 'owner_specified.openstack.sha256': self.fake_image_dict[ # noqa: E501 'owner_specified.openstack.sha256' ], 'visibility': 'private', 'tags': ['tag1', 'tag2'], } ), ), dict( method='PUT', uri=self.get_mock_url( 'image', append=['images', self.image_id, 'file'], base_url_append='v2', ), request_headers={ 'Content-Type': 'application/octet-stream' }, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.fake_image_dict['id']], base_url_append='v2', ), json=self.fake_image_dict, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), complete_qs=True, json=self.fake_search_return, ), ] ) self.cloud.create_image( self.image_name, self.imagefile.name, wait=True, timeout=1, tags=['tag1', 'tag2'], is_public=False, validate_checksum=True, ) self.assert_calls() self.assertEqual( self.adapter.request_history[7].text.read(), self.output ) def test_create_image_use_import(self): self.cloud.image_api_use_tasks = False self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.image_name], base_url_append='v2', ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['name=' + self.image_name], ), validate=dict(), json={'images': []}, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['os_hidden=True'], ), json={'images': []}, ), dict( method='POST', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_image_dict, headers={ 'OpenStack-image-import-methods': IMPORT_METHODS, }, validate=dict( json={ 'container_format': 'bare', 
'disk_format': 'qcow2', 'name': self.image_name, 'owner_specified.openstack.md5': self.fake_image_dict[ # noqa: E501 'owner_specified.openstack.md5' ], 'owner_specified.openstack.object': self.object_name, # noqa: E501 'owner_specified.openstack.sha256': self.fake_image_dict[ # noqa: E501 'owner_specified.openstack.sha256' ], 'visibility': 'private', 'tags': ['tag1', 'tag2'], } ), ), dict( method='PUT', uri=self.get_mock_url( 'image', append=['images', self.image_id, 'stage'], base_url_append='v2', ), request_headers={ 'Content-Type': 'application/octet-stream' }, ), dict( method='POST', uri=self.get_mock_url( 'image', append=['images', self.image_id, 'import'], base_url_append='v2', ), json={'method': {'name': 'glance-direct'}}, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.fake_image_dict['id']], base_url_append='v2', ), json=self.fake_image_dict, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), complete_qs=True, json=self.fake_search_return, ), ] ) self.cloud.create_image( self.image_name, self.imagefile.name, wait=True, timeout=1, tags=['tag1', 'tag2'], is_public=False, validate_checksum=True, use_import=True, ) self.assert_calls() self.assertEqual( self.adapter.request_history[7].text.read(), self.output ) def test_create_image_task(self): self.cloud.image_api_use_tasks = True endpoint = self.cloud.object_store.get_endpoint() task_id = str(uuid.uuid4()) args = dict( id=task_id, status='success', type='import', result={ 'image_id': self.image_id, }, ) image_no_checksums = self.fake_image_dict.copy() del image_no_checksums['owner_specified.openstack.md5'] del image_no_checksums['owner_specified.openstack.sha256'] del image_no_checksums['owner_specified.openstack.object'] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.image_name], base_url_append='v2', ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'image', 
append=['images'], base_url_append='v2', qs_elements=['name=' + self.image_name], ), validate=dict(), json={'images': []}, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['os_hidden=True'], ), json={'images': []}, ), dict( method='HEAD', uri='{endpoint}/{container}'.format( endpoint=endpoint, container=self.container_name ), status_code=404, ), dict( method='PUT', uri='{endpoint}/{container}'.format( endpoint=endpoint, container=self.container_name ), status_code=201, headers={ 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', 'Content-Length': '0', 'Content-Type': 'text/html; charset=UTF-8', }, ), dict( method='HEAD', uri='{endpoint}/{container}'.format( endpoint=endpoint, container=self.container_name ), headers={ 'Content-Length': '0', 'X-Container-Object-Count': '0', 'Accept-Ranges': 'bytes', 'X-Storage-Policy': 'Policy-0', 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', 'X-Timestamp': '1481912480.41664', 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', 'X-Container-Bytes-Used': '0', 'Content-Type': 'text/plain; charset=utf-8', }, ), dict( method='GET', # This is explicitly not using get_mock_url because that # gets us a project-id oriented URL. 
uri='https://object-store.example.com/info', json=dict( swift={'max_file_size': 1000}, slo={'min_segment_size': 500}, ), ), dict( method='HEAD', uri='{endpoint}/{container}/{object}'.format( endpoint=endpoint, container=self.container_name, object=self.image_name, ), status_code=404, ), dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=endpoint, container=self.container_name, object=self.image_name, ), status_code=201, validate=dict( headers={ 'X-Object-Meta-x-sdk-md5': self.fake_image_dict[ 'owner_specified.openstack.md5' ], 'X-Object-Meta-x-sdk-sha256': self.fake_image_dict[ 'owner_specified.openstack.sha256' ], } ), ), dict( method='POST', uri=self.get_mock_url( 'image', append=['tasks'], base_url_append='v2' ), json={'id': task_id, 'status': 'processing'}, validate=dict( json=dict( type='import', input={ 'import_from': '{container}/{object}'.format( container=self.container_name, object=self.image_name, ), 'image_properties': {'name': self.image_name}, }, ) ), ), dict( method='GET', uri=self.get_mock_url( 'image', append=['tasks', task_id], base_url_append='v2', ), status_code=503, text='Random error', ), dict( method='GET', uri=self.get_mock_url( 'image', append=['tasks', task_id], base_url_append='v2', ), json=args, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.image_id], base_url_append='v2', ), json=image_no_checksums, ), dict( method='PATCH', uri=self.get_mock_url( 'image', append=['images', self.image_id], base_url_append='v2', ), validate=dict( json=sorted( [ { 'op': 'add', 'value': '{container}/{object}'.format( container=self.container_name, object=self.image_name, ), 'path': '/owner_specified.openstack.object', # noqa: E501 }, { 'op': 'add', 'value': self.fake_image_dict[ 'owner_specified.openstack.md5' ], 'path': '/owner_specified.openstack.md5', }, { 'op': 'add', 'value': self.fake_image_dict[ 'owner_specified.openstack.sha256' ], 'path': '/owner_specified.openstack.sha256', # noqa: E501 }, ], 
key=operator.itemgetter('path'), ), headers={ 'Content-Type': 'application/openstack-images-v2.1-json-patch' # noqa: E501 }, ), json=self.fake_search_return, ), dict( method='HEAD', uri='{endpoint}/{container}/{object}'.format( endpoint=endpoint, container=self.container_name, object=self.image_name, ), headers={ 'X-Timestamp': '1429036140.50253', 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', 'Content-Length': '1290170880', 'Last-Modified': 'Tue, 14 Apr 2015 18:29:01 GMT', 'X-Object-Meta-X-Sdk-Sha256': self.fake_image_dict[ 'owner_specified.openstack.sha256' ], 'X-Object-Meta-X-Sdk-Md5': self.fake_image_dict[ 'owner_specified.openstack.md5' ], 'Date': 'Thu, 16 Nov 2017 15:24:30 GMT', 'Accept-Ranges': 'bytes', 'Content-Type': 'application/octet-stream', 'Etag': fakes.NO_MD5, }, ), dict( method='DELETE', uri='{endpoint}/{container}/{object}'.format( endpoint=endpoint, container=self.container_name, object=self.image_name, ), ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), complete_qs=True, json=self.fake_search_return, ), ] ) self.cloud.create_image( self.image_name, self.imagefile.name, wait=True, timeout=1, disk_format='vhd', container_format='ovf', is_public=False, validate_checksum=True, container=self.container_name, ) self.assert_calls() def test_delete_autocreated_no_tasks(self): self.use_keystone_v3() self.cloud.image_api_use_tasks = False deleted = self.cloud.delete_autocreated_image_objects( container=self.container_name ) self.assertFalse(deleted) self.assert_calls([]) def test_delete_image_task(self): self.cloud.image_api_use_tasks = True endpoint = self.cloud.object_store.get_endpoint() object_path = self.fake_image_dict['owner_specified.openstack.object'] image_no_checksums = self.fake_image_dict.copy() del image_no_checksums['owner_specified.openstack.md5'] del image_no_checksums['owner_specified.openstack.sha256'] del image_no_checksums['owner_specified.openstack.object'] self.register_uris( 
[ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_search_return, ), dict( method='DELETE', uri='https://image.example.com/v2/images/{id}'.format( id=self.image_id ), ), dict( method='HEAD', uri='{endpoint}/{object}'.format( endpoint=endpoint, object=object_path ), headers={ 'X-Timestamp': '1429036140.50253', 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', 'Content-Length': '1290170880', 'Last-Modified': 'Tue, 14 Apr 2015 18:29:01 GMT', 'X-Object-Meta-X-Sdk-Sha256': self.fake_image_dict[ 'owner_specified.openstack.sha256' ], 'X-Object-Meta-X-Sdk-Md5': self.fake_image_dict[ 'owner_specified.openstack.md5' ], 'Date': 'Thu, 16 Nov 2017 15:24:30 GMT', 'Accept-Ranges': 'bytes', 'Content-Type': 'application/octet-stream', 'Etag': fakes.NO_MD5, }, ), dict( method='DELETE', uri='{endpoint}/{object}'.format( endpoint=endpoint, object=object_path ), ), ] ) self.cloud.delete_image(self.image_id) self.assert_calls() def test_delete_autocreated_image_objects(self): self.use_keystone_v3() self.cloud.image_api_use_tasks = True endpoint = self.cloud.object_store.get_endpoint() other_image = self.getUniqueString('no-delete') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( service_type='object-store', resource=self.container_name, qs_elements=['format=json'], ), json=[ { 'content_type': 'application/octet-stream', 'bytes': 1437258240, 'hash': '249219347276c331b87bf1ac2152d9af', 'last_modified': '2015-02-16T17:50:05.289600', 'name': other_image, }, { 'content_type': 'application/octet-stream', 'bytes': 1290170880, 'hash': fakes.NO_MD5, 'last_modified': '2015-04-14T18:29:00.502530', 'name': self.image_name, }, ], ), dict( method='HEAD', uri=self.get_mock_url( service_type='object-store', resource=self.container_name, append=[other_image], ), headers={ 'X-Timestamp': '1429036140.50253', 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', 'Content-Length': '1290170880', 'Last-Modified': 'Tue, 14 
Apr 2015 18:29:01 GMT', 'X-Object-Meta-X-Shade-Sha256': 'does not matter', 'X-Object-Meta-X-Shade-Md5': 'does not matter', 'Date': 'Thu, 16 Nov 2017 15:24:30 GMT', 'Accept-Ranges': 'bytes', 'Content-Type': 'application/octet-stream', 'Etag': '249219347276c331b87bf1ac2152d9af', }, ), dict( method='HEAD', uri=self.get_mock_url( service_type='object-store', resource=self.container_name, append=[self.image_name], ), headers={ 'X-Timestamp': '1429036140.50253', 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', 'Content-Length': '1290170880', 'Last-Modified': 'Tue, 14 Apr 2015 18:29:01 GMT', 'X-Object-Meta-X-Shade-Sha256': fakes.NO_SHA256, 'X-Object-Meta-X-Shade-Md5': fakes.NO_MD5, 'Date': 'Thu, 16 Nov 2017 15:24:30 GMT', 'Accept-Ranges': 'bytes', 'Content-Type': 'application/octet-stream', ( 'X-Object-Meta-' + self.cloud._OBJECT_AUTOCREATE_KEY ): 'true', 'Etag': fakes.NO_MD5, 'X-Static-Large-Object': 'false', }, ), dict( method='DELETE', uri='{endpoint}/{container}/{object}'.format( endpoint=endpoint, container=self.container_name, object=self.image_name, ), ), ] ) deleted = self.cloud.delete_autocreated_image_objects( container=self.container_name ) self.assertTrue(deleted) self.assert_calls() def _image_dict(self, fake_image): return self.cloud._normalize_image(meta.obj_to_munch(fake_image)) def _call_create_image(self, name, **kwargs): imagefile = tempfile.NamedTemporaryFile(delete=False) imagefile.write(b'\0') imagefile.close() self.cloud.create_image( name, imagefile.name, wait=True, timeout=1, is_public=False, validate_checksum=True, **kwargs, ) def test_create_image_put_v1(self): self.cloud.config.config['image_api_version'] = '1' args = { 'name': self.image_name, 'container_format': 'bare', 'disk_format': 'qcow2', 'properties': { 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, 'owner_specified.openstack.object': 'images/{name}'.format( name=self.image_name ), 'is_public': False, }, } ret = args.copy() 
ret['id'] = self.image_id ret['status'] = 'success' self.register_uris( [ dict( method='GET', uri='https://image.example.com/v1/images/' + self.image_name, status_code=404, ), dict( method='GET', uri='https://image.example.com/v1/images/detail?name=' + self.image_name, json={'images': []}, ), dict( method='POST', uri='https://image.example.com/v1/images', json={'image': ret}, validate=dict(json=args), ), dict( method='PUT', uri='https://image.example.com/v1/images/{id}'.format( id=self.image_id ), json={'image': ret}, validate=dict( headers={ 'x-image-meta-checksum': fakes.NO_MD5, 'x-glance-registry-purge-props': 'false', } ), ), dict( method='GET', uri='https://image.example.com/v1/images/detail', json={'images': [ret]}, ), ] ) self._call_create_image(self.image_name) [ self._compare_images_v1(b, a) for a, b in zip(self.cloud.list_images(), [ret]) ] def test_create_image_put_v1_bad_delete(self): self.cloud.config.config['image_api_version'] = '1' args = { 'name': self.image_name, 'container_format': 'bare', 'disk_format': 'qcow2', 'properties': { 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, 'owner_specified.openstack.object': 'images/{name}'.format( name=self.image_name ), 'is_public': False, }, 'validate_checksum': True, } ret = args.copy() ret['id'] = self.image_id ret['status'] = 'success' self.register_uris( [ dict( method='GET', uri='https://image.example.com/v1/images/' + self.image_name, status_code=404, ), dict( method='GET', uri='https://image.example.com/v1/images/detail?name=' + self.image_name, json={'images': []}, ), dict( method='POST', uri='https://image.example.com/v1/images', json={'image': ret}, validate=dict(json=args), ), dict( method='PUT', uri='https://image.example.com/v1/images/{id}'.format( id=self.image_id ), status_code=400, validate=dict( headers={ 'x-image-meta-checksum': fakes.NO_MD5, 'x-glance-registry-purge-props': 'false', } ), ), dict( method='DELETE', 
uri='https://image.example.com/v1/images/{id}'.format( id=self.image_id ), json={'images': [ret]}, ), ] ) self.assertRaises( exceptions.HttpException, self._call_create_image, self.image_name, ) self.assert_calls() def test_update_image_no_patch(self): self.cloud.image_api_use_tasks = False args = { 'name': self.image_name, 'container_format': 'bare', 'disk_format': 'qcow2', 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, 'owner_specified.openstack.object': 'images/{name}'.format( name=self.image_name ), 'visibility': 'private', } ret = args.copy() ret['id'] = self.image_id ret['status'] = 'success' self.cloud.update_image_properties( image=image.Image.existing(**ret), **{ 'owner_specified.openstack.object': 'images/{name}'.format( name=self.image_name ) }, ) self.assert_calls() def test_create_image_put_v2_bad_delete(self): self.cloud.image_api_use_tasks = False args = { 'name': self.image_name, 'container_format': 'bare', 'disk_format': 'qcow2', 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, 'owner_specified.openstack.object': 'images/{name}'.format( name=self.image_name ), 'visibility': 'private', } ret = args.copy() ret['id'] = self.image_id ret['status'] = 'success' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.image_name], base_url_append='v2', ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['name=' + self.image_name], ), validate=dict(), json={'images': []}, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['os_hidden=True'], ), json={'images': []}, ), dict( method='POST', uri='https://image.example.com/v2/images', json=ret, validate=dict(json=args), ), dict( method='PUT', uri='https://image.example.com/v2/images/{id}/file'.format( id=self.image_id ), status_code=400, 
validate=dict( headers={ 'Content-Type': 'application/octet-stream', }, ), ), dict( method='DELETE', uri='https://image.example.com/v2/images/{id}'.format( id=self.image_id ), ), ] ) self.assertRaises( exceptions.HttpException, self._call_create_image, self.image_name, ) self.assert_calls() def test_create_image_put_v2_wrong_checksum_delete(self): self.cloud.image_api_use_tasks = False fake_image = self.fake_image_dict fake_image['owner_specified.openstack.md5'] = 'a' fake_image['owner_specified.openstack.sha256'] = 'b' self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_image_dict, validate=dict( json={ 'container_format': 'bare', 'disk_format': 'qcow2', 'name': self.image_name, 'owner_specified.openstack.md5': fake_image[ 'owner_specified.openstack.md5' ], 'owner_specified.openstack.object': self.object_name, # noqa: E501 'owner_specified.openstack.sha256': fake_image[ 'owner_specified.openstack.sha256' ], 'visibility': 'private', } ), ), dict( method='PUT', uri=self.get_mock_url( 'image', append=['images', self.image_id, 'file'], base_url_append='v2', ), request_headers={ 'Content-Type': 'application/octet-stream' }, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.fake_image_dict['id']], base_url_append='v2', ), json=fake_image, ), dict( method='DELETE', uri='https://image.example.com/v2/images/{id}'.format( id=self.image_id ), ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_image, self.image_name, self.imagefile.name, is_public=False, md5='a', sha256='b', allow_duplicates=True, validate_checksum=True, ) self.assert_calls() def test_create_image_put_bad_int(self): self.cloud.image_api_use_tasks = False self.assertRaises( exceptions.SDKException, self._call_create_image, self.image_name, allow_duplicates=True, min_disk='fish', min_ram=0, ) self.assert_calls() def test_create_image_put_user_int(self): self.cloud.image_api_use_tasks = 
False args = { 'name': self.image_name, 'container_format': 'bare', 'disk_format': 'qcow2', 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, 'owner_specified.openstack.object': 'images/{name}'.format( name=self.image_name ), 'int_v': '12345', 'visibility': 'private', 'min_disk': 0, 'min_ram': 0, } ret = args.copy() ret['id'] = self.image_id ret['status'] = 'success' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.image_name], base_url_append='v2', ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['name=' + self.image_name], ), validate=dict(), json={'images': []}, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['os_hidden=True'], ), json={'images': []}, ), dict( method='POST', uri='https://image.example.com/v2/images', json=ret, validate=dict(json=args), ), dict( method='PUT', uri='https://image.example.com/v2/images/{id}/file'.format( id=self.image_id ), validate=dict( headers={ 'Content-Type': 'application/octet-stream', }, ), ), dict( method='GET', uri='https://image.example.com/v2/images/{id}'.format( id=self.image_id ), json=ret, ), dict( method='GET', uri='https://image.example.com/v2/images', complete_qs=True, json={'images': [ret]}, ), ] ) self._call_create_image( self.image_name, min_disk='0', min_ram=0, int_v=12345 ) self.assert_calls() def test_create_image_put_meta_int(self): self.cloud.image_api_use_tasks = False args = { 'name': self.image_name, 'container_format': 'bare', 'disk_format': 'qcow2', 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, 'owner_specified.openstack.object': 'images/{name}'.format( name=self.image_name ), 'int_v': 12345, 'visibility': 'private', 'min_disk': 0, 'min_ram': 0, } ret = args.copy() ret['id'] = self.image_id ret['status'] = 'success' 
ret['checksum'] = fakes.NO_MD5 self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.image_name], base_url_append='v2', ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['name=' + self.image_name], ), validate=dict(), json={'images': []}, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['os_hidden=True'], ), json={'images': []}, ), dict( method='POST', uri='https://image.example.com/v2/images', json=ret, validate=dict(json=args), ), dict( method='PUT', uri='https://image.example.com/v2/images/{id}/file'.format( id=self.image_id ), validate=dict( headers={ 'Content-Type': 'application/octet-stream', }, ), ), dict( method='GET', uri='https://image.example.com/v2/images/{id}'.format( id=self.image_id ), json=ret, ), dict( method='GET', uri='https://image.example.com/v2/images', complete_qs=True, json={'images': [ret]}, ), ] ) self._call_create_image( self.image_name, min_disk='0', min_ram=0, meta={'int_v': 12345} ) self.assert_calls() def test_create_image_put_protected(self): self.cloud.image_api_use_tasks = False args = { 'name': self.image_name, 'container_format': 'bare', 'disk_format': 'qcow2', 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, 'owner_specified.openstack.object': 'images/{name}'.format( name=self.image_name ), 'int_v': '12345', 'protected': False, 'visibility': 'private', 'min_disk': 0, 'min_ram': 0, } ret = args.copy() ret['id'] = self.image_id ret['status'] = 'success' ret['checksum'] = fakes.NO_MD5 self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images', self.image_name], base_url_append='v2', ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['name=' + self.image_name], ), validate=dict(), json={'images': 
[]}, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=['os_hidden=True'], ), json={'images': []}, ), dict( method='POST', uri='https://image.example.com/v2/images', json=ret, validate=dict(json=args), ), dict( method='PUT', uri='https://image.example.com/v2/images/{id}/file'.format( id=self.image_id ), validate=dict( headers={ 'Content-Type': 'application/octet-stream', }, ), ), dict( method='GET', uri='https://image.example.com/v2/images/{id}'.format( id=self.image_id ), json=ret, ), dict( method='GET', uri='https://image.example.com/v2/images', complete_qs=True, json={'images': [ret]}, ), ] ) self._call_create_image( self.image_name, min_disk='0', min_ram=0, properties={'int_v': 12345}, is_protected=False, ) self.assert_calls() class TestImageSuburl(BaseTestImage): def setUp(self): super().setUp() self.os_fixture.use_suburl() self.os_fixture.build_tokens() self.use_keystone_v3() self.use_glance( image_version_json='image-version-suburl.json', image_discovery_url='https://example.com/image', ) def test_list_images(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_search_return, ) ] ) [ self._compare_images(b, a) for a, b in zip(self.cloud.list_images(), [self.fake_image_dict]) ] self.assert_calls() def test_list_images_paginated(self): marker = str(uuid.uuid4()) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json={ 'images': [self.fake_image_dict], 'next': '/v2/images?marker={marker}'.format( marker=marker ), }, ), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2', qs_elements=[f'marker={marker}'], ), json=self.fake_search_return, ), ] ) [ self._compare_images(b, a) for a, b in zip( self.cloud.list_images(), [self.fake_image_dict, self.fake_image_dict], ) ] self.assert_calls() class 
TestImageVolume(BaseTestImage): def setUp(self): super().setUp() self.volume_id = str(uuid.uuid4()) def test_create_image_volume(self): self.register_uris( [ self.get_cinder_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'volumev3', append=['volumes', self.volume_id, 'action'], ), json={ 'os-volume_upload_image': {'image_id': self.image_id} }, validate=dict( json={ 'os-volume_upload_image': { 'container_format': 'bare', 'disk_format': 'qcow2', 'force': False, 'image_name': 'fake_image', } } ), ), # NOTE(notmorgan): Glance discovery happens here, insert the # glance discovery mock at this point, DO NOT use the # .use_glance() method, that is intended only for use in # .setUp self.get_glance_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_search_return, ), ] ) self.cloud.create_image( 'fake_image', self.imagefile.name, wait=True, timeout=1, volume={'id': self.volume_id}, ) self.assert_calls() def test_create_image_volume_duplicate(self): self.register_uris( [ self.get_cinder_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'volumev3', append=['volumes', self.volume_id, 'action'], ), json={ 'os-volume_upload_image': {'image_id': self.image_id} }, validate=dict( json={ 'os-volume_upload_image': { 'container_format': 'bare', 'disk_format': 'qcow2', 'force': True, 'image_name': 'fake_image', } } ), ), # NOTE(notmorgan): Glance discovery happens here, insert the # glance discovery mock at this point, DO NOT use the # .use_glance() method, that is intended only for use in # .setUp self.get_glance_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'image', append=['images'], base_url_append='v2' ), json=self.fake_search_return, ), ] ) self.cloud.create_image( 'fake_image', self.imagefile.name, wait=True, timeout=1, volume={'id': self.volume_id}, allow_duplicates=True, ) self.assert_calls() 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_image_snapshot.py0000664000175000017500000001122500000000000026060 0ustar00zuulzuul00000000000000# Copyright 2016 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestImageSnapshot(base.TestCase): def setUp(self): super().setUp() self.server_id = str(uuid.uuid4()) self.image_id = str(uuid.uuid4()) self.server_name = self.getUniqueString('name') self.fake_server = fakes.make_fake_server( self.server_id, self.server_name ) def test_create_image_snapshot_wait_until_active_never_active(self): snapshot_name = 'test-snapshot' fake_image = fakes.make_fake_image(self.image_id, status='pending') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri='{endpoint}/servers/{server_id}/action'.format( endpoint=fakes.COMPUTE_ENDPOINT, server_id=self.server_id, ), headers=dict( Location='{endpoint}/images/{image_id}'.format( endpoint='https://images.example.com', image_id=self.image_id, ) ), validate=dict( json={ "createImage": { "name": snapshot_name, "metadata": {}, } } ), ), self.get_glance_discovery_mock_dict(), dict( method='GET', uri='https://image.example.com/v2/images', json=dict(images=[fake_image]), ), ] ) self.assertRaises( 
exceptions.ResourceTimeout, self.cloud.create_image_snapshot, snapshot_name, dict(id=self.server_id), wait=True, timeout=0.01, ) # After the fifth call, we just keep polling get images for status. # Due to mocking sleep, we have no clue how many times we'll call it. self.assert_calls(stop_after=5, do_count=False) def test_create_image_snapshot_wait_active(self): snapshot_name = 'test-snapshot' pending_image = fakes.make_fake_image(self.image_id, status='pending') fake_image = fakes.make_fake_image(self.image_id) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri='{endpoint}/servers/{server_id}/action'.format( endpoint=fakes.COMPUTE_ENDPOINT, server_id=self.server_id, ), headers=dict( Location='{endpoint}/images/{image_id}'.format( endpoint='https://images.example.com', image_id=self.image_id, ) ), validate=dict( json={ "createImage": { "name": snapshot_name, "metadata": {}, } } ), ), self.get_glance_discovery_mock_dict(), dict( method='GET', uri='https://image.example.com/v2/images', json=dict(images=[pending_image]), ), dict( method='GET', uri='https://image.example.com/v2/images', json=dict(images=[fake_image]), ), ] ) image = self.cloud.create_image_snapshot( 'test-snapshot', dict(id=self.server_id), wait=True, timeout=2 ) self.assertEqual(image['id'], self.image_id) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_inventory.py0000664000175000017500000001305200000000000025114 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.cloud import inventory import openstack.config from openstack.tests import fakes from openstack.tests.unit import base class TestInventory(base.TestCase): def setUp(self): super().setUp() @mock.patch("openstack.config.loader.OpenStackConfig") @mock.patch("openstack.connection.Connection") def test__init(self, mock_cloud, mock_config): mock_config.return_value.get_all.return_value = [{}] inv = inventory.OpenStackInventory() mock_config.assert_called_once_with( config_files=openstack.config.loader.CONFIG_FILES ) self.assertIsInstance(inv.clouds, list) self.assertEqual(1, len(inv.clouds)) self.assertTrue(mock_config.return_value.get_all.called) @mock.patch("openstack.config.loader.OpenStackConfig") @mock.patch("openstack.connection.Connection") def test__init_one_cloud(self, mock_cloud, mock_config): mock_config.return_value.get_one.return_value = [{}] inv = inventory.OpenStackInventory(cloud='supercloud') mock_config.assert_called_once_with( config_files=openstack.config.loader.CONFIG_FILES ) self.assertIsInstance(inv.clouds, list) self.assertEqual(1, len(inv.clouds)) self.assertFalse(mock_config.return_value.get_all.called) mock_config.return_value.get_one.assert_called_once_with('supercloud') @mock.patch("openstack.config.loader.OpenStackConfig") @mock.patch("openstack.connection.Connection") def test_list_hosts(self, mock_cloud, mock_config): mock_config.return_value.get_all.return_value = [{}] inv = inventory.OpenStackInventory() server = dict(id='server_id', name='server_name') self.assertIsInstance(inv.clouds, list) 
self.assertEqual(1, len(inv.clouds)) inv.clouds[0].list_servers.return_value = [server] inv.clouds[0].get_openstack_vars.return_value = server ret = inv.list_hosts() inv.clouds[0].list_servers.assert_called_once_with( detailed=True, all_projects=False ) self.assertFalse(inv.clouds[0].get_openstack_vars.called) self.assertEqual([server], ret) @mock.patch("openstack.config.loader.OpenStackConfig") @mock.patch("openstack.connection.Connection") def test_list_hosts_no_detail(self, mock_cloud, mock_config): mock_config.return_value.get_all.return_value = [{}] inv = inventory.OpenStackInventory() server = self.cloud._normalize_server( fakes.make_fake_server('1234', 'test', 'ACTIVE', addresses={}) ) self.assertIsInstance(inv.clouds, list) self.assertEqual(1, len(inv.clouds)) inv.clouds[0].list_servers.return_value = [server] inv.list_hosts(expand=False) inv.clouds[0].list_servers.assert_called_once_with( detailed=False, all_projects=False ) self.assertFalse(inv.clouds[0].get_openstack_vars.called) @mock.patch("openstack.config.loader.OpenStackConfig") @mock.patch("openstack.connection.Connection") def test_list_hosts_all_projects(self, mock_cloud, mock_config): mock_config.return_value.get_all.return_value = [{}] inv = inventory.OpenStackInventory() server = dict(id='server_id', name='server_name') self.assertIsInstance(inv.clouds, list) self.assertEqual(1, len(inv.clouds)) inv.clouds[0].list_servers.return_value = [server] inv.clouds[0].get_openstack_vars.return_value = server ret = inv.list_hosts(all_projects=True) inv.clouds[0].list_servers.assert_called_once_with( detailed=True, all_projects=True ) self.assertFalse(inv.clouds[0].get_openstack_vars.called) self.assertEqual([server], ret) @mock.patch("openstack.config.loader.OpenStackConfig") @mock.patch("openstack.connection.Connection") def test_search_hosts(self, mock_cloud, mock_config): mock_config.return_value.get_all.return_value = [{}] inv = inventory.OpenStackInventory() server = dict(id='server_id', 
name='server_name') self.assertIsInstance(inv.clouds, list) self.assertEqual(1, len(inv.clouds)) inv.clouds[0].list_servers.return_value = [server] inv.clouds[0].get_openstack_vars.return_value = server ret = inv.search_hosts('server_id') self.assertEqual([server], ret) @mock.patch("openstack.config.loader.OpenStackConfig") @mock.patch("openstack.connection.Connection") def test_get_host(self, mock_cloud, mock_config): mock_config.return_value.get_all.return_value = [{}] inv = inventory.OpenStackInventory() server = dict(id='server_id', name='server_name') self.assertIsInstance(inv.clouds, list) self.assertEqual(1, len(inv.clouds)) inv.clouds[0].list_servers.return_value = [server] inv.clouds[0].get_openstack_vars.return_value = server ret = inv.get_host('server_id') self.assertEqual(server, ret) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_keypair.py0000664000175000017500000001416200000000000024526 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import fixtures from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestKeypair(base.TestCase): def setUp(self): super().setUp() self.keyname = self.getUniqueString('key') self.key = fakes.make_fake_keypair(self.keyname) self.useFixture( fixtures.MonkeyPatch( 'openstack.utils.maximum_supported_microversion', lambda *args, **kwargs: '2.10', ) ) def test_create_keypair(self): self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['os-keypairs'] ), json={'keypair': self.key}, validate=dict( json={ 'keypair': { 'name': self.key['name'], 'public_key': self.key['public_key'], } } ), ), ] ) new_key = self.cloud.create_keypair( self.keyname, self.key['public_key'] ) new_key_cmp = new_key.to_dict(ignore_none=True) new_key_cmp.pop('location') new_key_cmp.pop('id') self.assertEqual(new_key_cmp, self.key) self.assert_calls() def test_create_keypair_exception(self): self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['os-keypairs'] ), status_code=400, validate=dict( json={ 'keypair': { 'name': self.key['name'], 'public_key': self.key['public_key'], } } ), ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_keypair, self.keyname, self.key['public_key'], ) self.assert_calls() def test_delete_keypair(self): self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['os-keypairs', self.keyname], ), status_code=202, ), ] ) self.assertTrue(self.cloud.delete_keypair(self.keyname)) self.assert_calls() def test_delete_keypair_not_found(self): self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['os-keypairs', self.keyname], ), status_code=404, ), ] ) self.assertFalse(self.cloud.delete_keypair(self.keyname)) self.assert_calls() def test_list_keypairs(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', 
append=['os-keypairs'] ), json={'keypairs': [{'keypair': self.key}]}, ), ] ) keypairs = self.cloud.list_keypairs() self.assertEqual(len(keypairs), 1) self.assertEqual(keypairs[0].name, self.key['name']) self.assert_calls() def test_list_keypairs_empty_filters(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-keypairs'] ), json={'keypairs': [{'keypair': self.key}]}, ), ] ) keypairs = self.cloud.list_keypairs(filters=None) self.assertEqual(len(keypairs), 1) self.assertEqual(keypairs[0].name, self.key['name']) self.assert_calls() def test_list_keypairs_notempty_filters(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-keypairs'], qs_elements=['user_id=b'], ), json={'keypairs': [{'keypair': self.key}]}, ), ] ) keypairs = self.cloud.list_keypairs( filters={'user_id': 'b', 'fake': 'dummy'} ) self.assertEqual(len(keypairs), 1) self.assertEqual(keypairs[0].name, self.key['name']) self.assert_calls() def test_list_keypairs_exception(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-keypairs'] ), status_code=400, ), ] ) self.assertRaises(exceptions.SDKException, self.cloud.list_keypairs) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_limits.py0000664000175000017500000001022100000000000024353 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.unit import base class TestLimits(base.TestCase): def test_get_compute_limits(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['limits'] ), json={ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxSecurityGroupRules": 20, "maxSecurityGroups": 10, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalCoresUsed": 0, "totalInstancesUsed": 0, "totalRAMUsed": 0, "totalSecurityGroupsUsed": 0, "totalFloatingIpsUsed": 0, "totalServerGroupsUsed": 0, }, "rate": [], } }, ), ] ) self.cloud.get_compute_limits() self.assert_calls() def test_other_get_compute_limits(self): project = self.mock_for_keystone_projects( project_count=1, id_get=True )[0] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['limits'], qs_elements=[f'tenant_id={project.project_id}'], ), json={ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxSecurityGroupRules": 20, "maxSecurityGroups": 10, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalCoresUsed": 0, "totalInstancesUsed": 0, "totalRAMUsed": 0, "totalSecurityGroupsUsed": 0, "totalFloatingIpsUsed": 0, "totalServerGroupsUsed": 0, }, "rate": [], } }, ), ] ) self.cloud.get_compute_limits(project.project_id) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/cloud/test_magnum_services.py0000664000175000017500000000307600000000000026253 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.container_infrastructure_management.v1 import service from openstack.tests.unit import base magnum_service_obj = dict( binary='fake-service', created_at='2015-08-27T09:49:58-05:00', disabled_reason=None, host='fake-host', id=1, report_count=1, state='up', updated_at=None, ) class TestMagnumServices(base.TestCase): def test_list_magnum_services(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( service_type='container-infrastructure-management', resource='mservices', ), json=dict(mservices=[magnum_service_obj]), ) ] ) mservices_list = self.cloud.list_magnum_services() self.assertEqual( mservices_list[0].to_dict(computed=False), service.Service(**magnum_service_obj).to_dict(computed=False), ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_meta.py0000664000175000017500000013750400000000000024016 0ustar00zuulzuul00000000000000# Copyrigh # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from openstack.cloud import meta from openstack.compute.v2 import server as _server from openstack import connection from openstack.tests import fakes from openstack.tests.unit import base PRIVATE_V4 = '198.51.100.3' PUBLIC_V4 = '192.0.2.99' PUBLIC_V6 = '2001:0db8:face:0da0:face::0b00:1c' # rfc3849 class FakeConfig: def get_region_name(self, service_type=None): # TODO(efried): Validate service_type? return 'test-region' class FakeCloud: config = FakeConfig() name = 'test-name' private = False force_ipv4 = False service_val = True _unused = "useless" _local_ipv6 = True def get_flavor_name(self, id): return 'test-flavor-name' def get_image_name(self, id): return 'test-image-name' def get_volumes(self, server): return [] def has_service(self, service_name): return self.service_val def use_internal_network(self): return True def use_external_network(self): return True def get_internal_networks(self): return [] def get_external_networks(self): return [] def get_internal_ipv4_networks(self): return [] def get_external_ipv4_networks(self): return [] def get_internal_ipv6_networks(self): return [] def get_external_ipv6_networks(self): return [] def list_server_security_groups(self, server): return [] def get_default_network(self): return None standard_fake_server = fakes.make_fake_server( server_id='test-id-0', name='test-id-0', status='ACTIVE', addresses={ 'private': [ {'OS-EXT-IPS:type': 'fixed', 'addr': PRIVATE_V4, 'version': 4} ], 'public': [ {'OS-EXT-IPS:type': 'floating', 'addr': PUBLIC_V4, 'version': 4} ], }, flavor={'id': '101'}, 
image={'id': '471c2475-da2f-47ac-aba5-cb4aa3d546f5'}, ) standard_fake_server['metadata'] = {'group': 'test-group'} SUBNETS_WITH_NAT = [ { 'name': '', 'enable_dhcp': True, 'network_id': '5ef0358f-9403-4f7b-9151-376ca112abf7', 'tenant_id': '29c79f394b2946f1a0f8446d715dc301', 'dns_nameservers': [], 'ipv6_ra_mode': None, 'allocation_pools': [{'start': '10.10.10.2', 'end': '10.10.10.254'}], 'gateway_ip': '10.10.10.1', 'ipv6_address_mode': None, 'ip_version': 4, 'host_routes': [], 'cidr': '10.10.10.0/24', 'id': '14025a85-436e-4418-b0ee-f5b12a50f9b4', }, ] OSIC_NETWORKS = [ { 'admin_state_up': True, 'id': '7004a83a-13d3-4dcd-8cf5-52af1ace4cae', 'mtu': 0, 'name': 'GATEWAY_NET', 'router:external': True, 'shared': True, 'status': 'ACTIVE', 'subnets': ['cf785ee0-6cc9-4712-be3d-0bf6c86cf455'], 'tenant_id': '7a1ca9f7cc4e4b13ac0ed2957f1e8c32', }, { 'admin_state_up': True, 'id': '405abfcc-77dc-49b2-a271-139619ac9b26', 'mtu': 0, 'name': 'openstackjenkins-network1', 'router:external': False, 'shared': False, 'status': 'ACTIVE', 'subnets': ['a47910bc-f649-45db-98ec-e2421c413f4e'], 'tenant_id': '7e9c4d5842b3451d94417bd0af03a0f4', }, { 'admin_state_up': True, 'id': '54753d2c-0a58-4928-9b32-084c59dd20a6', 'mtu': 0, 'name': 'GATEWAY_NET_V6', 'router:external': True, 'shared': True, 'status': 'ACTIVE', 'subnets': [ '9c21d704-a8b9-409a-b56d-501cb518d380', '7cb0ce07-64c3-4a3d-92d3-6f11419b45b9', ], 'tenant_id': '7a1ca9f7cc4e4b13ac0ed2957f1e8c32', }, ] OSIC_SUBNETS = [ { 'allocation_pools': [ {'end': '172.99.106.254', 'start': '172.99.106.5'} ], 'cidr': '172.99.106.0/24', 'dns_nameservers': ['69.20.0.164', '69.20.0.196'], 'enable_dhcp': True, 'gateway_ip': '172.99.106.1', 'host_routes': [], 'id': 'cf785ee0-6cc9-4712-be3d-0bf6c86cf455', 'ip_version': 4, 'ipv6_address_mode': None, 'ipv6_ra_mode': None, 'name': 'GATEWAY_NET', 'network_id': '7004a83a-13d3-4dcd-8cf5-52af1ace4cae', 'subnetpool_id': None, 'tenant_id': '7a1ca9f7cc4e4b13ac0ed2957f1e8c32', }, { 'allocation_pools': [{'end': 
'10.0.1.254', 'start': '10.0.1.2'}], 'cidr': '10.0.1.0/24', 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], 'enable_dhcp': True, 'gateway_ip': '10.0.1.1', 'host_routes': [], 'id': 'a47910bc-f649-45db-98ec-e2421c413f4e', 'ip_version': 4, 'ipv6_address_mode': None, 'ipv6_ra_mode': None, 'name': 'openstackjenkins-subnet1', 'network_id': '405abfcc-77dc-49b2-a271-139619ac9b26', 'subnetpool_id': None, 'tenant_id': '7e9c4d5842b3451d94417bd0af03a0f4', }, { 'allocation_pools': [{'end': '10.255.255.254', 'start': '10.0.0.2'}], 'cidr': '10.0.0.0/8', 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], 'enable_dhcp': True, 'gateway_ip': '10.0.0.1', 'host_routes': [], 'id': '9c21d704-a8b9-409a-b56d-501cb518d380', 'ip_version': 4, 'ipv6_address_mode': None, 'ipv6_ra_mode': None, 'name': 'GATEWAY_SUBNET_V6V4', 'network_id': '54753d2c-0a58-4928-9b32-084c59dd20a6', 'subnetpool_id': None, 'tenant_id': '7a1ca9f7cc4e4b13ac0ed2957f1e8c32', }, { 'allocation_pools': [ { 'end': '2001:4800:1ae1:18:ffff:ffff:ffff:ffff', 'start': '2001:4800:1ae1:18::2', } ], 'cidr': '2001:4800:1ae1:18::/64', 'dns_nameservers': ['2001:4860:4860::8888'], 'enable_dhcp': True, 'gateway_ip': '2001:4800:1ae1:18::1', 'host_routes': [], 'id': '7cb0ce07-64c3-4a3d-92d3-6f11419b45b9', 'ip_version': 6, 'ipv6_address_mode': 'dhcpv6-stateless', 'ipv6_ra_mode': None, 'name': 'GATEWAY_SUBNET_V6V6', 'network_id': '54753d2c-0a58-4928-9b32-084c59dd20a6', 'subnetpool_id': None, 'tenant_id': '7a1ca9f7cc4e4b13ac0ed2957f1e8c32', }, ] class TestMeta(base.TestCase): def test_find_nova_addresses_key_name(self): # Note 198.51.100.0/24 is TEST-NET-2 from rfc5737 addrs = { 'public': [{'addr': '198.51.100.1', 'version': 4}], 'private': [{'addr': '192.0.2.5', 'version': 4}], } self.assertEqual( ['198.51.100.1'], meta.find_nova_addresses(addrs, key_name='public'), ) self.assertEqual([], meta.find_nova_addresses(addrs, key_name='foo')) def test_find_nova_addresses_ext_tag(self): addrs = { 'public': [ { 'OS-EXT-IPS:type': 'fixed', 'addr': '198.51.100.2', 
'version': 4, } ] } self.assertEqual( ['198.51.100.2'], meta.find_nova_addresses(addrs, ext_tag='fixed') ) self.assertEqual([], meta.find_nova_addresses(addrs, ext_tag='foo')) def test_find_nova_addresses_key_name_and_ext_tag(self): addrs = { 'public': [ { 'OS-EXT-IPS:type': 'fixed', 'addr': '198.51.100.2', 'version': 4, } ] } self.assertEqual( ['198.51.100.2'], meta.find_nova_addresses( addrs, key_name='public', ext_tag='fixed' ), ) self.assertEqual( [], meta.find_nova_addresses(addrs, key_name='public', ext_tag='foo'), ) self.assertEqual( [], meta.find_nova_addresses(addrs, key_name='bar', ext_tag='fixed'), ) def test_find_nova_addresses_all(self): addrs = { 'public': [ { 'OS-EXT-IPS:type': 'fixed', 'addr': '198.51.100.2', 'version': 4, } ] } self.assertEqual( ['198.51.100.2'], meta.find_nova_addresses( addrs, key_name='public', ext_tag='fixed', version=4 ), ) self.assertEqual( [], meta.find_nova_addresses( addrs, key_name='public', ext_tag='fixed', version=6 ), ) def test_find_nova_addresses_floating_first(self): # Note 198.51.100.0/24 is TEST-NET-2 from rfc5737 addrs = { 'private': [ {'addr': '192.0.2.5', 'version': 4, 'OS-EXT-IPS:type': 'fixed'} ], 'public': [ { 'addr': '198.51.100.1', 'version': 4, 'OS-EXT-IPS:type': 'floating', } ], } self.assertEqual( ['198.51.100.1', '192.0.2.5'], meta.find_nova_addresses(addrs) ) def test_get_server_ip(self): srv = meta.obj_to_munch(standard_fake_server) self.assertEqual(PRIVATE_V4, meta.get_server_ip(srv, ext_tag='fixed')) self.assertEqual( PUBLIC_V4, meta.get_server_ip(srv, ext_tag='floating') ) def test_get_server_private_ip(self): self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ 'networks': [ {'id': 'test-net-id', 'name': 'test-net-name'} ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': SUBNETS_WITH_NAT}, ), ] ) srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', addresses={ 'private': [ { 
'OS-EXT-IPS:type': 'fixed', 'addr': PRIVATE_V4, 'version': 4, } ], 'public': [ { 'OS-EXT-IPS:type': 'floating', 'addr': PUBLIC_V4, 'version': 4, } ], }, ) self.assertEqual( PRIVATE_V4, meta.get_server_private_ip(srv, self.cloud) ) self.assert_calls() def test_get_server_multiple_private_ip(self): self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ 'networks': [{'id': 'test-net-id', 'name': 'test-net'}] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': SUBNETS_WITH_NAT}, ), ] ) shared_mac = '11:22:33:44:55:66' distinct_mac = '66:55:44:33:22:11' srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', addresses={ 'test-net': [ { 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': distinct_mac, 'addr': '10.0.0.100', 'version': 4, }, { 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': shared_mac, 'addr': '10.0.0.101', 'version': 4, }, ], 'public': [ { 'OS-EXT-IPS:type': 'floating', 'OS-EXT-IPS-MAC:mac_addr': shared_mac, 'addr': PUBLIC_V4, 'version': 4, } ], }, ) self.assertEqual( '10.0.0.101', meta.get_server_private_ip(srv, self.cloud) ) self.assert_calls() @mock.patch.object(connection.Connection, 'has_service') @mock.patch.object(connection.Connection, 'get_volumes') @mock.patch.object(connection.Connection, 'get_image_name') @mock.patch.object(connection.Connection, 'get_flavor_name') def test_get_server_private_ip_devstack( self, mock_get_flavor_name, mock_get_image_name, mock_get_volumes, mock_has_service, ): mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' mock_get_flavor_name.return_value = 'm1.tiny' mock_get_volumes.return_value = [] mock_has_service.return_value = True fake_server = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', flavor={'id': '1'}, image={ 'name': 'cirros-0.3.4-x86_64-uec', 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', }, addresses={ 'test_pnztt_net': [ { 'OS-EXT-IPS:type': 
'fixed', 'addr': PRIVATE_V4, 'version': 4, 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', } ] }, ) self.register_uris( [ dict( method='GET', uri=( 'https://network.example.com/v2.0/ports?' 'device_id=test-id' ), json={ 'ports': [ { 'id': 'test_port_id', 'mac_address': 'fa:16:3e:ae:7d:42', 'device_id': 'test-id', } ] }, ), dict( method='GET', uri=( 'https://network.example.com/v2.0/' 'floatingips?port_id=test_port_id' ), json={'floatingips': []}, ), dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ 'networks': [ { 'id': 'test_pnztt_net', 'name': 'test_pnztt_net', 'router:external': False, }, {'id': 'private', 'name': 'private'}, ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': SUBNETS_WITH_NAT}, ), self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', fake_server['id']], ), json=fake_server, ), dict( method='GET', uri='{endpoint}/servers/test-id/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': []}, ), ] ) srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) self.assertEqual(PRIVATE_V4, srv['private_v4']) self.assert_calls() @mock.patch.object(connection.Connection, 'get_volumes') @mock.patch.object(connection.Connection, 'get_image_name') @mock.patch.object(connection.Connection, 'get_flavor_name') def test_get_server_private_ip_no_fip( self, mock_get_flavor_name, mock_get_image_name, mock_get_volumes ): self.cloud._floating_ip_source = None mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' mock_get_flavor_name.return_value = 'm1.tiny' mock_get_volumes.return_value = [] fake_server = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', flavor={'id': '1'}, image={ 'name': 'cirros-0.3.4-x86_64-uec', 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', }, addresses={ 'test_pnztt_net': [ { 'OS-EXT-IPS:type': 'fixed', 'addr': PRIVATE_V4, 'version': 4, 
'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', } ] }, ) self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ 'networks': [ { 'id': 'test_pnztt_net', 'name': 'test_pnztt_net', 'router:external': False, }, {'id': 'private', 'name': 'private'}, ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': SUBNETS_WITH_NAT}, ), self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', fake_server['id']], ), json=fake_server, ), dict( method='GET', uri='{endpoint}/servers/test-id/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': []}, ), ] ) srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) self.assertEqual(PRIVATE_V4, srv['private_v4']) self.assert_calls() @mock.patch.object(connection.Connection, 'get_volumes') @mock.patch.object(connection.Connection, 'get_image_name') @mock.patch.object(connection.Connection, 'get_flavor_name') def test_get_server_cloud_no_fips( self, mock_get_flavor_name, mock_get_image_name, mock_get_volumes ): self.cloud._floating_ip_source = None mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' mock_get_flavor_name.return_value = 'm1.tiny' mock_get_volumes.return_value = [] fake_server = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', flavor={'id': '1'}, image={ 'name': 'cirros-0.3.4-x86_64-uec', 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', }, addresses={ 'test_pnztt_net': [ { 'addr': PRIVATE_V4, 'version': 4, } ] }, ) self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ 'networks': [ { 'id': 'test_pnztt_net', 'name': 'test_pnztt_net', 'router:external': False, }, {'id': 'private', 'name': 'private'}, ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': SUBNETS_WITH_NAT}, ), self.get_nova_discovery_mock_dict(), dict( 
method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', fake_server['id']], ), json=fake_server, ), dict( method='GET', uri='{endpoint}/servers/test-id/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': []}, ), ] ) srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) self.assertEqual(PRIVATE_V4, srv['private_v4']) self.assert_calls() @mock.patch.object(connection.Connection, 'has_service') @mock.patch.object(connection.Connection, 'get_volumes') @mock.patch.object(connection.Connection, 'get_image_name') @mock.patch.object(connection.Connection, 'get_flavor_name') def test_get_server_cloud_missing_fips( self, mock_get_flavor_name, mock_get_image_name, mock_get_volumes, mock_has_service, ): mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' mock_get_flavor_name.return_value = 'm1.tiny' mock_get_volumes.return_value = [] mock_has_service.return_value = True fake_server = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', flavor={'id': '1'}, image={ 'name': 'cirros-0.3.4-x86_64-uec', 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', }, addresses={ 'test_pnztt_net': [ { 'addr': PRIVATE_V4, 'version': 4, 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', } ] }, ) self.register_uris( [ # self.get_nova_discovery_mock_dict(), dict( method='GET', uri=( 'https://network.example.com/v2.0/ports?' 
'device_id=test-id' ), json={ 'ports': [ { 'id': 'test_port_id', 'mac_address': 'fa:16:3e:ae:7d:42', 'device_id': 'test-id', } ] }, ), dict( method='GET', uri=( 'https://network.example.com/v2.0/floatingips' '?port_id=test_port_id' ), json={ 'floatingips': [ { 'id': 'floating-ip-id', 'port_id': 'test_port_id', 'fixed_ip_address': PRIVATE_V4, 'floating_ip_address': PUBLIC_V4, } ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ 'networks': [ { 'id': 'test_pnztt_net', 'name': 'test_pnztt_net', 'router:external': False, }, { 'id': 'private', 'name': 'private', }, ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': SUBNETS_WITH_NAT}, ), self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', fake_server['id']], ), json=fake_server, ), dict( method='GET', uri='{endpoint}/servers/test-id/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': []}, ), ] ) srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) self.assertEqual(PUBLIC_V4, srv['public_v4']) self.assert_calls() @mock.patch.object(connection.Connection, 'get_volumes') @mock.patch.object(connection.Connection, 'get_image_name') @mock.patch.object(connection.Connection, 'get_flavor_name') def test_get_server_cloud_rackspace_v6( self, mock_get_flavor_name, mock_get_image_name, mock_get_volumes ): self.cloud.config.config['has_network'] = False self.cloud._floating_ip_source = None self.cloud.force_ipv4 = False self.cloud._local_ipv6 = True mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' mock_get_flavor_name.return_value = 'm1.tiny' mock_get_volumes.return_value = [] fake_server = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', flavor={'id': '1'}, image={ 'name': 'cirros-0.3.4-x86_64-uec', 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', }, addresses={ 'private': [{'addr': "10.223.160.141", 
'version': 4}], 'public': [ {'addr': "104.130.246.91", 'version': 4}, { 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", 'version': 6, }, ], }, ) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', fake_server['id']], ), json=fake_server, ), dict( method='GET', uri='{endpoint}/servers/test-id/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': []}, ), ] ) srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) self.assertEqual("10.223.160.141", srv['private_v4']) self.assertEqual("104.130.246.91", srv['public_v4']) self.assertEqual( "2001:4800:7819:103:be76:4eff:fe05:8525", srv['public_v6'] ) self.assertEqual( "2001:4800:7819:103:be76:4eff:fe05:8525", srv['interface_ip'] ) self.assert_calls() @mock.patch.object(connection.Connection, 'get_volumes') @mock.patch.object(connection.Connection, 'get_image_name') @mock.patch.object(connection.Connection, 'get_flavor_name') def test_get_server_cloud_osic_split( self, mock_get_flavor_name, mock_get_image_name, mock_get_volumes ): self.cloud._floating_ip_source = None self.cloud.force_ipv4 = False self.cloud._local_ipv6 = True self.cloud._external_ipv4_names = ['GATEWAY_NET'] self.cloud._external_ipv6_names = ['GATEWAY_NET_V6'] self.cloud._internal_ipv4_names = ['GATEWAY_NET_V6'] self.cloud._internal_ipv6_names = [] mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' mock_get_flavor_name.return_value = 'm1.tiny' mock_get_volumes.return_value = [] fake_server = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', flavor={'id': '1'}, image={ 'name': 'cirros-0.3.4-x86_64-uec', 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', }, addresses={ 'private': [{'addr': "10.223.160.141", 'version': 4}], 'public': [ {'addr': "104.130.246.91", 'version': 4}, { 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", 'version': 6, }, ], }, ) self.register_uris( [ dict( 
method='GET', uri='https://network.example.com/v2.0/networks', json={'networks': OSIC_NETWORKS}, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': OSIC_SUBNETS}, ), self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', fake_server['id']], ), json=fake_server, ), dict( method='GET', uri='{endpoint}/servers/test-id/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': []}, ), ] ) srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) self.assertEqual("10.223.160.141", srv['private_v4']) self.assertEqual("104.130.246.91", srv['public_v4']) self.assertEqual( "2001:4800:7819:103:be76:4eff:fe05:8525", srv['public_v6'] ) self.assertEqual( "2001:4800:7819:103:be76:4eff:fe05:8525", srv['interface_ip'] ) self.assert_calls() def test_get_server_external_ipv4_neutron(self): # Testing Clouds with Neutron self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ 'networks': [ { 'id': 'test-net-id', 'name': 'test-net', 'router:external': True, } ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': SUBNETS_WITH_NAT}, ), ] ) srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', addresses={'test-net': [{'addr': PUBLIC_V4, 'version': 4}]}, ) ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) self.assertEqual(PUBLIC_V4, ip) self.assert_calls() def test_get_server_external_provider_ipv4_neutron(self): # Testing Clouds with Neutron self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ 'networks': [ { 'id': 'test-net-id', 'name': 'test-net', 'provider:network_type': 'vlan', 'provider:physical_network': 'vlan', } ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': SUBNETS_WITH_NAT}, ), ] ) srv = fakes.make_fake_server( 
server_id='test-id', name='test-name', status='ACTIVE', addresses={'test-net': [{'addr': PUBLIC_V4, 'version': 4}]}, ) ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) self.assertEqual(PUBLIC_V4, ip) self.assert_calls() def test_get_server_internal_provider_ipv4_neutron(self): # Testing Clouds with Neutron self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ 'networks': [ { 'id': 'test-net-id', 'name': 'test-net', 'router:external': False, 'provider:network_type': 'vxlan', 'provider:physical_network': None, } ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': SUBNETS_WITH_NAT}, ), ] ) srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', addresses={'test-net': [{'addr': PRIVATE_V4, 'version': 4}]}, ) self.assertIsNone( meta.get_server_external_ipv4(cloud=self.cloud, server=srv) ) int_ip = meta.get_server_private_ip(cloud=self.cloud, server=srv) self.assertEqual(PRIVATE_V4, int_ip) self.assert_calls() def test_get_server_external_none_ipv4_neutron(self): # Testing Clouds with Neutron self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', json={ 'networks': [ { 'id': 'test-net-id', 'name': 'test-net', 'router:external': False, } ] }, ), dict( method='GET', uri='https://network.example.com/v2.0/subnets', json={'subnets': SUBNETS_WITH_NAT}, ), ] ) srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', addresses={'test-net': [{'addr': PUBLIC_V4, 'version': 4}]}, ) ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) self.assertIsNone(ip) self.assert_calls() def test_get_server_external_ipv4_neutron_accessIPv4(self): srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE' ) srv['accessIPv4'] = PUBLIC_V4 ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) self.assertEqual(PUBLIC_V4, ip) def 
test_get_server_external_ipv4_neutron_accessIPv6(self): srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE' ) srv['accessIPv6'] = PUBLIC_V6 ip = meta.get_server_external_ipv6(server=srv) self.assertEqual(PUBLIC_V6, ip) def test_get_server_external_ipv4_neutron_exception(self): # Testing Clouds with a non working Neutron self.register_uris( [ dict( method='GET', uri='https://network.example.com/v2.0/networks', status_code=404, ) ] ) srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', addresses={'public': [{'addr': PUBLIC_V4, 'version': 4}]}, ) ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) self.assertEqual(PUBLIC_V4, ip) self.assert_calls() def test_get_server_external_ipv4_nova_public(self): # Testing Clouds w/o Neutron and a network named public self.cloud.config.config['has_network'] = False srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', addresses={'public': [{'addr': PUBLIC_V4, 'version': 4}]}, ) ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) self.assertEqual(PUBLIC_V4, ip) def test_get_server_external_ipv4_nova_none(self): # Testing Clouds w/o Neutron or a globally routable IP self.cloud.config.config['has_network'] = False srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', addresses={'test-net': [{'addr': PRIVATE_V4}]}, ) ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) self.assertIsNone(ip) def test_get_server_external_ipv6(self): srv = fakes.make_fake_server( server_id='test-id', name='test-name', status='ACTIVE', addresses={ 'test-net': [ {'addr': PUBLIC_V4, 'version': 4}, {'addr': PUBLIC_V6, 'version': 6}, ] }, ) ip = meta.get_server_external_ipv6(srv) self.assertEqual(PUBLIC_V6, ip) def test_get_groups_from_server(self): server_vars = { 'flavor': 'test-flavor', 'image': 'test-image', 'az': 'test-az', } self.assertEqual( [ 'test-name', 'test-region', 
'test-name_test-region', 'test-group', 'instance-test-id-0', 'meta-group_test-group', 'test-az', 'test-region_test-az', 'test-name_test-region_test-az', ], meta.get_groups_from_server( FakeCloud(), meta.obj_to_munch(standard_fake_server), server_vars, ), ) def test_obj_list_to_munch(self): """Test conversion of a list of objects to a list of dictonaries""" class obj0: value = 0 class obj1: value = 1 list = [obj0, obj1] new_list = meta.obj_list_to_munch(list) self.assertEqual(new_list[0]['value'], 0) self.assertEqual(new_list[1]['value'], 1) @mock.patch.object(FakeCloud, 'list_server_security_groups') def test_get_security_groups(self, mock_list_server_security_groups): '''This test verifies that calling get_hostvars_froms_server ultimately calls list_server_security_groups, and that the return value from list_server_security_groups ends up in server['security_groups'].''' mock_list_server_security_groups.return_value = [ {'name': 'testgroup', 'id': '1'} ] server = meta.obj_to_munch(standard_fake_server) hostvars = meta.get_hostvars_from_server(FakeCloud(), server) mock_list_server_security_groups.assert_called_once_with(server) self.assertEqual('testgroup', hostvars['security_groups'][0]['name']) @mock.patch.object(meta, 'get_server_external_ipv6') @mock.patch.object(meta, 'get_server_external_ipv4') def test_basic_hostvars( self, mock_get_server_external_ipv4, mock_get_server_external_ipv6 ): mock_get_server_external_ipv4.return_value = PUBLIC_V4 mock_get_server_external_ipv6.return_value = PUBLIC_V6 hostvars = meta.get_hostvars_from_server( FakeCloud(), self.cloud._normalize_server( meta.obj_to_munch(standard_fake_server) ), ) self.assertNotIn('links', hostvars) self.assertEqual(PRIVATE_V4, hostvars['private_v4']) self.assertEqual(PUBLIC_V4, hostvars['public_v4']) self.assertEqual(PUBLIC_V6, hostvars['public_v6']) self.assertEqual(PUBLIC_V6, hostvars['interface_ip']) self.assertEqual('RegionOne', hostvars['region']) self.assertEqual('_test_cloud_', 
hostvars['cloud']) self.assertIn('location', hostvars) self.assertEqual('_test_cloud_', hostvars['location']['cloud']) self.assertEqual('RegionOne', hostvars['location']['region_name']) self.assertEqual( fakes.PROJECT_ID, hostvars['location']['project']['id'] ) self.assertEqual("test-image-name", hostvars['image']['name']) self.assertEqual( standard_fake_server['image']['id'], hostvars['image']['id'] ) self.assertNotIn('links', hostvars['image']) self.assertEqual( standard_fake_server['flavor']['id'], hostvars['flavor']['id'] ) self.assertEqual("test-flavor-name", hostvars['flavor']['name']) self.assertNotIn('links', hostvars['flavor']) # test having volumes # test volume exception self.assertEqual([], hostvars['volumes']) @mock.patch.object(meta, 'get_server_external_ipv6') @mock.patch.object(meta, 'get_server_external_ipv4') def test_ipv4_hostvars( self, mock_get_server_external_ipv4, mock_get_server_external_ipv6 ): mock_get_server_external_ipv4.return_value = PUBLIC_V4 mock_get_server_external_ipv6.return_value = PUBLIC_V6 fake_cloud = FakeCloud() fake_cloud.force_ipv4 = True hostvars = meta.get_hostvars_from_server( fake_cloud, meta.obj_to_munch(standard_fake_server) ) self.assertEqual(PUBLIC_V4, hostvars['interface_ip']) self.assertEqual('', hostvars['public_v6']) @mock.patch.object(meta, 'get_server_external_ipv4') def test_private_interface_ip(self, mock_get_server_external_ipv4): mock_get_server_external_ipv4.return_value = PUBLIC_V4 cloud = FakeCloud() cloud.private = True hostvars = meta.get_hostvars_from_server( cloud, meta.obj_to_munch(standard_fake_server) ) self.assertEqual(PRIVATE_V4, hostvars['interface_ip']) @mock.patch.object(meta, 'get_server_external_ipv4') def test_image_string(self, mock_get_server_external_ipv4): mock_get_server_external_ipv4.return_value = PUBLIC_V4 server = standard_fake_server server['image'] = 'fake-image-id' hostvars = meta.get_hostvars_from_server( FakeCloud(), meta.obj_to_munch(server) ) 
self.assertEqual('fake-image-id', hostvars['image']['id']) def test_az(self): server = standard_fake_server server['OS-EXT-AZ:availability_zone'] = 'az1' hostvars = self.cloud._normalize_server(meta.obj_to_munch(server)) self.assertEqual('az1', hostvars['az']) def test_current_location(self): self.assertEqual( { 'cloud': '_test_cloud_', 'project': { 'id': mock.ANY, 'name': 'admin', 'domain_id': None, 'domain_name': 'default', }, 'region_name': 'RegionOne', 'zone': None, }, self.cloud.current_location, ) def test_current_project(self): self.assertEqual( { 'id': mock.ANY, 'name': 'admin', 'domain_id': None, 'domain_name': 'default', }, self.cloud.current_project, ) def test_has_volume(self): mock_cloud = mock.MagicMock() fake_volume = fakes.FakeVolume( id='volume1', status='available', name='Volume 1 Display Name', attachments=[{'device': '/dev/sda0'}], ) fake_volume_dict = meta.obj_to_munch(fake_volume) mock_cloud.get_volumes.return_value = [fake_volume_dict] hostvars = meta.get_hostvars_from_server( mock_cloud, meta.obj_to_munch(standard_fake_server) ) self.assertEqual('volume1', hostvars['volumes'][0]['id']) self.assertEqual('/dev/sda0', hostvars['volumes'][0]['device']) def test_has_no_volume_service(self): fake_cloud = FakeCloud() fake_cloud.service_val = False hostvars = meta.get_hostvars_from_server( fake_cloud, meta.obj_to_munch(standard_fake_server) ) self.assertEqual([], hostvars['volumes']) def test_unknown_volume_exception(self): mock_cloud = mock.MagicMock() class FakeException(Exception): pass def side_effect(*args): raise FakeException("No Volumes") mock_cloud.get_volumes.side_effect = side_effect self.assertRaises( FakeException, meta.get_hostvars_from_server, mock_cloud, meta.obj_to_munch(standard_fake_server), ) def test_obj_to_munch(self): cloud = FakeCloud() cloud.subcloud = FakeCloud() cloud_dict = meta.obj_to_munch(cloud) self.assertEqual(FakeCloud.name, cloud_dict['name']) self.assertNotIn('_unused', cloud_dict) 
self.assertNotIn('get_flavor_name', cloud_dict) self.assertNotIn('subcloud', cloud_dict) self.assertTrue(hasattr(cloud_dict, 'name')) self.assertEqual(cloud_dict.name, cloud_dict['name']) def test_obj_to_munch_subclass(self): class FakeObjDict(dict): additional = 1 obj = FakeObjDict(foo='bar') obj_dict = meta.obj_to_munch(obj) self.assertIn('additional', obj_dict) self.assertIn('foo', obj_dict) self.assertEqual(obj_dict['additional'], 1) self.assertEqual(obj_dict['foo'], 'bar') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_network.py0000664000175000017500000005706700000000000024566 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import copy from unittest import mock import testtools from openstack import exceptions from openstack.network.v2 import network as _network from openstack.tests.unit import base class TestNeutronExtensions(base.TestCase): def test__neutron_extensions(self): body = [ { "updated": "2014-06-1T10:00:00-00:00", "name": "Distributed Virtual Router", "links": [], "alias": "dvr", "description": "Enables configuration of Distributed Virtual Routers.", # noqa: E501 }, { "updated": "2013-07-23T10:00:00-00:00", "name": "Allowed Address Pairs", "links": [], "alias": "allowed-address-pairs", "description": "Provides allowed address pairs", }, ] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json=dict(extensions=body), ) ] ) extensions = self.cloud._neutron_extensions() self.assertEqual({'dvr', 'allowed-address-pairs'}, extensions) self.assert_calls() def test__neutron_extensions_fails(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), status_code=404, ) ] ) with testtools.ExpectedException(exceptions.NotFoundException): self.cloud._neutron_extensions() self.assert_calls() def test__has_neutron_extension(self): body = [ { "updated": "2014-06-1T10:00:00-00:00", "name": "Distributed Virtual Router", "links": [], "alias": "dvr", "description": "Enables configuration of Distributed Virtual Routers.", # noqa: E501 }, { "updated": "2013-07-23T10:00:00-00:00", "name": "Allowed Address Pairs", "links": [], "alias": "allowed-address-pairs", "description": "Provides allowed address pairs", }, ] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json=dict(extensions=body), ) ] ) self.assertTrue(self.cloud._has_neutron_extension('dvr')) self.assert_calls() def test__has_neutron_extension_missing(self): body = [ { "updated": "2014-06-1T10:00:00-00:00", "name": "Distributed Virtual 
Router", "links": [], "alias": "dvr", "description": "Enables configuration of Distributed Virtual Routers.", # noqa: E501 }, { "updated": "2013-07-23T10:00:00-00:00", "name": "Allowed Address Pairs", "links": [], "alias": "allowed-address-pairs", "description": "Provides allowed address pairs", }, ] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json=dict(extensions=body), ) ] ) self.assertFalse(self.cloud._has_neutron_extension('invalid')) self.assert_calls() class TestNetworks(base.TestCase): mock_new_network_rep = { 'provider:physical_network': None, 'ipv6_address_scope': None, 'revision_number': 3, 'port_security_enabled': True, 'provider:network_type': 'local', 'id': '881d1bb7-a663-44c0-8f9f-ee2765b74486', 'router:external': False, 'availability_zone_hints': [], 'availability_zones': [], 'provider:segmentation_id': None, 'ipv4_address_scope': None, 'shared': False, 'project_id': '861808a93da0484ea1767967c4df8a23', 'status': 'ACTIVE', 'subnets': [], 'description': '', 'tags': [], 'updated_at': '2017-04-22T19:22:53Z', 'is_default': False, 'qos_policy_id': None, 'name': 'netname', 'admin_state_up': True, 'created_at': '2017-04-22T19:22:53Z', 'mtu': 0, 'dns_domain': 'sample.openstack.org.', 'vlan_transparent': None, 'segments': None, } network_availability_zone_extension = { "alias": "network_availability_zone", "updated": "2015-01-01T10:00:00-00:00", "description": "Availability zone support for router.", "links": [], "name": "Network Availability Zone", } enabled_neutron_extensions = [network_availability_zone_extension] def _compare_networks(self, exp, real): self.assertDictEqual( _network.Network(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def test_list_networks(self): net1 = {'id': '1', 'name': 'net1'} net2 = {'id': '2', 'name': 'net2'} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), 
json={'networks': [net1, net2]}, ) ] ) nets = self.cloud.list_networks() self.assertEqual( [ _network.Network(**i).to_dict(computed=False) for i in [net1, net2] ], [i.to_dict(computed=False) for i in nets], ) self.assert_calls() def test_list_networks_filtered(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=["name=test"], ), json={'networks': []}, ) ] ) self.cloud.list_networks(filters={'name': 'test'}) self.assert_calls() def test_list_networks_neutron_not_found(self): self.use_nothing() self.cloud.has_service = mock.Mock(return_value=False) self.assertEqual([], self.cloud.list_networks()) self.assert_calls() def test_create_network(self): self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'network': self.mock_new_network_rep}, validate=dict( json={ 'network': { 'admin_state_up': True, 'name': 'netname', } } ), ) ] ) network = self.cloud.create_network("netname") self._compare_networks(self.mock_new_network_rep, network) self.assert_calls() def test_create_network_specific_tenant(self): project_id = "project_id_value" mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep['project_id'] = project_id self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'network': mock_new_network_rep}, validate=dict( json={ 'network': { 'admin_state_up': True, 'name': 'netname', 'project_id': project_id, } } ), ) ] ) network = self.cloud.create_network("netname", project_id=project_id) self._compare_networks(mock_new_network_rep, network) self.assert_calls() def test_create_network_external(self): mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep['router:external'] = True self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), 
json={'network': mock_new_network_rep}, validate=dict( json={ 'network': { 'admin_state_up': True, 'name': 'netname', 'router:external': True, } } ), ) ] ) network = self.cloud.create_network("netname", external=True) self._compare_networks(mock_new_network_rep, network) self.assert_calls() def test_create_network_provider(self): provider_opts = { 'physical_network': 'mynet', 'network_type': 'vlan', 'segmentation_id': 'vlan1', } new_network_provider_opts = { 'provider:physical_network': 'mynet', 'provider:network_type': 'vlan', 'provider:segmentation_id': 'vlan1', } mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep.update(new_network_provider_opts) expected_send_params = {'admin_state_up': True, 'name': 'netname'} expected_send_params.update(new_network_provider_opts) self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'network': mock_new_network_rep}, validate=dict(json={'network': expected_send_params}), ) ] ) network = self.cloud.create_network("netname", provider=provider_opts) self._compare_networks(mock_new_network_rep, network) self.assert_calls() def test_update_network_provider(self): network_id = "test-net-id" network_name = "network" network = {'id': network_id, 'name': network_name} provider_opts = { 'physical_network': 'mynet', 'network_type': 'vlan', 'segmentation_id': 'vlan1', 'should_not_be_passed': 1, } update_network_provider_opts = { 'provider:physical_network': 'mynet', 'provider:network_type': 'vlan', 'provider:segmentation_id': 'vlan1', } mock_update_rep = copy.copy(self.mock_new_network_rep) mock_update_rep.update(update_network_provider_opts) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % network_name], ), 
json={'networks': [network]}, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', network_id], ), json={'network': mock_update_rep}, validate=dict( json={'network': update_network_provider_opts} ), ), ] ) network = self.cloud.update_network( network_name, provider=provider_opts ) self._compare_networks(mock_update_rep, network) self.assert_calls() def test_create_network_with_availability_zone_hints(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'network': self.mock_new_network_rep}, validate=dict( json={ 'network': { 'admin_state_up': True, 'name': 'netname', 'availability_zone_hints': ['nova'], } } ), ), ] ) network = self.cloud.create_network( "netname", availability_zone_hints=['nova'] ) self._compare_networks(self.mock_new_network_rep, network) self.assert_calls() def test_create_network_provider_ignored_value(self): provider_opts = { 'physical_network': 'mynet', 'network_type': 'vlan', 'segmentation_id': 'vlan1', 'should_not_be_passed': 1, } new_network_provider_opts = { 'provider:physical_network': 'mynet', 'provider:network_type': 'vlan', 'provider:segmentation_id': 'vlan1', } mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep.update(new_network_provider_opts) expected_send_params = {'admin_state_up': True, 'name': 'netname'} expected_send_params.update(new_network_provider_opts) self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'network': mock_new_network_rep}, validate=dict(json={'network': expected_send_params}), ) ] ) network = self.cloud.create_network("netname", provider=provider_opts) self._compare_networks(mock_new_network_rep, network) self.assert_calls() def 
test_create_network_wrong_availability_zone_hints_type(self): azh_opts = "invalid" with testtools.ExpectedException( exceptions.SDKException, "Parameter 'availability_zone_hints' must be a list", ): self.cloud.create_network( "netname", availability_zone_hints=azh_opts ) def test_create_network_provider_wrong_type(self): provider_opts = "invalid" with testtools.ExpectedException( exceptions.SDKException, "Parameter 'provider' must be a dict", ): self.cloud.create_network("netname", provider=provider_opts) def test_create_network_port_security_disabled(self): port_security_state = False mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep['port_security_enabled'] = port_security_state self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'network': mock_new_network_rep}, validate=dict( json={ 'network': { 'admin_state_up': True, 'name': 'netname', 'port_security_enabled': port_security_state, } } ), ) ] ) network = self.cloud.create_network( "netname", port_security_enabled=port_security_state ) self._compare_networks(mock_new_network_rep, network) self.assert_calls() def test_create_network_with_mtu(self): mtu_size = 1500 mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep['mtu'] = mtu_size self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'network': mock_new_network_rep}, validate=dict( json={ 'network': { 'admin_state_up': True, 'name': 'netname', 'mtu': mtu_size, } } ), ) ] ) network = self.cloud.create_network("netname", mtu_size=mtu_size) self._compare_networks(mock_new_network_rep, network) self.assert_calls() def test_create_network_with_wrong_mtu_size(self): with testtools.ExpectedException( exceptions.SDKException, "Parameter 'mtu_size' must be greater than 67.", ): self.cloud.create_network("netname", mtu_size=42) def 
test_create_network_with_wrong_mtu_type(self): with testtools.ExpectedException( exceptions.SDKException, "Parameter 'mtu_size' must be an integer.", ): self.cloud.create_network("netname", mtu_size="fourty_two") def test_delete_network(self): network_id = "test-net-id" network_name = "network" network = {'id': network_id, 'name': network_name} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % network_name], ), json={'networks': [network]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', network_id], ), json={}, ), ] ) self.assertTrue(self.cloud.delete_network(network_name)) self.assert_calls() def test_delete_network_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', 'test-net'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=test-net'], ), json={'networks': []}, ), ] ) self.assertFalse(self.cloud.delete_network('test-net')) self.assert_calls() def test_delete_network_exception(self): network_id = "test-net-id" network_name = "network" network = {'id': network_id, 'name': network_name} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % network_name], ), json={'networks': [network]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', network_id], ), status_code=503, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_network, network_name, ) 
self.assert_calls() def test_get_network_by_id(self): network_id = "test-net-id" network_name = "network" network = {'id': network_id, 'name': network_name} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', network_id], ), json={'network': network}, ) ] ) self.assertTrue(self.cloud.get_network_by_id(network_id)) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_object.py0000664000175000017500000017212400000000000024333 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import tempfile from unittest import mock import testtools from openstack.cloud import _object_store from openstack import exceptions from openstack.object_store.v1 import _proxy from openstack.object_store.v1 import container from openstack.object_store.v1 import obj from openstack.tests.unit import base from openstack import utils class BaseTestObject(base.TestCase): def setUp(self): super().setUp() self.container = self.getUniqueString() self.object = self.getUniqueString() self.endpoint = self.cloud.object_store.get_endpoint() self.container_endpoint = '{endpoint}/{container}'.format( endpoint=self.endpoint, container=self.container ) self.object_endpoint = '{endpoint}/{object}'.format( endpoint=self.container_endpoint, object=self.object ) def _compare_containers(self, exp, real): self.assertDictEqual( container.Container(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def _compare_objects(self, exp, real): self.assertDictEqual( obj.Object(**exp).to_dict(computed=False), real.to_dict(computed=False), ) class TestObject(BaseTestObject): def test_create_container(self): """Test creating a (private) container""" self.register_uris( [ dict( method='HEAD', uri=self.container_endpoint, status_code=404 ), dict( method='PUT', uri=self.container_endpoint, status_code=201, headers={ 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', 'Content-Length': '0', 'Content-Type': 'text/html; charset=UTF-8', }, ), dict( method='HEAD', uri=self.container_endpoint, headers={ 'Content-Length': '0', 'X-Container-Object-Count': '0', 'Accept-Ranges': 'bytes', 'X-Storage-Policy': 'Policy-0', 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', 'X-Timestamp': '1481912480.41664', 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', 'X-Container-Bytes-Used': '0', 'Content-Type': 'text/plain; charset=utf-8', }, ), ] ) self.cloud.create_container(self.container) self.assert_calls() def test_create_container_public(self): """Test creating a public container""" self.register_uris( [ dict( 
method='HEAD', uri=self.container_endpoint, status_code=404 ), dict( method='PUT', uri=self.container_endpoint, status_code=201, headers={ 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', 'Content-Length': '0', 'Content-Type': 'text/html; charset=UTF-8', 'x-container-read': _object_store.OBJECT_CONTAINER_ACLS[ 'public' ], }, ), dict( method='HEAD', uri=self.container_endpoint, headers={ 'Content-Length': '0', 'X-Container-Object-Count': '0', 'Accept-Ranges': 'bytes', 'X-Storage-Policy': 'Policy-0', 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', 'X-Timestamp': '1481912480.41664', 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', 'X-Container-Bytes-Used': '0', 'Content-Type': 'text/plain; charset=utf-8', }, ), ] ) self.cloud.create_container(self.container, public=True) self.assert_calls() def test_create_container_exists(self): """Test creating a container that exists.""" self.register_uris( [ dict( method='HEAD', uri=self.container_endpoint, headers={ 'Content-Length': '0', 'X-Container-Object-Count': '0', 'Accept-Ranges': 'bytes', 'X-Storage-Policy': 'Policy-0', 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', 'X-Timestamp': '1481912480.41664', 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', 'X-Container-Bytes-Used': '0', 'Content-Type': 'text/plain; charset=utf-8', }, ) ] ) container = self.cloud.create_container(self.container) self.assert_calls() self.assertIsNotNone(container) def test_delete_container(self): self.register_uris( [dict(method='DELETE', uri=self.container_endpoint)] ) self.assertTrue(self.cloud.delete_container(self.container)) self.assert_calls() def test_delete_container_404(self): """No exception when deleting a container that does not exist""" self.register_uris( [ dict( method='DELETE', uri=self.container_endpoint, status_code=404, ) ] ) self.assertFalse(self.cloud.delete_container(self.container)) self.assert_calls() def test_delete_container_error(self): """Non-404 swift error re-raised as OSCE""" # 409 happens if the container is not empty 
self.register_uris( [ dict( method='DELETE', uri=self.container_endpoint, status_code=409, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_container, self.container, ) self.assert_calls() def test_update_container(self): headers = { 'x-container-read': _object_store.OBJECT_CONTAINER_ACLS['public'] } self.register_uris( [ dict( method='POST', uri=self.container_endpoint, status_code=204, validate=dict(headers=headers), ) ] ) self.cloud.update_container(self.container, headers) self.assert_calls() def test_update_container_error(self): """Swift error re-raised as OSCE""" # This test is of questionable value - the swift API docs do not # declare error codes (other than 404 for the container) for this # method, and I cannot make a synthetic failure to validate a real # error code. So we're really just testing the shade adapter error # raising logic here, rather than anything specific to swift. self.register_uris( [dict(method='POST', uri=self.container_endpoint, status_code=409)] ) self.assertRaises( exceptions.SDKException, self.cloud.update_container, self.container, dict(foo='bar'), ) self.assert_calls() def test_set_container_access_public(self): self.register_uris( [ dict( method='POST', uri=self.container_endpoint, status_code=204, validate=dict( headers={ 'x-container-read': _object_store.OBJECT_CONTAINER_ACLS[ 'public' ] } ), ) ] ) self.cloud.set_container_access(self.container, 'public') self.assert_calls() def test_set_container_access_private(self): self.register_uris( [ dict( method='POST', uri=self.container_endpoint, status_code=204, validate=dict( headers={ 'x-container-read': _object_store.OBJECT_CONTAINER_ACLS[ 'private' ] } ), ) ] ) self.cloud.set_container_access(self.container, 'private') self.assert_calls() def test_set_container_access_invalid(self): self.assertRaises( exceptions.SDKException, self.cloud.set_container_access, self.container, 'invalid', ) def test_get_container_access(self): self.register_uris( [ dict( 
method='HEAD', uri=self.container_endpoint, headers={ 'x-container-read': str( _object_store.OBJECT_CONTAINER_ACLS['public'] ) }, ) ] ) access = self.cloud.get_container_access(self.container) self.assertEqual('public', access) def test_get_container_invalid(self): self.register_uris( [ dict( method='HEAD', uri=self.container_endpoint, headers={'x-container-read': 'invalid'}, ) ] ) with testtools.ExpectedException( exceptions.SDKException, "Could not determine container access for ACL: invalid", ): self.cloud.get_container_access(self.container) def test_get_container_access_not_found(self): self.register_uris( [dict(method='HEAD', uri=self.container_endpoint, status_code=404)] ) with testtools.ExpectedException( exceptions.SDKException, "Container not found: %s" % self.container, ): self.cloud.get_container_access(self.container) def test_list_containers(self): endpoint = f'{self.endpoint}/' containers = [{'count': 0, 'bytes': 0, 'name': self.container}] self.register_uris( [ dict( method='GET', uri=endpoint, complete_qs=True, json=containers, ) ] ) ret = self.cloud.list_containers() self.assert_calls() for a, b in zip(containers, ret): self._compare_containers(a, b) def test_list_containers_exception(self): endpoint = f'{self.endpoint}/' self.register_uris( [ dict( method='GET', uri=endpoint, complete_qs=True, status_code=416, ) ] ) self.assertRaises(exceptions.SDKException, self.cloud.list_containers) self.assert_calls() @mock.patch.object(_proxy, '_get_expiration', return_value=13345) def test_generate_form_signature_container_key(self, mock_expiration): self.register_uris( [ dict( method='HEAD', uri=self.container_endpoint, headers={ 'Content-Length': '0', 'X-Container-Object-Count': '0', 'Accept-Ranges': 'bytes', 'X-Storage-Policy': 'Policy-0', 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', 'X-Timestamp': '1481912480.41664', 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', 'X-Container-Bytes-Used': '0', 'X-Container-Meta-Temp-Url-Key': 
'amazingly-secure-key', # noqa: E501 'Content-Type': 'text/plain; charset=utf-8', }, ) ] ) self.assertEqual( (13345, '60731fb66d46c97cdcb79b6154363179c500b9d9'), self.cloud.object_store.generate_form_signature( self.container, object_prefix='prefix/location', redirect_url='https://example.com/location', max_file_size=1024 * 1024 * 1024, max_upload_count=10, timeout=1000, temp_url_key=None, ), ) self.assert_calls() @mock.patch.object(_proxy, '_get_expiration', return_value=13345) def test_generate_form_signature_account_key(self, mock_expiration): self.register_uris( [ dict( method='HEAD', uri=self.container_endpoint, headers={ 'Content-Length': '0', 'X-Container-Object-Count': '0', 'Accept-Ranges': 'bytes', 'X-Storage-Policy': 'Policy-0', 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', 'X-Timestamp': '1481912480.41664', 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', 'X-Container-Bytes-Used': '0', 'Content-Type': 'text/plain; charset=utf-8', }, ), dict( method='HEAD', uri=self.endpoint + '/', headers={ 'X-Account-Meta-Temp-Url-Key': 'amazingly-secure-key' }, ), ] ) self.assertEqual( (13345, '3cb9bc83d5a4136421bb2c1f58b963740566646f'), self.cloud.object_store.generate_form_signature( self.container, object_prefix='prefix/location', redirect_url='https://example.com/location', max_file_size=1024 * 1024 * 1024, max_upload_count=10, timeout=1000, temp_url_key=None, ), ) self.assert_calls() @mock.patch.object(_proxy, '_get_expiration', return_value=13345) def test_generate_form_signature_key_argument(self, mock_expiration): self.assertEqual( (13345, '1c283a05c6628274b732212d9a885265e6f67b63'), self.cloud.object_store.generate_form_signature( self.container, object_prefix='prefix/location', redirect_url='https://example.com/location', max_file_size=1024 * 1024 * 1024, max_upload_count=10, timeout=1000, temp_url_key='amazingly-secure-key', ), ) self.assert_calls() def test_generate_form_signature_no_key(self): self.register_uris( [ dict( method='HEAD', 
uri=self.container_endpoint, headers={ 'Content-Length': '0', 'X-Container-Object-Count': '0', 'Accept-Ranges': 'bytes', 'X-Storage-Policy': 'Policy-0', 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', 'X-Timestamp': '1481912480.41664', 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', 'X-Container-Bytes-Used': '0', 'Content-Type': 'text/plain; charset=utf-8', }, ), dict(method='HEAD', uri=self.endpoint + '/', headers={}), ] ) self.assertRaises( exceptions.SDKException, self.cloud.object_store.generate_form_signature, self.container, object_prefix='prefix/location', redirect_url='https://example.com/location', max_file_size=1024 * 1024 * 1024, max_upload_count=10, timeout=1000, temp_url_key=None, ) self.assert_calls() def test_set_account_temp_url_key(self): key = 'super-secure-key' self.register_uris( [ dict( method='POST', uri=self.endpoint + '/', status_code=204, validate=dict( headers={'x-account-meta-temp-url-key': key} ), ), dict( method='HEAD', uri=self.endpoint + '/', headers={'x-account-meta-temp-url-key': key}, ), ] ) self.cloud.object_store.set_account_temp_url_key(key) self.assert_calls() def test_set_account_temp_url_key_secondary(self): key = 'super-secure-key' self.register_uris( [ dict( method='POST', uri=self.endpoint + '/', status_code=204, validate=dict( headers={'x-account-meta-temp-url-key-2': key} ), ), dict( method='HEAD', uri=self.endpoint + '/', headers={'x-account-meta-temp-url-key-2': key}, ), ] ) self.cloud.object_store.set_account_temp_url_key(key, secondary=True) self.assert_calls() def test_set_container_temp_url_key(self): key = 'super-secure-key' self.register_uris( [ dict( method='POST', uri=self.container_endpoint, status_code=204, validate=dict( headers={'x-container-meta-temp-url-key': key} ), ), dict( method='HEAD', uri=self.container_endpoint, headers={'x-container-meta-temp-url-key': key}, ), ] ) self.cloud.object_store.set_container_temp_url_key(self.container, key) self.assert_calls() def 
test_set_container_temp_url_key_secondary(self): key = 'super-secure-key' self.register_uris( [ dict( method='POST', uri=self.container_endpoint, status_code=204, validate=dict( headers={'x-container-meta-temp-url-key-2': key} ), ), dict( method='HEAD', uri=self.container_endpoint, headers={'x-container-meta-temp-url-key-2': key}, ), ] ) self.cloud.object_store.set_container_temp_url_key( self.container, key, secondary=True ) self.assert_calls() def test_list_objects(self): endpoint = '{endpoint}?format=json'.format( endpoint=self.container_endpoint ) objects = [ { 'bytes': 20304400896, 'last_modified': '2016-12-15T13:34:13.650090', 'hash': 'daaf9ed2106d09bba96cf193d866445e', 'name': self.object, 'content_type': 'application/octet-stream', } ] self.register_uris( [dict(method='GET', uri=endpoint, complete_qs=True, json=objects)] ) ret = self.cloud.list_objects(self.container) self.assert_calls() for a, b in zip(objects, ret): self._compare_objects(a, b) def test_list_objects_with_prefix(self): endpoint = '{endpoint}?format=json&prefix=test'.format( endpoint=self.container_endpoint ) objects = [ { 'bytes': 20304400896, 'last_modified': '2016-12-15T13:34:13.650090', 'hash': 'daaf9ed2106d09bba96cf193d866445e', 'name': self.object, 'content_type': 'application/octet-stream', } ] self.register_uris( [dict(method='GET', uri=endpoint, complete_qs=True, json=objects)] ) ret = self.cloud.list_objects(self.container, prefix='test') self.assert_calls() for a, b in zip(objects, ret): self._compare_objects(a, b) def test_list_objects_exception(self): endpoint = '{endpoint}?format=json'.format( endpoint=self.container_endpoint ) self.register_uris( [ dict( method='GET', uri=endpoint, complete_qs=True, status_code=416, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.list_objects, self.container, ) self.assert_calls() def test_delete_object(self): self.register_uris( [ dict( method='HEAD', uri=self.object_endpoint, headers={'X-Object-Meta': 'foo'}, ), dict( 
method='DELETE', uri=self.object_endpoint, status_code=204 ), ] ) self.assertTrue(self.cloud.delete_object(self.container, self.object)) self.assert_calls() def test_delete_object_not_found(self): self.register_uris( [dict(method='HEAD', uri=self.object_endpoint, status_code=404)] ) self.assertFalse(self.cloud.delete_object(self.container, self.object)) self.assert_calls() def test_get_object(self): headers = { 'Content-Length': '20304400896', 'Content-Type': 'application/octet-stream', 'Accept-Ranges': 'bytes', 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', 'X-Timestamp': '1481808853.65009', 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', 'Date': 'Mon, 19 Dec 2016 14:24:00 GMT', 'X-Static-Large-Object': 'True', 'X-Object-Meta-Mtime': '1481513709.168512', } response_headers = {k.lower(): v for k, v in headers.items()} text = 'test body' self.register_uris( [ dict( method='GET', uri=self.object_endpoint, headers={ 'Content-Length': '20304400896', 'Content-Type': 'application/octet-stream', 'Accept-Ranges': 'bytes', 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', 'X-Timestamp': '1481808853.65009', 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', 'Date': 'Mon, 19 Dec 2016 14:24:00 GMT', 'X-Static-Large-Object': 'True', 'X-Object-Meta-Mtime': '1481513709.168512', }, text='test body', ) ] ) resp = self.cloud.get_object(self.container, self.object) self.assert_calls() self.assertEqual((response_headers, text), resp) def test_stream_object(self): text = b'test body' self.register_uris( [ dict( method='GET', uri=self.object_endpoint, headers={ 'Content-Length': '20304400896', 'Content-Type': 'application/octet-stream', 'Accept-Ranges': 'bytes', 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', 'X-Timestamp': '1481808853.65009', 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', 'Date': 'Mon, 19 Dec 2016 14:24:00 
GMT', 'X-Static-Large-Object': 'True', 'X-Object-Meta-Mtime': '1481513709.168512', }, text='test body', ) ] ) response_text = b'' for data in self.cloud.stream_object(self.container, self.object): response_text += data self.assert_calls() self.assertEqual(text, response_text) def test_stream_object_not_found(self): self.register_uris( [ dict(method='GET', uri=self.object_endpoint, status_code=404), ] ) response_text = b'' for data in self.cloud.stream_object(self.container, self.object): response_text += data self.assert_calls() self.assertEqual(b'', response_text) def test_get_object_not_found(self): self.register_uris( [dict(method='GET', uri=self.object_endpoint, status_code=404)] ) self.assertIsNone(self.cloud.get_object(self.container, self.object)) self.assert_calls() def test_get_object_exception(self): self.register_uris( [dict(method='GET', uri=self.object_endpoint, status_code=416)] ) self.assertRaises( exceptions.SDKException, self.cloud.get_object, self.container, self.object, ) self.assert_calls() def test_get_object_segment_size_below_min(self): # Register directly becuase we make multiple calls. The number # of calls we make isn't interesting - what we do with the return # values is. Don't run assert_calls for the same reason. 
self.register_uris( [ dict( method='GET', uri='https://object-store.example.com/info', json=dict( swift={'max_file_size': 1000}, slo={'min_segment_size': 500}, ), headers={'Content-Type': 'application/json'}, ) ] ) self.assertEqual(500, self.cloud.get_object_segment_size(400)) self.assertEqual(900, self.cloud.get_object_segment_size(900)) self.assertEqual(1000, self.cloud.get_object_segment_size(1000)) self.assertEqual(1000, self.cloud.get_object_segment_size(1100)) def test_get_object_segment_size_http_404(self): self.register_uris( [ dict( method='GET', uri='https://object-store.example.com/info', status_code=404, reason='Not Found', ) ] ) self.assertEqual( _proxy.DEFAULT_OBJECT_SEGMENT_SIZE, self.cloud.get_object_segment_size(None), ) self.assert_calls() def test_get_object_segment_size_http_412(self): self.register_uris( [ dict( method='GET', uri='https://object-store.example.com/info', status_code=412, reason='Precondition failed', ) ] ) self.assertEqual( _proxy.DEFAULT_OBJECT_SEGMENT_SIZE, self.cloud.get_object_segment_size(None), ) self.assert_calls() def test_update_container_cors(self): headers = { 'X-Container-Meta-Web-Index': 'index.html', 'X-Container-Meta-Access-Control-Allow-Origin': '*', } self.register_uris( [ dict( method='POST', uri=self.container_endpoint, status_code=204, validate=dict(headers=headers), ) ] ) self.cloud.update_container(self.container, headers=headers) self.assert_calls() class TestObjectUploads(BaseTestObject): def setUp(self): super().setUp() self.content = self.getUniqueString().encode('latin-1') self.object_file = tempfile.NamedTemporaryFile(delete=False) self.object_file.write(self.content) self.object_file.close() self.md5, self.sha256 = utils._get_file_hashes(self.object_file.name) self.endpoint = self.cloud.object_store.get_endpoint() def test_create_object(self): self.register_uris( [ dict( method='GET', uri='https://object-store.example.com/info', json=dict( swift={'max_file_size': 1000}, slo={'min_segment_size': 500}, 
), ), dict( method='HEAD', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=404, ), dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, validate=dict( headers={ 'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': self.sha256, } ), ), ] ) self.cloud.create_object( container=self.container, name=self.object, filename=self.object_file.name, ) self.assert_calls() def test_create_object_index_rax(self): self.register_uris( [ dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object='index.html', ), status_code=201, validate=dict( headers={ 'access-control-allow-origin': '*', 'content-type': 'text/html', } ), ) ] ) headers = { 'access-control-allow-origin': '*', 'content-type': 'text/html', } self.cloud.create_object( self.container, name='index.html', data='', **headers ) self.assert_calls() def test_create_directory_marker_object(self): self.register_uris( [ dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, validate=dict( headers={ 'content-type': 'application/directory', } ), ) ] ) self.cloud.create_directory_marker_object( container=self.container, name=self.object ) self.assert_calls() def test_create_dynamic_large_object(self): max_file_size = 2 min_file_size = 1 uris_to_mock = [ dict( method='GET', uri='https://object-store.example.com/info', json=dict( swift={'max_file_size': max_file_size}, slo={'min_segment_size': min_file_size}, ), ), dict( method='HEAD', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=404, ), ] uris_to_mock.extend( [ dict( method='PUT', uri='{endpoint}/{container}/{object}/{index:0>6}'.format( 
endpoint=self.endpoint, container=self.container, object=self.object, index=index, ), status_code=201, ) for index, offset in enumerate( range(0, len(self.content), max_file_size) ) ] ) uris_to_mock.append( dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, validate=dict( headers={ 'x-object-manifest': '{container}/{object}'.format( container=self.container, object=self.object ), 'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': self.sha256, } ), ) ) self.register_uris(uris_to_mock) self.cloud.create_object( container=self.container, name=self.object, filename=self.object_file.name, use_slo=False, ) # After call 3, order become indeterminate because of thread pool self.assert_calls(stop_after=3) for key, value in self.calls[-1]['headers'].items(): self.assertEqual( value, self.adapter.request_history[-1].headers[key], 'header mismatch in manifest call', ) def test_create_static_large_object(self): max_file_size = 25 min_file_size = 1 uris_to_mock = [ dict( method='GET', uri='https://object-store.example.com/info', json=dict( swift={'max_file_size': max_file_size}, slo={'min_segment_size': min_file_size}, ), ), dict( method='HEAD', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=404, ), ] uris_to_mock.extend( [ dict( method='PUT', uri='{endpoint}/{container}/{object}/{index:0>6}'.format( endpoint=self.endpoint, container=self.container, object=self.object, index=index, ), status_code=201, headers=dict(Etag=f'etag{index}'), ) for index, offset in enumerate( range(0, len(self.content), max_file_size) ) ] ) uris_to_mock.append( dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, validate=dict( params={'multipart-manifest', 'put'}, headers={ 
'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': self.sha256, }, ), ) ) self.register_uris(uris_to_mock) self.cloud.create_object( container=self.container, name=self.object, filename=self.object_file.name, use_slo=True, ) # After call 3, order become indeterminate because of thread pool self.assert_calls(stop_after=3) for key, value in self.calls[-1]['headers'].items(): self.assertEqual( value, self.adapter.request_history[-1].headers[key], 'header mismatch in manifest call', ) base_object = '/{container}/{object}'.format( container=self.container, object=self.object ) self.assertEqual( [ { 'path': "{base_object}/000000".format( base_object=base_object ), 'size_bytes': 25, 'etag': 'etag0', }, { 'path': "{base_object}/000001".format( base_object=base_object ), 'size_bytes': 25, 'etag': 'etag1', }, { 'path': "{base_object}/000002".format( base_object=base_object ), 'size_bytes': 25, 'etag': 'etag2', }, { 'path': "{base_object}/000003".format( base_object=base_object ), 'size_bytes': len(self.object) - 75, 'etag': 'etag3', }, ], self.adapter.request_history[-1].json(), ) def test_slo_manifest_retry(self): """ Uploading the SLO manifest file should be retried up to 3 times before giving up. This test should succeed on the 3rd and final attempt. 
""" max_file_size = 25 min_file_size = 1 uris_to_mock = [ dict( method='GET', uri='https://object-store.example.com/info', json=dict( swift={'max_file_size': max_file_size}, slo={'min_segment_size': min_file_size}, ), ), dict( method='HEAD', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=404, ), ] uris_to_mock.extend( [ dict( method='PUT', uri='{endpoint}/{container}/{object}/{index:0>6}'.format( endpoint=self.endpoint, container=self.container, object=self.object, index=index, ), status_code=201, headers=dict(Etag=f'etag{index}'), ) for index, offset in enumerate( range(0, len(self.content), max_file_size) ) ] ) # manifest file upload calls uris_to_mock.extend( [ dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=400, validate=dict( params={'multipart-manifest', 'put'}, headers={ 'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': self.sha256, }, ), ), dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=400, validate=dict( params={'multipart-manifest', 'put'}, headers={ 'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': self.sha256, }, ), ), dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, validate=dict( params={'multipart-manifest', 'put'}, headers={ 'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': self.sha256, }, ), ), ] ) self.register_uris(uris_to_mock) self.cloud.create_object( container=self.container, name=self.object, filename=self.object_file.name, use_slo=True, ) # After call 3, order become indeterminate because of thread pool self.assert_calls(stop_after=3) for key, value in self.calls[-1]['headers'].items(): 
self.assertEqual( value, self.adapter.request_history[-1].headers[key], 'header mismatch in manifest call', ) base_object = '/{container}/{object}'.format( container=self.container, object=self.object ) self.assertEqual( [ { 'path': "{base_object}/000000".format( base_object=base_object ), 'size_bytes': 25, 'etag': 'etag0', }, { 'path': "{base_object}/000001".format( base_object=base_object ), 'size_bytes': 25, 'etag': 'etag1', }, { 'path': "{base_object}/000002".format( base_object=base_object ), 'size_bytes': 25, 'etag': 'etag2', }, { 'path': "{base_object}/000003".format( base_object=base_object ), 'size_bytes': len(self.object) - 75, 'etag': 'etag3', }, ], self.adapter.request_history[-1].json(), ) def test_slo_manifest_fail(self): """ Uploading the SLO manifest file should be retried up to 3 times before giving up. This test fails all 3 attempts and should verify that we delete uploaded segments that begin with the object prefix. """ max_file_size = 25 min_file_size = 1 uris_to_mock = [ dict( method='GET', uri='https://object-store.example.com/info', json=dict( swift={'max_file_size': max_file_size}, slo={'min_segment_size': min_file_size}, ), ), dict( method='HEAD', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=404, ), ] uris_to_mock.extend( [ dict( method='PUT', uri='{endpoint}/{container}/{object}/{index:0>6}'.format( endpoint=self.endpoint, container=self.container, object=self.object, index=index, ), status_code=201, headers=dict(Etag=f'etag{index}'), ) for index, offset in enumerate( range(0, len(self.content), max_file_size) ) ] ) # manifest file upload calls uris_to_mock.extend( [ dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=400, validate=dict( params={'multipart-manifest', 'put'}, headers={ 'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': 
self.sha256, }, ), ), dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=400, validate=dict( params={'multipart-manifest', 'put'}, headers={ 'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': self.sha256, }, ), ), dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=400, validate=dict( params={'multipart-manifest', 'put'}, headers={ 'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': self.sha256, }, ), ), ] ) # Cleaning up image upload segments involves calling the # delete_autocreated_image_objects() API method which will list # objects (LIST), get the object metadata (HEAD), then delete the # object (DELETE). uris_to_mock.extend( [ dict( method='GET', uri='{endpoint}/images?format=json&prefix={prefix}'.format( endpoint=self.endpoint, prefix=self.object ), complete_qs=True, json=[ { 'content_type': 'application/octet-stream', 'bytes': 1437258240, 'hash': '249219347276c331b87bf1ac2152d9af', 'last_modified': '2015-02-16T17:50:05.289600', 'name': self.object, } ], ), dict( method='HEAD', uri='{endpoint}/images/{object}'.format( endpoint=self.endpoint, object=self.object ), headers={ 'X-Timestamp': '1429036140.50253', 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', 'Content-Length': '1290170880', 'Last-Modified': 'Tue, 14 Apr 2015 18:29:01 GMT', 'X-Object-Meta-x-sdk-autocreated': 'true', 'X-Object-Meta-X-Shade-Sha256': 'does not matter', 'X-Object-Meta-X-Shade-Md5': 'does not matter', 'Date': 'Thu, 16 Nov 2017 15:24:30 GMT', 'Accept-Ranges': 'bytes', 'X-Static-Large-Object': 'false', 'Content-Type': 'application/octet-stream', 'Etag': '249219347276c331b87bf1ac2152d9af', }, ), dict( method='DELETE', uri='{endpoint}/images/{object}'.format( endpoint=self.endpoint, object=self.object ), ), ] ) self.register_uris(uris_to_mock) # 
image_api_use_tasks needs to be set to True in order for the API # method delete_autocreated_image_objects() to do the cleanup. self.cloud.image_api_use_tasks = True self.assertRaises( exceptions.SDKException, self.cloud.create_object, container=self.container, name=self.object, filename=self.object_file.name, use_slo=True, ) # After call 3, order become indeterminate because of thread pool self.assert_calls(stop_after=3) def test_object_segment_retry_failure(self): max_file_size = 25 min_file_size = 1 self.register_uris( [ dict( method='GET', uri='https://object-store.example.com/info', json=dict( swift={'max_file_size': max_file_size}, slo={'min_segment_size': min_file_size}, ), ), dict( method='HEAD', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=404, ), dict( method='PUT', uri='{endpoint}/{container}/{object}/000000'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, ), dict( method='PUT', uri='{endpoint}/{container}/{object}/000001'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, ), dict( method='PUT', uri='{endpoint}/{container}/{object}/000002'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, ), dict( method='PUT', uri='{endpoint}/{container}/{object}/000003'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=501, ), dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_object, container=self.container, name=self.object, filename=self.object_file.name, use_slo=True, ) # After call 3, order become indeterminate because of thread pool self.assert_calls(stop_after=3) def test_object_segment_retries(self): max_file_size = 25 
min_file_size = 1 self.register_uris( [ dict( method='GET', uri='https://object-store.example.com/info', json=dict( swift={'max_file_size': max_file_size}, slo={'min_segment_size': min_file_size}, ), ), dict( method='HEAD', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=404, ), dict( method='PUT', uri='{endpoint}/{container}/{object}/000000'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), headers={'etag': 'etag0'}, status_code=201, ), dict( method='PUT', uri='{endpoint}/{container}/{object}/000001'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), headers={'etag': 'etag1'}, status_code=201, ), dict( method='PUT', uri='{endpoint}/{container}/{object}/000002'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), headers={'etag': 'etag2'}, status_code=201, ), dict( method='PUT', uri='{endpoint}/{container}/{object}/000003'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=501, ), dict( method='PUT', uri='{endpoint}/{container}/{object}/000003'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, headers={'etag': 'etag3'}, ), dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, validate=dict( params={'multipart-manifest', 'put'}, headers={ 'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': self.sha256, }, ), ), ] ) self.cloud.create_object( container=self.container, name=self.object, filename=self.object_file.name, use_slo=True, ) # After call 3, order become indeterminate because of thread pool self.assert_calls(stop_after=3) for key, value in self.calls[-1]['headers'].items(): self.assertEqual( value, self.adapter.request_history[-1].headers[key], 'header mismatch in manifest call', ) 
base_object = '/{container}/{object}'.format( container=self.container, object=self.object ) self.assertEqual( [ { 'path': "{base_object}/000000".format( base_object=base_object ), 'size_bytes': 25, 'etag': 'etag0', }, { 'path': "{base_object}/000001".format( base_object=base_object ), 'size_bytes': 25, 'etag': 'etag1', }, { 'path': "{base_object}/000002".format( base_object=base_object ), 'size_bytes': 25, 'etag': 'etag2', }, { 'path': "{base_object}/000003".format( base_object=base_object ), 'size_bytes': len(self.object) - 75, 'etag': 'etag3', }, ], self.adapter.request_history[-1].json(), ) def test_create_object_skip_checksum(self): self.register_uris( [ dict( method='GET', uri='https://object-store.example.com/info', json=dict( swift={'max_file_size': 1000}, slo={'min_segment_size': 500}, ), ), dict( method='HEAD', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=200, ), dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, validate=dict(headers={}), ), ] ) self.cloud.create_object( container=self.container, name=self.object, filename=self.object_file.name, generate_checksums=False, ) self.assert_calls() def test_create_object_data(self): self.register_uris( [ dict( method='PUT', uri='{endpoint}/{container}/{object}'.format( endpoint=self.endpoint, container=self.container, object=self.object, ), status_code=201, validate=dict( headers={}, data=self.content, ), ), ] ) self.cloud.create_object( container=self.container, name=self.object, data=self.content ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_openstackcloud.py0000664000175000017500000000757200000000000026107 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 
(the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack import exceptions from openstack import proxy from openstack import resource from openstack.tests.unit import base class TestSearch(base.TestCase): class FakeResource(resource.Resource): allow_fetch = True allow_list = True foo = resource.Body("foo") def setUp(self): super().setUp() self.session = proxy.Proxy(self.cloud) self.session._sdk_connection = self.cloud self.session._get = mock.Mock() self.session._list = mock.Mock() self.session._resource_registry = dict(fake=self.FakeResource) # Set the mock into the cloud connection setattr(self.cloud, "mock_session", self.session) def test_raises_unknown_service(self): self.assertRaises( exceptions.SDKException, self.cloud.search_resources, "wrong_service.wrong_resource", "name", ) def test_raises_unknown_resource(self): self.assertRaises( exceptions.SDKException, self.cloud.search_resources, "mock_session.wrong_resource", "name", ) def test_search_resources_get_finds(self): self.session._get.return_value = self.FakeResource(foo="bar") ret = self.cloud.search_resources("mock_session.fake", "fake_name") self.session._get.assert_called_with(self.FakeResource, "fake_name") self.assertEqual(1, len(ret)) self.assertEqual( self.FakeResource(foo="bar").to_dict(), ret[0].to_dict() ) def test_search_resources_list(self): self.session._get.side_effect = exceptions.NotFoundException self.session._list.return_value = [self.FakeResource(foo="bar")] ret = self.cloud.search_resources("mock_session.fake", 
"fake_name") self.session._get.assert_called_with(self.FakeResource, "fake_name") self.session._list.assert_called_with( self.FakeResource, name="fake_name" ) self.assertEqual(1, len(ret)) self.assertEqual( self.FakeResource(foo="bar").to_dict(), ret[0].to_dict() ) def test_search_resources_args(self): self.session._get.side_effect = exceptions.NotFoundException self.session._list.return_value = [] self.cloud.search_resources( "mock_session.fake", "fake_name", get_args=["getarg1"], get_kwargs={"getkwarg1": "1"}, list_args=["listarg1"], list_kwargs={"listkwarg1": "1"}, filter1="foo", ) self.session._get.assert_called_with( self.FakeResource, "fake_name", "getarg1", getkwarg1="1" ) self.session._list.assert_called_with( self.FakeResource, "listarg1", listkwarg1="1", name="fake_name", filter1="foo", ) def test_search_resources_name_empty(self): self.session._list.return_value = [self.FakeResource(foo="bar")] ret = self.cloud.search_resources("mock_session.fake", None, foo="bar") self.session._get.assert_not_called() self.session._list.assert_called_with(self.FakeResource, foo="bar") self.assertEqual(1, len(ret)) self.assertEqual( self.FakeResource(foo="bar").to_dict(), ret[0].to_dict() ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_operator.py0000664000175000017500000001536600000000000024724 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import uuid import testtools from openstack.config import cloud_region from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestOperatorCloud(base.TestCase): def test_get_image_name(self): self.use_glance() image_id = self.getUniqueString() fake_image = fakes.make_fake_image(image_id=image_id) list_return = {'images': [fake_image]} self.register_uris( [ dict( method='GET', uri='https://image.example.com/v2/images', json=list_return, ), dict( method='GET', uri='https://image.example.com/v2/images', json=list_return, ), ] ) self.assertEqual('fake_image', self.cloud.get_image_name(image_id)) self.assertEqual('fake_image', self.cloud.get_image_name('fake_image')) self.assert_calls() def test_get_image_id(self): self.use_glance() image_id = self.getUniqueString() fake_image = fakes.make_fake_image(image_id=image_id) list_return = {'images': [fake_image]} self.register_uris( [ dict( method='GET', uri='https://image.example.com/v2/images', json=list_return, ), dict( method='GET', uri='https://image.example.com/v2/images', json=list_return, ), ] ) self.assertEqual(image_id, self.cloud.get_image_id(image_id)) self.assertEqual(image_id, self.cloud.get_image_id('fake_image')) self.assert_calls() @mock.patch.object(cloud_region.CloudRegion, 'get_session') def test_get_session_endpoint_exception(self, get_session_mock): class FakeException(Exception): pass def side_effect(*args, **kwargs): raise FakeException("No service") session_mock = mock.Mock() session_mock.get_endpoint.side_effect = side_effect get_session_mock.return_value = session_mock self.cloud.name = 'testcloud' self.cloud.config.config['region_name'] = 'testregion' with testtools.ExpectedException( exceptions.SDKException, "Error getting image endpoint on testcloud:testregion:" " No service", ): self.cloud.get_session_endpoint("image") @mock.patch.object(cloud_region.CloudRegion, 'get_session') def 
test_get_session_endpoint_unavailable(self, get_session_mock): session_mock = mock.Mock() session_mock.get_endpoint.return_value = None get_session_mock.return_value = session_mock image_endpoint = self.cloud.get_session_endpoint("image") self.assertIsNone(image_endpoint) @mock.patch.object(cloud_region.CloudRegion, 'get_session') def test_get_session_endpoint_identity(self, get_session_mock): session_mock = mock.Mock() get_session_mock.return_value = session_mock self.cloud.get_session_endpoint('identity') kwargs = dict( interface='public', region_name='RegionOne', service_name=None, service_type='identity', ) session_mock.get_endpoint.assert_called_with(**kwargs) @mock.patch.object(cloud_region.CloudRegion, 'get_session') def test_has_service_no(self, get_session_mock): session_mock = mock.Mock() session_mock.get_endpoint.return_value = None get_session_mock.return_value = session_mock self.assertFalse(self.cloud.has_service("image")) @mock.patch.object(cloud_region.CloudRegion, 'get_session') def test_has_service_yes(self, get_session_mock): session_mock = mock.Mock() session_mock.get_endpoint.return_value = 'http://fake.url' get_session_mock.return_value = session_mock self.assertTrue(self.cloud.has_service("image")) def test_list_hypervisors(self): '''This test verifies that calling list_hypervisors results in a call to nova client.''' uuid1 = uuid.uuid4().hex uuid2 = uuid.uuid4().hex self.use_compute_discovery() self.register_uris( [ dict( method='GET', uri='https://compute.example.com/v2.1/os-hypervisors/detail', # noqa: E501 json={ 'hypervisors': [ fakes.make_fake_hypervisor(uuid1, 'testserver1'), fakes.make_fake_hypervisor(uuid2, 'testserver2'), ] }, validate={ 'headers': {'OpenStack-API-Version': 'compute 2.53'} }, ), ] ) r = self.cloud.list_hypervisors() self.assertEqual(2, len(r)) self.assertEqual('testserver1', r[0]['name']) self.assertEqual(uuid1, r[0]['id']) self.assertEqual('testserver2', r[1]['name']) self.assertEqual(uuid2, r[1]['id']) 
self.assert_calls() def test_list_old_hypervisors(self): '''This test verifies that calling list_hypervisors on a pre-2.53 cloud calls the old version.''' self.use_compute_discovery( compute_version_json='old-compute-version.json' ) self.register_uris( [ dict( method='GET', uri='https://compute.example.com/v2.1/os-hypervisors/detail', # noqa: E501 json={ 'hypervisors': [ fakes.make_fake_hypervisor('1', 'testserver1'), fakes.make_fake_hypervisor('2', 'testserver2'), ] }, ), ] ) r = self.cloud.list_hypervisors() self.assertEqual(2, len(r)) self.assertEqual('testserver1', r[0]['name']) self.assertEqual('testserver2', r[1]['name']) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_operator_noauth.py0000664000175000017500000002256100000000000026275 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import openstack.cloud from openstack.tests.unit import base class TestOpenStackCloudOperatorNoAuth(base.TestCase): def setUp(self): """Setup Noauth OpenStackCloud tests Setup the test to utilize no authentication and an endpoint URL in the auth data. This is permits testing of the basic mechanism that enables Ironic noauth mode to be utilized with Shade. Uses base.TestCase instead of IronicTestCase because we need to do completely different things with discovery. 
""" super().setUp() # By clearing the URI registry, we remove all calls to a keystone # catalog or getting a token self._uri_registry.clear() self.register_uris( [ dict( method='GET', uri=self.get_mock_url( service_type='baremetal', base_url_append='v1' ), json={ 'id': 'v1', 'links': [ { "href": "https://baremetal.example.com/v1", "rel": "self", } ], }, ), dict( method='GET', uri=self.get_mock_url( service_type='baremetal', base_url_append='v1', resource='nodes', ), json={'nodes': []}, ), ] ) def test_ironic_noauth_none_auth_type(self): """Test noauth selection for Ironic in OpenStackCloud The new way of doing this is with the keystoneauth none plugin. """ # NOTE(TheJulia): When we are using the python-ironicclient # library, the library will automatically prepend the URI path # with 'v1'. As such, since we are overriding the endpoint, # we must explicitly do the same as we move away from the # client library. self.cloud_noauth = openstack.connect( auth_type='none', baremetal_endpoint_override="https://baremetal.example.com/v1", ) self.cloud_noauth.list_machines() self.assert_calls() def test_ironic_noauth_auth_endpoint(self): """Test noauth selection for Ironic in OpenStackCloud Sometimes people also write clouds.yaml files that look like this: :: clouds: bifrost: auth_type: "none" endpoint: https://baremetal.example.com """ self.cloud_noauth = openstack.connect( auth_type='none', endpoint='https://baremetal.example.com/v1', ) self.cloud_noauth.list_machines() self.assert_calls() def test_ironic_noauth_admin_token_auth_type(self): """Test noauth selection for Ironic in OpenStackCloud The old way of doing this was to abuse admin_token. 
""" self.cloud_noauth = openstack.connect( auth_type='admin_token', auth=dict( endpoint='https://baremetal.example.com/v1', token='ignored' ), ) self.cloud_noauth.list_machines() self.assert_calls() class TestOpenStackCloudOperatorNoAuthUnversioned(base.TestCase): def setUp(self): """Setup Noauth OpenStackCloud tests for unversioned endpoints Setup the test to utilize no authentication and an endpoint URL in the auth data. This is permits testing of the basic mechanism that enables Ironic noauth mode to be utilized with Shade. Uses base.TestCase instead of IronicTestCase because we need to do completely different things with discovery. """ super().setUp() # By clearing the URI registry, we remove all calls to a keystone # catalog or getting a token self._uri_registry.clear() self.register_uris( [ dict( method='GET', uri='https://baremetal.example.com/', json={ "default_version": { "status": "CURRENT", "min_version": "1.1", "version": "1.46", "id": "v1", "links": [ { "href": "https://baremetal.example.com/v1", "rel": "self", } ], }, "versions": [ { "status": "CURRENT", "min_version": "1.1", "version": "1.46", "id": "v1", "links": [ { "href": "https://baremetal.example.com/v1", # noqa: E501 "rel": "self", } ], } ], "name": "OpenStack Ironic API", "description": "Ironic is an OpenStack project.", }, ), dict( method='GET', uri=self.get_mock_url( service_type='baremetal', base_url_append='v1' ), json={ "media_types": [ { "base": "application/json", "type": "application/vnd.openstack.ironic.v1+json", # noqa: E501 } ], "links": [ { "href": "https://baremetal.example.com/v1", "rel": "self", } ], "ports": [ { "href": "https://baremetal.example.com/v1/ports/", # noqa: E501 "rel": "self", }, { "href": "https://baremetal.example.com/ports/", "rel": "bookmark", }, ], "nodes": [ { "href": "https://baremetal.example.com/v1/nodes/", # noqa: E501 "rel": "self", }, { "href": "https://baremetal.example.com/nodes/", "rel": "bookmark", }, ], "id": "v1", }, ), dict( method='GET', 
uri=self.get_mock_url( service_type='baremetal', base_url_append='v1', resource='nodes', ), json={'nodes': []}, ), ] ) def test_ironic_noauth_none_auth_type(self): """Test noauth selection for Ironic in OpenStackCloud The new way of doing this is with the keystoneauth none plugin. """ # NOTE(TheJulia): When we are using the python-ironicclient # library, the library will automatically prepend the URI path # with 'v1'. As such, since we are overriding the endpoint, # we must explicitly do the same as we move away from the # client library. self.cloud_noauth = openstack.connect( auth_type='none', baremetal_endpoint_override="https://baremetal.example.com", ) self.cloud_noauth.list_machines() self.assert_calls() def test_ironic_noauth_auth_endpoint(self): """Test noauth selection for Ironic in OpenStackCloud Sometimes people also write clouds.yaml files that look like this: :: clouds: bifrost: auth_type: "none" endpoint: https://baremetal.example.com """ self.cloud_noauth = openstack.connect( auth_type='none', endpoint='https://baremetal.example.com/', ) self.cloud_noauth.list_machines() self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_port.py0000664000175000017500000004547600000000000024062 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" test_port ---------------------------------- Test port resource (managed by neutron) """ from openstack import exceptions from openstack.network.v2 import port as _port from openstack.tests.unit import base class TestPort(base.TestCase): mock_neutron_port_create_rep = { 'port': { 'status': 'DOWN', 'binding:host_id': '', 'name': 'test-port-name', 'allowed_address_pairs': [], 'admin_state_up': True, 'network_id': 'test-net-id', 'tenant_id': 'test-tenant-id', 'binding:vif_details': {}, 'binding:vnic_type': 'normal', 'binding:vif_type': 'unbound', 'extra_dhcp_opts': [], 'device_owner': '', 'mac_address': '50:1c:0d:e4:f0:0d', 'binding:profile': {}, 'fixed_ips': [ {'subnet_id': 'test-subnet-id', 'ip_address': '29.29.29.29'} ], 'id': 'test-port-id', 'security_groups': [], 'device_id': '', } } mock_neutron_port_update_rep = { 'port': { 'status': 'DOWN', 'binding:host_id': '', 'name': 'test-port-name-updated', 'allowed_address_pairs': [], 'admin_state_up': True, 'network_id': 'test-net-id', 'tenant_id': 'test-tenant-id', 'binding:vif_details': {}, 'extra_dhcp_opts': [], 'binding:vnic_type': 'normal', 'binding:vif_type': 'unbound', 'device_owner': '', 'mac_address': '50:1c:0d:e4:f0:0d', 'binding:profile': {}, 'fixed_ips': [ {'subnet_id': 'test-subnet-id', 'ip_address': '29.29.29.29'} ], 'id': 'test-port-id', 'security_groups': [], 'device_id': '', } } mock_neutron_port_list_rep = { 'ports': [ { 'status': 'ACTIVE', 'binding:host_id': 'devstack', 'name': 'first-port', 'allowed_address_pairs': [], 'admin_state_up': True, 'network_id': '70c1db1f-b701-45bd-96e0-a313ee3430b3', 'tenant_id': '', 'extra_dhcp_opts': [], 'binding:vif_details': { 'port_filter': True, 'ovs_hybrid_plug': True, }, 'binding:vif_type': 'ovs', 'device_owner': 'network:router_gateway', 'mac_address': 'fa:16:3e:58:42:ed', 'binding:profile': {}, 'binding:vnic_type': 'normal', 'fixed_ips': [ { 'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062', 'ip_address': '172.24.4.2', } ], 'id': 
'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b', 'security_groups': [], 'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824', }, { 'status': 'ACTIVE', 'binding:host_id': 'devstack', 'name': '', 'allowed_address_pairs': [], 'admin_state_up': True, 'network_id': 'f27aa545-cbdd-4907-b0c6-c9e8b039dcc2', 'tenant_id': 'd397de8a63f341818f198abb0966f6f3', 'extra_dhcp_opts': [], 'binding:vif_details': { 'port_filter': True, 'ovs_hybrid_plug': True, }, 'binding:vif_type': 'ovs', 'device_owner': 'network:router_interface', 'mac_address': 'fa:16:3e:bb:3c:e4', 'binding:profile': {}, 'binding:vnic_type': 'normal', 'fixed_ips': [ { 'subnet_id': '288bf4a1-51ba-43b6-9d0a-520e9005db17', 'ip_address': '10.0.0.1', } ], 'id': 'f71a6703-d6de-4be1-a91a-a570ede1d159', 'security_groups': [], 'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824', }, ] } def _compare_ports(self, exp, real): self.assertDictEqual( _port.Port(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def test_create_port(self): self.register_uris( [ dict( method="POST", uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'] ), json=self.mock_neutron_port_create_rep, validate=dict( json={ 'port': { 'network_id': 'test-net-id', 'name': 'test-port-name', 'admin_state_up': True, } } ), ) ] ) port = self.cloud.create_port( network_id='test-net-id', name='test-port-name', admin_state_up=True, ) self._compare_ports(self.mock_neutron_port_create_rep['port'], port) self.assert_calls() def test_create_port_parameters(self): """Test that we detect invalid arguments passed to create_port""" self.assertRaises( TypeError, self.cloud.create_port, network_id='test-net-id', nome='test-port-name', stato_amministrativo_porta=True, ) def test_create_port_exception(self): self.register_uris( [ dict( method="POST", uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'] ), status_code=500, validate=dict( json={ 'port': { 'network_id': 'test-net-id', 'name': 'test-port-name', 'admin_state_up': True, } } ), ) ] ) 
self.assertRaises( exceptions.SDKException, self.cloud.create_port, network_id='test-net-id', name='test-port-name', admin_state_up=True, ) self.assert_calls() def test_create_port_with_project(self): self.mock_neutron_port_create_rep["port"].update( { 'project_id': 'test-project-id', } ) self.register_uris( [ dict( method="POST", uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'] ), json=self.mock_neutron_port_create_rep, validate=dict( json={ 'port': { 'network_id': 'test-net-id', 'project_id': 'test-project-id', 'name': 'test-port-name', 'admin_state_up': True, } } ), ) ] ) port = self.cloud.create_port( network_id='test-net-id', name='test-port-name', admin_state_up=True, project_id='test-project-id', ) self._compare_ports(self.mock_neutron_port_create_rep['port'], port) self.assert_calls() def test_update_port(self): port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', port_id] ), json=dict( port=self.mock_neutron_port_list_rep['ports'][0] ), ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', port_id] ), json=self.mock_neutron_port_update_rep, validate=dict( json={'port': {'name': 'test-port-name-updated'}} ), ), ] ) port = self.cloud.update_port( name_or_id=port_id, name='test-port-name-updated' ) self._compare_ports(self.mock_neutron_port_update_rep['port'], port) self.assert_calls() def test_update_port_parameters(self): """Test that we detect invalid arguments passed to update_port""" self.assertRaises( TypeError, self.cloud.update_port, name_or_id='test-port-id', nome='test-port-name-updated', ) def test_update_port_exception(self): port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', port_id] ), json=self.mock_neutron_port_list_rep, ), dict( method='PUT', uri=self.get_mock_url( 
'network', 'public', append=['v2.0', 'ports', port_id] ), status_code=500, validate=dict( json={'port': {'name': 'test-port-name-updated'}} ), ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.update_port, name_or_id='d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b', name='test-port-name-updated', ) self.assert_calls() def test_list_ports(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'] ), json=self.mock_neutron_port_list_rep, ) ] ) ports = self.cloud.list_ports() for a, b in zip(self.mock_neutron_port_list_rep['ports'], ports): self._compare_ports(a, b) self.assert_calls() def test_list_ports_filtered(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=['status=DOWN'], ), json=self.mock_neutron_port_list_rep, ) ] ) ports = self.cloud.list_ports(filters={'status': 'DOWN'}) for a, b in zip(self.mock_neutron_port_list_rep['ports'], ports): self._compare_ports(a, b) self.assert_calls() def test_list_ports_exception(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'] ), status_code=500, ) ] ) self.assertRaises(exceptions.SDKException, self.cloud.list_ports) def test_search_ports_by_id(self): port_id = 'f71a6703-d6de-4be1-a91a-a570ede1d159' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'] ), json=self.mock_neutron_port_list_rep, ) ] ) ports = self.cloud.search_ports(name_or_id=port_id) self.assertEqual(1, len(ports)) self.assertEqual('fa:16:3e:bb:3c:e4', ports[0]['mac_address']) self.assert_calls() def test_search_ports_by_name(self): port_name = "first-port" self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'] ), json=self.mock_neutron_port_list_rep, ) ] ) ports = self.cloud.search_ports(name_or_id=port_name) self.assertEqual(1, len(ports)) 
self.assertEqual('fa:16:3e:58:42:ed', ports[0]['mac_address']) self.assert_calls() def test_search_ports_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'] ), json=self.mock_neutron_port_list_rep, ) ] ) ports = self.cloud.search_ports(name_or_id='non-existent') self.assertEqual(0, len(ports)) self.assert_calls() def test_delete_port(self): port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', 'first-port'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=['name=first-port'], ), json=self.mock_neutron_port_list_rep, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', port_id] ), json={}, ), ] ) self.assertTrue(self.cloud.delete_port(name_or_id='first-port')) def test_delete_port_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', 'non-existent'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=['name=non-existent'], ), json={'ports': []}, ), ] ) self.assertFalse(self.cloud.delete_port(name_or_id='non-existent')) self.assert_calls() def test_delete_subnet_multiple_found(self): port_name = "port-name" port1 = dict(id='123', name=port_name) port2 = dict(id='456', name=port_name) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', port_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=['name=%s' % port_name], ), json={'ports': [port1, port2]}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_port, port_name ) self.assert_calls() def 
test_delete_subnet_multiple_using_id(self): port_name = "port-name" port1 = dict(id='123', name=port_name) port2 = dict(id='456', name=port_name) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', port1['id']], ), json={'ports': [port1, port2]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', port1['id']], ), json={}, ), ] ) self.assertTrue(self.cloud.delete_port(name_or_id=port1['id'])) self.assert_calls() def test_get_port_by_id(self): fake_port = dict(id='123', name='456') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports', fake_port['id']], ), json={'port': fake_port}, ) ] ) r = self.cloud.get_port_by_id(fake_port['id']) self.assertIsNotNone(r) self._compare_ports(fake_port, r) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_project.py0000664000175000017500000002276200000000000024535 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import testtools from testtools import matchers from openstack import exceptions from openstack.tests.unit import base class TestProject(base.TestCase): def get_mock_url( self, service_type='identity', interface='public', resource=None, append=None, base_url_append=None, v3=True, qs_elements=None, ): if v3 and resource is None: resource = 'projects' elif not v3 and resource is None: resource = 'tenants' if base_url_append is None and v3: base_url_append = 'v3' return super().get_mock_url( service_type=service_type, interface=interface, resource=resource, append=append, base_url_append=base_url_append, qs_elements=qs_elements, ) def test_create_project_v3( self, ): project_data = self._get_project_data( description=self.getUniqueString('projectDesc'), parent_id=uuid.uuid4().hex, ) reference_req = project_data.json_request.copy() reference_req['project']['enabled'] = True self.register_uris( [ dict( method='POST', uri=self.get_mock_url(), status_code=200, json=project_data.json_response, validate=dict(json=reference_req), ) ] ) project = self.cloud.create_project( name=project_data.project_name, description=project_data.description, domain_id=project_data.domain_id, parent_id=project_data.parent_id, ) self.assertThat(project.id, matchers.Equals(project_data.project_id)) self.assertThat( project.name, matchers.Equals(project_data.project_name) ) self.assertThat( project.description, matchers.Equals(project_data.description) ) self.assertThat( project.domain_id, matchers.Equals(project_data.domain_id) ) self.assert_calls() def test_delete_project_v3(self): project_data = self._get_project_data(v3=False) self.register_uris( [ dict( method='GET', uri=self.get_mock_url(append=[project_data.project_id]), status_code=200, json=project_data.json_response, ), dict( method='DELETE', uri=self.get_mock_url(append=[project_data.project_id]), status_code=204, ), ] ) self.cloud.delete_project(project_data.project_id) self.assert_calls() def 
test_update_project_not_found(self): project_data = self._get_project_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(append=[project_data.project_id]), status_code=404, ), dict( method='GET', uri=self.get_mock_url( qs_elements=['name=' + project_data.project_id] ), status_code=200, json={'projects': []}, ), ] ) # NOTE(notmorgan): This test (and shade) does not represent a case # where the project is in the project list but a 404 is raised when # the PATCH is issued. This is a bug in shade and should be fixed, # shade will raise an attribute error instead of the proper # project not found exception. with testtools.ExpectedException( exceptions.SDKException, "Project %s not found." % project_data.project_id, ): self.cloud.update_project(project_data.project_id) self.assert_calls() def test_update_project_v3(self): project_data = self._get_project_data( description=self.getUniqueString('projectDesc') ) reference_req = project_data.json_request.copy() # Remove elements not actually sent in the update reference_req['project'].pop('domain_id') reference_req['project'].pop('name') reference_req['project'].pop('enabled') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( append=[project_data.project_id], qs_elements=['domain_id=' + project_data.domain_id], ), status_code=200, json={'projects': [project_data.json_response['project']]}, ), dict( method='PATCH', uri=self.get_mock_url(append=[project_data.project_id]), status_code=200, json=project_data.json_response, validate=dict(json=reference_req), ), ] ) project = self.cloud.update_project( project_data.project_id, description=project_data.description, domain_id=project_data.domain_id, ) self.assertThat(project.id, matchers.Equals(project_data.project_id)) self.assertThat( project.name, matchers.Equals(project_data.project_name) ) self.assertThat( project.description, matchers.Equals(project_data.description) ) self.assert_calls() def test_list_projects_v3(self): project_data = 
self._get_project_data( description=self.getUniqueString('projectDesc') ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource=( 'projects?domain_id=%s' % project_data.domain_id ) ), status_code=200, json={'projects': [project_data.json_response['project']]}, ) ] ) projects = self.cloud.list_projects(project_data.domain_id) self.assertThat(len(projects), matchers.Equals(1)) self.assertThat( projects[0].id, matchers.Equals(project_data.project_id) ) self.assert_calls() def test_list_projects_v3_kwarg(self): project_data = self._get_project_data( description=self.getUniqueString('projectDesc') ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource=( 'projects?domain_id=%s' % project_data.domain_id ) ), status_code=200, json={'projects': [project_data.json_response['project']]}, ) ] ) projects = self.cloud.list_projects(domain_id=project_data.domain_id) self.assertThat(len(projects), matchers.Equals(1)) self.assertThat( projects[0].id, matchers.Equals(project_data.project_id) ) self.assert_calls() def test_list_projects_search_compat(self): project_data = self._get_project_data( description=self.getUniqueString('projectDesc') ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={'projects': [project_data.json_response['project']]}, ) ] ) projects = self.cloud.search_projects(project_data.project_id) self.assertThat(len(projects), matchers.Equals(1)) self.assertThat( projects[0].id, matchers.Equals(project_data.project_id) ) self.assert_calls() def test_list_projects_search_compat_v3(self): project_data = self._get_project_data( description=self.getUniqueString('projectDesc') ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource=( 'projects?domain_id=%s' % project_data.domain_id ) ), status_code=200, json={'projects': [project_data.json_response['project']]}, ) ] ) projects = self.cloud.search_projects(domain_id=project_data.domain_id) self.assertThat(len(projects), 
matchers.Equals(1)) self.assertThat( projects[0].id, matchers.Equals(project_data.project_id) ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py0000664000175000017500000005162600000000000030143 0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from openstack import exceptions from openstack.network.v2 import qos_bandwidth_limit_rule from openstack.tests.unit import base class TestQosBandwidthLimitRule(base.TestCase): policy_name = 'qos test policy' policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' rule_id = 'ed1a2b05-0ad7-45d7-873f-008b575a02b3' rule_max_kbps = 1000 rule_max_burst = 100 mock_policy = { 'id': policy_id, 'name': policy_name, 'description': '', 'rules': [], 'project_id': project_id, 'tenant_id': project_id, 'shared': False, 'is_default': False, } mock_rule = { 'id': rule_id, 'max_kbps': rule_max_kbps, 'max_burst_kbps': rule_max_burst, 'direction': 'egress', } qos_extension = { "updated": "2015-06-08T10:00:00-00:00", "name": "Quality of Service", "links": [], "alias": "qos", "description": "The Quality of Service extension.", } qos_bw_limit_direction_extension = { "updated": "2017-04-10T10:00:00-00:00", "name": "Direction for QoS bandwidth limit rule", "links": [], "alias": 
"qos-bw-limit-direction", "description": ( "Allow to configure QoS bandwidth limit rule with " "specific direction: ingress or egress" ), } enabled_neutron_extensions = [ qos_extension, qos_bw_limit_direction_extension, ] def _compare_rules(self, exp, real): self.assertDictEqual( qos_bandwidth_limit_rule.QoSBandwidthLimitRule(**exp).to_dict( computed=False ), real.to_dict(computed=False), ) def test_get_qos_bandwidth_limit_rule(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'bandwidth_limit_rules', self.rule_id, ], ), json={'bandwidth_limit_rule': self.mock_rule}, ), ] ) r = self.cloud.get_qos_bandwidth_limit_rule( self.policy_name, self.rule_id ) self._compare_rules(self.mock_rule, r) self.assert_calls() def test_get_qos_bandwidth_limit_rule_no_qos_policy_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': []}, ), ] ) self.assertRaises( exceptions.NotFoundException, self.cloud.get_qos_bandwidth_limit_rule, self.policy_name, self.rule_id, ) self.assert_calls() 
def test_get_qos_bandwidth_limit_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.get_qos_bandwidth_limit_rule, self.policy_name, self.rule_id, ) self.assert_calls() def test_create_qos_bandwidth_limit_rule(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'bandwidth_limit_rules', ], ), json={'bandwidth_limit_rule': self.mock_rule}, ), ] ) rule = self.cloud.create_qos_bandwidth_limit_rule( self.policy_name, max_kbps=self.rule_max_kbps ) self._compare_rules(self.mock_rule, rule) self.assert_calls() def test_create_qos_bandwidth_limit_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_qos_bandwidth_limit_rule, self.policy_name, max_kbps=100, ) self.assert_calls() def test_create_qos_bandwidth_limit_rule_no_qos_direction_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), 
status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'bandwidth_limit_rules', ], ), json={'bandwidth_limit_rule': self.mock_rule}, ), ] ) rule = self.cloud.create_qos_bandwidth_limit_rule( self.policy_name, max_kbps=self.rule_max_kbps, direction="ingress" ) self._compare_rules(self.mock_rule, rule) self.assert_calls() def test_update_qos_bandwidth_limit_rule(self): expected_rule = copy.copy(self.mock_rule) expected_rule['max_kbps'] = self.rule_max_kbps + 100 self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_id], ), json=self.mock_policy, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'bandwidth_limit_rules', self.rule_id, ], ), json={'bandwidth_limit_rule': self.mock_rule}, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'bandwidth_limit_rules', self.rule_id, ], ), json={'bandwidth_limit_rule': expected_rule}, validate=dict( json={ 'bandwidth_limit_rule': { 'max_kbps': self.rule_max_kbps + 100 } } ), ), ] ) rule = self.cloud.update_qos_bandwidth_limit_rule( self.policy_id, self.rule_id, max_kbps=self.rule_max_kbps + 100 ) self._compare_rules(expected_rule, rule) self.assert_calls() def test_update_qos_bandwidth_limit_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', 
uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.update_qos_bandwidth_limit_rule, self.policy_id, self.rule_id, max_kbps=2000, ) self.assert_calls() def test_update_qos_bandwidth_limit_rule_no_qos_direction_extension(self): expected_rule = copy.copy(self.mock_rule) expected_rule['direction'] = self.rule_max_kbps + 100 self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_id], ), json=self.mock_policy, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'bandwidth_limit_rules', self.rule_id, ], ), json={'bandwidth_limit_rule': self.mock_rule}, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'bandwidth_limit_rules', self.rule_id, ], ), json={'bandwidth_limit_rule': expected_rule}, validate=dict( json={ 'bandwidth_limit_rule': { 'max_kbps': self.rule_max_kbps + 100 } } ), ), ] ) rule = self.cloud.update_qos_bandwidth_limit_rule( self.policy_id, self.rule_id, max_kbps=self.rule_max_kbps + 100, direction="ingress", ) # Even if there was attempt to change direction to 'ingress' it should # be not changed in returned rule self._compare_rules(expected_rule, rule) self.assert_calls() def test_delete_qos_bandwidth_limit_rule(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', 
append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'bandwidth_limit_rules', self.rule_id, ], ), json={}, ), ] ) self.assertTrue( self.cloud.delete_qos_bandwidth_limit_rule( self.policy_name, self.rule_id ) ) self.assert_calls() def test_delete_qos_bandwidth_limit_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_qos_bandwidth_limit_rule, self.policy_name, self.rule_id, ) self.assert_calls() def test_delete_qos_bandwidth_limit_rule_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'bandwidth_limit_rules', self.rule_id, ], ), status_code=404, ), ] ) self.assertFalse( self.cloud.delete_qos_bandwidth_limit_rule( self.policy_name, self.rule_id ) ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py0000664000175000017500000003640000000000000027433 0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from openstack import exceptions from openstack.network.v2 import qos_dscp_marking_rule from openstack.tests.unit import base class TestQosDscpMarkingRule(base.TestCase): policy_name = 'qos test policy' policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' rule_id = 'ed1a2b05-0ad7-45d7-873f-008b575a02b3' rule_dscp_mark = 32 mock_policy = { 'id': policy_id, 'name': policy_name, 'description': '', 'rules': [], 'project_id': project_id, 'tenant_id': project_id, 'shared': False, 'is_default': False, } mock_rule = { 'id': rule_id, 'dscp_mark': rule_dscp_mark, } qos_extension = { "updated": "2015-06-08T10:00:00-00:00", "name": "Quality of Service", "links": [], "alias": "qos", "description": "The Quality of Service extension.", } enabled_neutron_extensions = [qos_extension] def _compare_rules(self, exp, real): self.assertDictEqual( qos_dscp_marking_rule.QoSDSCPMarkingRule(**exp).to_dict( computed=False ), real.to_dict(computed=False), ) def test_get_qos_dscp_marking_rule(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', 
uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'dscp_marking_rules', self.rule_id, ], ), json={'dscp_marking_rule': self.mock_rule}, ), ] ) r = self.cloud.get_qos_dscp_marking_rule( self.policy_name, self.rule_id ) self._compare_rules(self.mock_rule, r) self.assert_calls() def test_get_qos_dscp_marking_rule_no_qos_policy_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': []}, ), ] ) self.assertRaises( exceptions.NotFoundException, self.cloud.get_qos_dscp_marking_rule, self.policy_name, self.rule_id, ) self.assert_calls() def test_get_qos_dscp_marking_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.get_qos_dscp_marking_rule, self.policy_name, self.rule_id, ) self.assert_calls() def test_create_qos_dscp_marking_rule(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 
'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'dscp_marking_rules', ], ), json={'dscp_marking_rule': self.mock_rule}, ), ] ) rule = self.cloud.create_qos_dscp_marking_rule( self.policy_name, dscp_mark=self.rule_dscp_mark ) self._compare_rules(self.mock_rule, rule) self.assert_calls() def test_create_qos_dscp_marking_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_qos_dscp_marking_rule, self.policy_name, dscp_mark=16, ) self.assert_calls() def test_update_qos_dscp_marking_rule(self): new_dscp_mark_value = 16 expected_rule = copy.copy(self.mock_rule) expected_rule['dscp_mark'] = new_dscp_mark_value self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_id], ), json=self.mock_policy, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'dscp_marking_rules', self.rule_id, ], ), json={'dscp_marking_rule': self.mock_rule}, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'dscp_marking_rules', self.rule_id, ], ), json={'dscp_marking_rule': expected_rule}, validate=dict( json={ 'dscp_marking_rule': { 'dscp_mark': new_dscp_mark_value } } ), ), ] ) rule = self.cloud.update_qos_dscp_marking_rule( 
self.policy_id, self.rule_id, dscp_mark=new_dscp_mark_value ) self._compare_rules(expected_rule, rule) self.assert_calls() def test_update_qos_dscp_marking_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.update_qos_dscp_marking_rule, self.policy_id, self.rule_id, dscp_mark=8, ) self.assert_calls() def test_delete_qos_dscp_marking_rule(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'dscp_marking_rules', self.rule_id, ], ), json={}, ), ] ) self.assertTrue( self.cloud.delete_qos_dscp_marking_rule( self.policy_name, self.rule_id ) ) self.assert_calls() def test_delete_qos_dscp_marking_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_qos_dscp_marking_rule, self.policy_name, self.rule_id, ) self.assert_calls() def test_delete_qos_dscp_marking_rule_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 
'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'dscp_marking_rules', self.rule_id, ], ), status_code=404, ), ] ) self.assertFalse( self.cloud.delete_qos_dscp_marking_rule( self.policy_name, self.rule_id ) ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py0000664000175000017500000003667000000000000030502 0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import copy from openstack import exceptions from openstack.network.v2 import qos_minimum_bandwidth_rule from openstack.tests.unit import base class TestQosMinimumBandwidthRule(base.TestCase): policy_name = 'qos test policy' policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' rule_id = 'ed1a2b05-0ad7-45d7-873f-008b575a02b3' rule_min_kbps = 1000 mock_policy = { 'id': policy_id, 'name': policy_name, 'description': '', 'rules': [], 'project_id': project_id, 'tenant_id': project_id, 'shared': False, 'is_default': False, } mock_rule = { 'id': rule_id, 'min_kbps': rule_min_kbps, 'direction': 'egress', } qos_extension = { "updated": "2015-06-08T10:00:00-00:00", "name": "Quality of Service", "links": [], "alias": "qos", "description": "The Quality of Service extension.", } enabled_neutron_extensions = [qos_extension] def _compare_rules(self, exp, real): self.assertDictEqual( qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule(**exp).to_dict( computed=False ), real.to_dict(computed=False), ) def test_get_qos_minimum_bandwidth_rule(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'minimum_bandwidth_rules', self.rule_id, ], ), json={'minimum_bandwidth_rule': self.mock_rule}, ), ] ) r = self.cloud.get_qos_minimum_bandwidth_rule( self.policy_name, self.rule_id ) self._compare_rules(self.mock_rule, r) self.assert_calls() def 
test_get_qos_minimum_bandwidth_rule_no_qos_policy_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': []}, ), ] ) self.assertRaises( exceptions.NotFoundException, self.cloud.get_qos_minimum_bandwidth_rule, self.policy_name, self.rule_id, ) self.assert_calls() def test_get_qos_minimum_bandwidth_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.get_qos_minimum_bandwidth_rule, self.policy_name, self.rule_id, ) self.assert_calls() def test_create_qos_minimum_bandwidth_rule(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'minimum_bandwidth_rules', ], ), json={'minimum_bandwidth_rule': self.mock_rule}, ), ] ) rule = self.cloud.create_qos_minimum_bandwidth_rule( self.policy_name, min_kbps=self.rule_min_kbps ) self._compare_rules(self.mock_rule, rule) self.assert_calls() def 
test_create_qos_minimum_bandwidth_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_qos_minimum_bandwidth_rule, self.policy_name, min_kbps=100, ) self.assert_calls() def test_update_qos_minimum_bandwidth_rule(self): expected_rule = copy.copy(self.mock_rule) expected_rule['min_kbps'] = self.rule_min_kbps + 100 self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_id], ), json=self.mock_policy, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'minimum_bandwidth_rules', self.rule_id, ], ), json={'minimum_bandwidth_rule': self.mock_rule}, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'minimum_bandwidth_rules', self.rule_id, ], ), json={'minimum_bandwidth_rule': expected_rule}, validate=dict( json={ 'minimum_bandwidth_rule': { 'min_kbps': self.rule_min_kbps + 100 } } ), ), ] ) rule = self.cloud.update_qos_minimum_bandwidth_rule( self.policy_id, self.rule_id, min_kbps=self.rule_min_kbps + 100 ) self._compare_rules(expected_rule, rule) self.assert_calls() def test_update_qos_minimum_bandwidth_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.update_qos_minimum_bandwidth_rule, self.policy_id, self.rule_id, min_kbps=2000, ) self.assert_calls() def test_delete_qos_minimum_bandwidth_rule(self): self.register_uris( [ dict( method='GET', 
uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'minimum_bandwidth_rules', self.rule_id, ], ), json={}, ), ] ) self.assertTrue( self.cloud.delete_qos_minimum_bandwidth_rule( self.policy_name, self.rule_id ) ) self.assert_calls() def test_delete_qos_minimum_bandwidth_rule_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_qos_minimum_bandwidth_rule, self.policy_name, self.rule_id, ) self.assert_calls() def test_delete_qos_minimum_bandwidth_rule_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'policies', self.policy_id, 'minimum_bandwidth_rules', self.rule_id, ], ), status_code=404, ), ] ) self.assertFalse( self.cloud.delete_qos_minimum_bandwidth_rule( self.policy_name, self.rule_id ) ) self.assert_calls() 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_qos_policy.py0000664000175000017500000003753500000000000025254 0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from openstack import exceptions from openstack.network.v2 import qos_policy as _policy from openstack.tests.unit import base class TestQosPolicy(base.TestCase): policy_name = 'qos test policy' policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' mock_policy = { 'id': policy_id, 'name': policy_name, 'description': '', 'rules': [], 'project_id': project_id, 'tenant_id': project_id, 'shared': False, 'is_default': False, 'tags': [], } qos_extension = { "updated": "2015-06-08T10:00:00-00:00", "name": "Quality of Service", "links": [], "alias": "qos", "description": "The Quality of Service extension.", } qos_default_extension = { "updated": "2017-041-06T10:00:00-00:00", "name": "QoS default policy", "links": [], "alias": "qos-default", "description": "Expose the QoS default policy per project", } enabled_neutron_extensions = [qos_extension, qos_default_extension] def _compare_policies(self, exp, real): self.assertDictEqual( _policy.QoSPolicy(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def test_get_qos_policy(self): self.register_uris( [ dict( method='GET', 
uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), ] ) r = self.cloud.get_qos_policy(self.policy_name) self.assertIsNotNone(r) self._compare_policies(self.mock_policy, r) self.assert_calls() def test_get_qos_policy_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.get_qos_policy, self.policy_name, ) self.assert_calls() def test_create_qos_policy(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'] ), json={'policy': self.mock_policy}, ), ] ) policy = self.cloud.create_qos_policy( name=self.policy_name, project_id=self.project_id ) self._compare_policies(self.mock_policy, policy) self.assert_calls() def test_create_qos_policy_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_qos_policy, name=self.policy_name, ) self.assert_calls() def test_create_qos_policy_no_qos_default_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='GET', 
uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'] ), json={'policy': self.mock_policy}, validate=dict( json={ 'policy': { 'name': self.policy_name, 'project_id': self.project_id, } } ), ), ] ) policy = self.cloud.create_qos_policy( name=self.policy_name, project_id=self.project_id, default=True ) self._compare_policies(self.mock_policy, policy) self.assert_calls() def test_delete_qos_policy(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [self.mock_policy]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_id], ), json={}, ), ] ) self.assertTrue(self.cloud.delete_qos_policy(self.policy_name)) self.assert_calls() def test_delete_qos_policy_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_qos_policy, self.policy_name, ) self.assert_calls() def test_delete_qos_policy_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', 'goofy'], ), status_code=404, ), dict( method='GET', 
uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=goofy'], ), json={'policies': []}, ), ] ) self.assertFalse(self.cloud.delete_qos_policy('goofy')) self.assert_calls() def test_delete_qos_policy_multiple_found(self): policy1 = dict(id='123', name=self.policy_name) policy2 = dict(id='456', name=self.policy_name) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies'], qs_elements=['name=%s' % self.policy_name], ), json={'policies': [policy1, policy2]}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_qos_policy, self.policy_name, ) self.assert_calls() def test_delete_qos_policy_using_id(self): policy1 = self.mock_policy self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', policy1['id']], ), json=policy1, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_id], ), json={}, ), ] ) self.assertTrue(self.cloud.delete_qos_policy(policy1['id'])) self.assert_calls() def test_update_qos_policy(self): expected_policy = copy.copy(self.mock_policy) expected_policy['name'] = 'goofy' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_id], ), 
json=self.mock_policy, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_id], ), json={'policy': expected_policy}, validate=dict(json={'policy': {'name': 'goofy'}}), ), ] ) policy = self.cloud.update_qos_policy(self.policy_id, name='goofy') self._compare_policies(expected_policy, policy) self.assert_calls() def test_update_qos_policy_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.update_qos_policy, self.policy_id, name="goofy", ) self.assert_calls() def test_update_qos_policy_no_qos_default_extension(self): expected_policy = copy.copy(self.mock_policy) expected_policy['name'] = 'goofy' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_id], ), json=self.mock_policy, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'policies', self.policy_id], ), json={'policy': expected_policy}, validate=dict(json={'policy': {'name': "goofy"}}), ), ] ) policy = self.cloud.update_qos_policy( self.policy_id, name='goofy', default=True ) self._compare_policies(expected_policy, policy) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_qos_rule_type.py0000664000175000017500000001613100000000000025752 0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack import exceptions from openstack.network.v2 import qos_rule_type from openstack.tests.unit import base class TestQosRuleType(base.TestCase): rule_type_name = "bandwidth_limit" qos_extension = { "updated": "2015-06-08T10:00:00-00:00", "name": "Quality of Service", "links": [], "alias": "qos", "description": "The Quality of Service extension.", } qos_rule_type_details_extension = { "updated": "2017-06-22T10:00:00-00:00", "name": "Details of QoS rule types", "links": [], "alias": "qos-rule-type-details", "description": ( "Expose details about QoS rule types supported by " "loaded backend drivers" ), } mock_rule_type_bandwidth_limit = {'type': 'bandwidth_limit'} mock_rule_type_dscp_marking = {'type': 'dscp_marking'} mock_rule_types = [ mock_rule_type_bandwidth_limit, mock_rule_type_dscp_marking, ] mock_rule_type_details = { 'drivers': [ { 'name': 'linuxbridge', 'supported_parameters': [ { 'parameter_values': {'start': 0, 'end': 2147483647}, 'parameter_type': 'range', 'parameter_name': 'max_kbps', }, { 'parameter_values': ['ingress', 'egress'], 'parameter_type': 'choices', 'parameter_name': 'direction', }, { 'parameter_values': {'start': 0, 'end': 2147483647}, 'parameter_type': 'range', 'parameter_name': 'max_burst_kbps', }, ], } ], 'type': rule_type_name, } def _compare_rule_types(self, exp, real): self.assertDictEqual( qos_rule_type.QoSRuleType(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def 
test_list_qos_rule_types(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'qos', 'rule-types'], ), json={'rule_types': self.mock_rule_types}, ), ] ) rule_types = self.cloud.list_qos_rule_types() for a, b in zip(self.mock_rule_types, rule_types): self._compare_rule_types(a, b) self.assert_calls() def test_list_qos_rule_types_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.list_qos_rule_types ) self.assert_calls() def test_get_qos_rule_type_details(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={ 'extensions': [ self.qos_extension, self.qos_rule_type_details_extension, ] }, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={ 'extensions': [ self.qos_extension, self.qos_rule_type_details_extension, ] }, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'qos', 'rule-types', self.rule_type_name, ], ), json={'rule_type': self.mock_rule_type_details}, ), ] ) self._compare_rule_types( self.mock_rule_type_details, self.cloud.get_qos_rule_type_details(self.rule_type_name), ) self.assert_calls() def test_get_qos_rule_type_details_no_qos_extension(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': []}, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.get_qos_rule_type_details, self.rule_type_name, ) self.assert_calls() def test_get_qos_rule_type_details_no_qos_details_extension(self): self.register_uris( [ dict( method='GET', 
uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': [self.qos_extension]}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.get_qos_rule_type_details, self.rule_type_name, ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_quotas.py0000664000175000017500000003041200000000000024372 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack.network.v2 import quota as _quota from openstack.tests.unit import base fake_quota_set = { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 45, "server_groups": 10, "server_group_members": 10, } class TestQuotas(base.TestCase): def setUp(self, cloud_config_fixture='clouds.yaml'): super().setUp(cloud_config_fixture=cloud_config_fixture) def test_update_quotas(self): project = self._get_project_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'identity', 'public', append=['v3', 'projects', project.project_id], ), json={'project': project.json_response['project']}, ), self.get_nova_discovery_mock_dict(), dict( method='PUT', uri=self.get_mock_url( 'compute', 'public', append=['os-quota-sets', project.project_id], ), json={'quota_set': fake_quota_set}, validate=dict( json={'quota_set': {'cores': 1, 'force': True}} ), ), ] ) self.cloud.set_compute_quotas(project.project_id, cores=1) self.assert_calls() def test_update_quotas_bad_request(self): project = self._get_project_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'identity', 'public', append=['v3', 'projects', project.project_id], ), json={'project': project.json_response['project']}, ), self.get_nova_discovery_mock_dict(), dict( method='PUT', uri=self.get_mock_url( 'compute', 'public', append=['os-quota-sets', project.project_id], ), status_code=400, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.set_compute_quotas, project.project_id, ) self.assert_calls() def test_get_quotas(self): project = self._get_project_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'identity', 'public', append=['v3', 'projects', project.project_id], ), json={'project': 
project.json_response['project']}, ), self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-quota-sets', project.project_id], ), json={'quota_set': fake_quota_set}, ), ] ) self.cloud.get_compute_quotas(project.project_id) self.assert_calls() def test_delete_quotas(self): project = self._get_project_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'identity', 'public', append=['v3', 'projects', project.project_id], ), json={'project': project.json_response['project']}, ), self.get_nova_discovery_mock_dict(), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['os-quota-sets', project.project_id], ), ), ] ) self.cloud.delete_compute_quotas(project.project_id) self.assert_calls() def test_cinder_update_quotas(self): project = self._get_project_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'identity', 'public', append=['v3', 'projects', project.project_id], ), json={'project': project.json_response['project']}, ), self.get_cinder_discovery_mock_dict(), dict( method='PUT', uri=self.get_mock_url( 'volumev3', 'public', append=['os-quota-sets', project.project_id], ), json=dict(quota_set={'volumes': 1}), validate=dict(json={'quota_set': {'volumes': 1}}), ), ] ) self.cloud.set_volume_quotas(project.project_id, volumes=1) self.assert_calls() def test_cinder_get_quotas(self): project = self._get_project_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'identity', 'public', append=['v3', 'projects', project.project_id], ), json={'project': project.json_response['project']}, ), self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['os-quota-sets', project.project_id], qs_elements=['usage=False'], ), json=dict(quota_set={'snapshots': 10, 'volumes': 20}), ), ] ) self.cloud.get_volume_quotas(project.project_id) self.assert_calls() def test_cinder_delete_quotas(self): 
project = self._get_project_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'identity', 'public', append=['v3', 'projects', project.project_id], ), json={'project': project.json_response['project']}, ), self.get_cinder_discovery_mock_dict(), dict( method='DELETE', uri=self.get_mock_url( 'volumev3', 'public', append=['os-quota-sets', project.project_id], ), ), ] ) self.cloud.delete_volume_quotas(project.project_id) self.assert_calls() def test_neutron_update_quotas(self): project = self.mock_for_keystone_projects( project_count=1, id_get=True )[0] self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'quotas', project.project_id], ), json={}, validate=dict(json={'quota': {'network': 1}}), ) ] ) self.cloud.set_network_quotas(project.project_id, network=1) self.assert_calls() def test_neutron_get_quotas(self): quota = { 'subnet': 100, 'network': 100, 'floatingip': 50, 'subnetpool': -1, 'security_group_rule': 100, 'security_group': 10, 'router': 10, 'rbac_policy': 10, 'port': 500, } project = self.mock_for_keystone_projects( project_count=1, id_get=True )[0] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'quotas', project.project_id], ), json={'quota': quota}, ) ] ) received_quota = self.cloud.get_network_quotas( project.project_id ).to_dict(computed=False) expected_quota = _quota.Quota(**quota).to_dict(computed=False) received_quota.pop('id') received_quota.pop('name') expected_quota.pop('id') expected_quota.pop('name') self.assertDictEqual(expected_quota, received_quota) self.assert_calls() def test_neutron_get_quotas_details(self): quota_details = { 'subnet': {'limit': 100, 'used': 7, 'reserved': 0}, 'network': {'limit': 100, 'used': 6, 'reserved': 0}, 'floatingip': {'limit': 50, 'used': 0, 'reserved': 0}, 'subnetpool': {'limit': -1, 'used': 2, 'reserved': 0}, 'security_group_rule': {'limit': 100, 'used': 4, 'reserved': 0}, 'security_group': 
{'limit': 10, 'used': 1, 'reserved': 0}, 'router': {'limit': 10, 'used': 2, 'reserved': 0}, 'rbac_policy': {'limit': 10, 'used': 2, 'reserved': 0}, 'port': {'limit': 500, 'used': 7, 'reserved': 0}, } project = self.mock_for_keystone_projects( project_count=1, id_get=True )[0] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'quotas', project.project_id, 'details', ], ), json={'quota': quota_details}, ) ] ) received_quota_details = self.cloud.get_network_quotas( project.project_id, details=True ) self.assertDictEqual( _quota.QuotaDetails(**quota_details).to_dict(computed=False), received_quota_details.to_dict(computed=False), ) self.assert_calls() def test_neutron_delete_quotas(self): project = self.mock_for_keystone_projects( project_count=1, id_get=True )[0] self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'quotas', project.project_id], ), json={}, ) ] ) self.cloud.delete_network_quotas(project.project_id) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_rebuild_server.py0000664000175000017500000002545200000000000026102 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_rebuild_server ---------------------------------- Tests for the `rebuild_server` command. 
""" import uuid from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestRebuildServer(base.TestCase): def setUp(self): super().setUp() self.server_id = str(uuid.uuid4()) self.server_name = self.getUniqueString('name') self.fake_server = fakes.make_fake_server( self.server_id, self.server_name ) self.rebuild_server = fakes.make_fake_server( self.server_id, self.server_name, 'REBUILD' ) self.error_server = fakes.make_fake_server( self.server_id, self.server_name, 'ERROR' ) def test_rebuild_server_rebuild_exception(self): """ Test that an exception in the rebuild raises an exception in rebuild_server. """ self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id, 'action'], ), status_code=400, validate=dict( json={'rebuild': {'imageRef': 'a', 'adminPass': 'b'}} ), ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.rebuild_server, self.fake_server['id'], "a", "b", ) self.assert_calls() def test_rebuild_server_server_error(self): """ Test that a server error while waiting for the server to rebuild raises an exception in rebuild_server. """ self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id, 'action'], ), json={'server': self.rebuild_server}, validate=dict(json={'rebuild': {'imageRef': 'a'}}), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id] ), json={'server': self.error_server}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.rebuild_server, self.fake_server['id'], "a", wait=True, ) self.assert_calls() def test_rebuild_server_timeout(self): """ Test that a timeout while waiting for the server to rebuild raises an exception in rebuild_server. 
""" self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id, 'action'], ), json={'server': self.rebuild_server}, validate=dict(json={'rebuild': {'imageRef': 'a'}}), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id] ), json={'server': self.rebuild_server}, ), ] ) self.assertRaises( exceptions.ResourceTimeout, self.cloud.rebuild_server, self.fake_server['id'], "a", wait=True, timeout=0.001, ) self.assert_calls(do_count=False) def test_rebuild_server_no_wait(self): """ Test that rebuild_server with no wait and no exception in the rebuild call returns the server instance. """ self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id, 'action'], ), json={'server': self.rebuild_server}, validate=dict(json={'rebuild': {'imageRef': 'a'}}), ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), ] ) self.assertEqual( self.rebuild_server['status'], self.cloud.rebuild_server(self.fake_server['id'], "a")['status'], ) self.assert_calls() def test_rebuild_server_with_admin_pass_no_wait(self): """ Test that a server with an admin_pass passed returns the password """ password = self.getUniqueString('password') rebuild_server = self.rebuild_server.copy() rebuild_server['adminPass'] = password self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id, 'action'], ), json={'server': rebuild_server}, validate=dict( json={ 'rebuild': {'imageRef': 'a', 'adminPass': password} } ), ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), ] ) self.assertEqual( password, self.cloud.rebuild_server( 
self.fake_server['id'], 'a', admin_pass=password )['adminPass'], ) self.assert_calls() def test_rebuild_server_with_admin_pass_wait(self): """ Test that a server with an admin_pass passed returns the password """ password = self.getUniqueString('password') rebuild_server = self.rebuild_server.copy() rebuild_server['adminPass'] = password self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id, 'action'], ), json={'server': rebuild_server}, validate=dict( json={ 'rebuild': {'imageRef': 'a', 'adminPass': password} } ), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id] ), json={'server': self.rebuild_server}, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id] ), json={'server': self.fake_server}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), ] ) self.assertEqual( password, self.cloud.rebuild_server( self.fake_server['id'], 'a', admin_pass=password, wait=True )['adminPass'], ) self.assert_calls() def test_rebuild_server_wait(self): """ Test that rebuild_server with a wait returns the server instance when its status changes to "ACTIVE". 
""" self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id, 'action'], ), json={'server': self.rebuild_server}, validate=dict(json={'rebuild': {'imageRef': 'a'}}), ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id] ), json={'server': self.rebuild_server}, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id] ), json={'server': self.fake_server}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), ] ) self.assertEqual( 'ACTIVE', self.cloud.rebuild_server(self.fake_server['id'], 'a', wait=True)[ 'status' ], ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_recordset.py0000664000175000017500000004751400000000000025063 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack.tests.unit import base from openstack.tests.unit.cloud import test_zone zone = test_zone.zone_dict recordset = { 'name': 'www.example.net.', 'type': 'A', 'description': 'Example zone rec', 'ttl': 3600, 'records': ['192.168.1.1'], 'id': '1', 'zone_id': zone['id'], 'zone_name': zone['name'], } class RecordsetTestWrapper(test_zone.ZoneTestWrapper): pass class TestRecordset(base.TestCase): def setUp(self): super().setUp() self.use_designate() def test_create_recordset_zoneid(self): fake_zone = test_zone.ZoneTestWrapper(self, zone) fake_rs = RecordsetTestWrapper(self, recordset) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id']], ), json=fake_zone.get_get_response_json(), ), dict( method='POST', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', zone['id'], 'recordsets'], ), json=fake_rs.get_create_response_json(), validate=dict( json={ "records": fake_rs['records'], "type": fake_rs['type'], "name": fake_rs['name'], "description": fake_rs['description'], "ttl": fake_rs['ttl'], } ), ), ] ) rs = self.cloud.create_recordset( zone=fake_zone['id'], name=fake_rs['name'], recordset_type=fake_rs['type'], records=fake_rs['records'], description=fake_rs['description'], ttl=fake_rs['ttl'], ) fake_rs.cmp(rs) self.assert_calls() def test_create_recordset_zonename(self): fake_zone = test_zone.ZoneTestWrapper(self, zone) fake_rs = RecordsetTestWrapper(self, recordset) self.register_uris( [ # try by directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['name']], ), status_code=404, ), # list with name dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones'], qs_elements=[ 'name={name}'.format(name=fake_zone['name']) ], ), json={'zones': [fake_zone.get_get_response_json()]}, ), dict( method='POST', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', zone['id'], 
'recordsets'], ), json=fake_rs.get_create_response_json(), validate=dict( json={ "records": fake_rs['records'], "type": fake_rs['type'], "name": fake_rs['name'], "description": fake_rs['description'], "ttl": fake_rs['ttl'], } ), ), ] ) rs = self.cloud.create_recordset( zone=fake_zone['name'], name=fake_rs['name'], recordset_type=fake_rs['type'], records=fake_rs['records'], description=fake_rs['description'], ttl=fake_rs['ttl'], ) fake_rs.cmp(rs) self.assert_calls() def test_create_recordset_exception(self): fake_zone = test_zone.ZoneTestWrapper(self, zone) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id']], ), json=fake_zone.get_get_response_json(), ), dict( method='POST', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', zone['id'], 'recordsets'], ), status_code=500, validate=dict( json={ 'name': 'www2.example.net.', 'records': ['192.168.1.2'], 'type': 'A', } ), ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_recordset, fake_zone['id'], 'www2.example.net.', 'a', ['192.168.1.2'], ) self.assert_calls() def test_update_recordset(self): fake_zone = test_zone.ZoneTestWrapper(self, zone) fake_rs = RecordsetTestWrapper(self, recordset) new_ttl = 7200 expected_recordset = recordset.copy() expected_recordset['ttl'] = new_ttl updated_rs = RecordsetTestWrapper(self, expected_recordset) self.register_uris( [ # try by directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['name']], ), status_code=404, ), # list with name dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones'], qs_elements=[ 'name={name}'.format(name=fake_zone['name']) ], ), json={'zones': [fake_zone.get_get_response_json()]}, ), # try directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=[ 'v2', 'zones', fake_zone['id'], 'recordsets', fake_rs['name'], ], ), status_code=404, ), # list with name dict( method='GET', 
uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id'], 'recordsets'], qs_elements=[ 'name={name}'.format(name=fake_rs['name']) ], ), json={'recordsets': [fake_rs.get_get_response_json()]}, ), # update dict( method='PUT', uri=self.get_mock_url( 'dns', 'public', append=[ 'v2', 'zones', fake_zone['id'], 'recordsets', fake_rs['id'], ], ), json=updated_rs.get_get_response_json(), validate=dict(json={'ttl': new_ttl}), ), ] ) res = self.cloud.update_recordset( fake_zone['name'], fake_rs['name'], ttl=new_ttl ) updated_rs.cmp(res) self.assert_calls() def test_list_recordsets(self): fake_zone = test_zone.ZoneTestWrapper(self, zone) fake_rs = RecordsetTestWrapper(self, recordset) self.register_uris( [ # try by directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id']], ), json=fake_zone.get_get_response_json(), ), dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id'], 'recordsets'], ), json={ 'recordsets': [fake_rs.get_get_response_json()], 'links': { 'next': self.get_mock_url( 'dns', 'public', append=[ 'v2', 'zones', fake_zone['id'], 'recordsets', ], qs_elements=['limit=1', 'marker=asd'], ), 'self': self.get_mock_url( 'dns', 'public', append=[ 'v2', 'zones', fake_zone['id'], 'recordsets?limit=1', ], ), }, 'metadata': {'total_count': 2}, }, ), dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id'], 'recordsets'], qs_elements=['limit=1', 'marker=asd'], ), json={'recordsets': [fake_rs.get_get_response_json()]}, ), ] ) res = self.cloud.list_recordsets(fake_zone['id']) self.assertEqual(2, len(res)) self.assert_calls() def test_delete_recordset(self): fake_zone = test_zone.ZoneTestWrapper(self, zone) fake_rs = RecordsetTestWrapper(self, recordset) self.register_uris( [ # try by directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['name']], ), status_code=404, ), # list 
with name dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones'], qs_elements=[ 'name={name}'.format(name=fake_zone['name']) ], ), json={'zones': [fake_zone.get_get_response_json()]}, ), # try directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=[ 'v2', 'zones', fake_zone['id'], 'recordsets', fake_rs['name'], ], ), status_code=404, ), # list with name dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id'], 'recordsets'], qs_elements=[ 'name={name}'.format(name=fake_rs['name']) ], ), json={'recordsets': [fake_rs.get_get_response_json()]}, ), dict( method='DELETE', uri=self.get_mock_url( 'dns', 'public', append=[ 'v2', 'zones', zone['id'], 'recordsets', fake_rs['id'], ], ), status_code=202, ), ] ) self.assertTrue( self.cloud.delete_recordset(fake_zone['name'], fake_rs['name']) ) self.assert_calls() def test_get_recordset_by_id(self): fake_zone = test_zone.ZoneTestWrapper(self, zone) fake_rs = RecordsetTestWrapper(self, recordset) self.register_uris( [ # try by directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['name']], ), status_code=404, ), # list with name dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones'], qs_elements=[ 'name={name}'.format(name=fake_zone['name']) ], ), json={'zones': [fake_zone.get_get_response_json()]}, ), # try directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=[ 'v2', 'zones', fake_zone['id'], 'recordsets', fake_rs['id'], ], ), json=fake_rs.get_get_response_json(), ), ] ) res = self.cloud.get_recordset(fake_zone['name'], fake_rs['id']) fake_rs.cmp(res) self.assert_calls() def test_get_recordset_by_name(self): fake_zone = test_zone.ZoneTestWrapper(self, zone) fake_rs = RecordsetTestWrapper(self, recordset) self.register_uris( [ # try by directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', 
fake_zone['name']], ), status_code=404, ), # list with name dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones'], qs_elements=[ 'name={name}'.format(name=fake_zone['name']) ], ), json={'zones': [fake_zone.get_get_response_json()]}, ), # try directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=[ 'v2', 'zones', fake_zone['id'], 'recordsets', fake_rs['name'], ], ), status_code=404, ), # list with name dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id'], 'recordsets'], qs_elements=[ 'name={name}'.format(name=fake_rs['name']) ], ), json={'recordsets': [fake_rs.get_get_response_json()]}, ), ] ) res = self.cloud.get_recordset(fake_zone['name'], fake_rs['name']) fake_rs.cmp(res) self.assert_calls() def test_get_recordset_not_found_returns_false(self): fake_zone = test_zone.ZoneTestWrapper(self, zone) self.register_uris( [ # try by directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id']], ), json=fake_zone.get_get_response_json(), ), # try directly dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=[ 'v2', 'zones', fake_zone['id'], 'recordsets', 'fake', ], ), status_code=404, ), # list with name dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id'], 'recordsets'], qs_elements=['name=fake'], ), json={'recordsets': []}, ), ] ) res = self.cloud.get_recordset(fake_zone['id'], 'fake') self.assertFalse(res) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_role_assignment.py0000664000175000017500000020561500000000000026260 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import testtools from testtools import matchers from openstack import exceptions from openstack.tests.unit import base class TestRoleAssignment(base.TestCase): def _build_role_assignment_response( self, role_id, scope_type, scope_id, entity_type, entity_id ): self.assertThat(['group', 'user'], matchers.Contains(entity_type)) self.assertThat(['project', 'domain'], matchers.Contains(scope_type)) # NOTE(notmorgan): Links are thrown out by shade, but we construct them # for corectness. link_str = ( 'https://identity.example.com/identity/v3/{scope_t}s' '/{scopeid}/{entity_t}s/{entityid}/roles/{roleid}' ) return [ { 'links': { 'assignment': link_str.format( scope_t=scope_type, scopeid=scope_id, entity_t=entity_type, entityid=entity_id, roleid=role_id, ) }, 'role': {'id': role_id}, 'scope': {scope_type: {'id': scope_id}}, entity_type: {'id': entity_id}, } ] def setUp(self, cloud_config_fixture='clouds.yaml'): super().setUp(cloud_config_fixture) self.role_data = self._get_role_data() self.domain_data = self._get_domain_data() self.user_data = self._get_user_data( domain_id=self.domain_data.domain_id ) self.project_data = self._get_project_data( domain_id=self.domain_data.domain_id ) self.project_data_v2 = self._get_project_data( project_name=self.project_data.project_name, project_id=self.project_data.project_id, v3=False, ) self.group_data = self._get_group_data( domain_id=self.domain_data.domain_id ) self.user_project_assignment = self._build_role_assignment_response( role_id=self.role_data.role_id, scope_type='project', scope_id=self.project_data.project_id, 
entity_type='user', entity_id=self.user_data.user_id, ) self.group_project_assignment = self._build_role_assignment_response( role_id=self.role_data.role_id, scope_type='project', scope_id=self.project_data.project_id, entity_type='group', entity_id=self.group_data.group_id, ) self.user_domain_assignment = self._build_role_assignment_response( role_id=self.role_data.role_id, scope_type='domain', scope_id=self.domain_data.domain_id, entity_type='user', entity_id=self.user_data.user_id, ) self.group_domain_assignment = self._build_role_assignment_response( role_id=self.role_data.role_id, scope_type='domain', scope_id=self.domain_data.domain_id, entity_type='group', entity_id=self.group_data.group_id, ) # Cleanup of instances to ensure garbage collection/no leaking memory # in tests. self.addCleanup(delattr, self, 'role_data') self.addCleanup(delattr, self, 'user_data') self.addCleanup(delattr, self, 'domain_data') self.addCleanup(delattr, self, 'group_data') self.addCleanup(delattr, self, 'project_data') self.addCleanup(delattr, self, 'project_data_v2') self.addCleanup(delattr, self, 'user_project_assignment') self.addCleanup(delattr, self, 'group_project_assignment') self.addCleanup(delattr, self, 'user_domain_assignment') self.addCleanup(delattr, self, 'group_domain_assignment') def get_mock_url( self, service_type='identity', interface='public', resource='role_assignments', append=None, base_url_append='v3', qs_elements=None, ): return super().get_mock_url( service_type, interface, resource, append, base_url_append, qs_elements, ) def __get( self, resource, data, attr, qs_elements, use_name=False, is_found=True ): if not use_name: if is_found: return [ dict( method='GET', uri=self.get_mock_url( resource=resource + 's', # do roles from role append=[getattr(data, attr)], qs_elements=qs_elements, ), status_code=200, json=data.json_response, ) ] else: return [ dict( method='GET', uri=self.get_mock_url( resource=resource + 's', # do roles from role 
append=[getattr(data, attr)], qs_elements=qs_elements, ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( resource=resource + 's', # do roles from role qs_elements=qs_elements, ), status_code=200, json={(resource + 's'): []}, ), ] else: return [ dict( method='GET', uri=self.get_mock_url( resource=resource + 's', append=[getattr(data, attr)], qs_elements=qs_elements, ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( resource=resource + 's', qs_elements=['name=' + getattr(data, attr)] + qs_elements, ), status_code=200, json={(resource + 's'): [data.json_response[resource]]}, ), ] def __user_mocks(self, user_data, use_name, is_found=True): uri_mocks = [] if not use_name: uri_mocks.append( dict( method='GET', uri=self.get_mock_url(resource='users'), status_code=200, json={ 'users': ( [user_data.json_response['user']] if is_found else [] ) }, ) ) else: uri_mocks.append( dict( method='GET', uri=self.get_mock_url( resource='users', qs_elements=['name=' + user_data.name], ), status_code=200, json={ 'users': ( [user_data.json_response['user']] if is_found else [] ) }, ) ) return uri_mocks def _get_mock_role_query_urls( self, role_data, domain_data=None, project_data=None, group_data=None, user_data=None, use_role_name=False, use_domain_name=False, use_project_name=False, use_group_name=False, use_user_name=False, use_domain_in_query=True, ): """Build uri mocks for querying role assignments""" uri_mocks = [] if domain_data: uri_mocks.extend( self.__get( 'domain', domain_data, 'domain_id' if not use_domain_name else 'domain_name', [], use_name=use_domain_name, ) ) qs_elements = [] if domain_data and use_domain_in_query: qs_elements = ['domain_id=' + domain_data.domain_id] uri_mocks.extend( self.__get( 'role', role_data, 'role_id' if not use_role_name else 'role_name', [], use_name=use_role_name, ) ) if user_data: uri_mocks.extend( self.__user_mocks(user_data, use_user_name, is_found=True) ) if group_data: uri_mocks.extend( self.__get( 'group', 
group_data, 'group_id' if not use_group_name else 'group_name', qs_elements, use_name=use_group_name, ) ) if project_data: uri_mocks.extend( self.__get( 'project', project_data, 'project_id' if not use_project_name else 'project_name', qs_elements, use_name=use_project_name, ) ) return uri_mocks def test_grant_role_user_id_project(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, user_data=self.user_data, use_role_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), dict( method='PUT', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.grant_role( self.role_data.role_name, user=self.user_data.user_id, project=self.project_data.project_id, ) ) self.assert_calls() def test_grant_role_user_name_project(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, user_data=self.user_data, use_role_name=True, use_user_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), dict( method='PUT', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.grant_role( self.role_data.role_name, user=self.user_data.name, project=self.project_data.project_id, ) ) def test_grant_role_user_id_project_exists(self): uris = self._get_mock_role_query_urls( self.role_data, 
project_data=self.project_data, user_data=self.user_data, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.grant_role( self.role_data.role_id, user=self.user_data.user_id, project=self.project_data.project_id, ) ) self.assert_calls() def test_grant_role_user_name_project_exists(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, user_data=self.user_data, use_role_name=True, use_user_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.grant_role( self.role_data.role_name, user=self.user_data.name, project=self.project_data.project_id, ) ) self.assert_calls() def test_grant_role_group_id_project(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, group_data=self.group_data, use_role_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), dict( method='PUT', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.grant_role( self.role_data.role_name, group=self.group_data.group_id, project=self.project_data.project_id, ) ) self.assert_calls() def test_grant_role_group_name_project(self): uris = self._get_mock_role_query_urls( self.role_data, 
project_data=self.project_data, group_data=self.group_data, use_role_name=True, use_group_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), dict( method='PUT', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.grant_role( self.role_data.role_name, group=self.group_data.group_name, project=self.project_data.project_id, ) ) self.assert_calls() def test_grant_role_group_id_project_exists(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, group_data=self.group_data, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.grant_role( self.role_data.role_id, group=self.group_data.group_id, project=self.project_data.project_id, ) ) self.assert_calls() def test_grant_role_group_name_project_exists(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, group_data=self.group_data, use_role_name=True, use_group_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.grant_role( self.role_data.role_name, group=self.group_data.group_name, project=self.project_data.project_id, ) ) self.assert_calls() # ===== Domain def 
test_grant_role_user_id_domain(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, user_data=self.user_data, use_role_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), dict( method='PUT', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.grant_role( self.role_data.role_name, user=self.user_data.user_id, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_grant_role_user_name_domain(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, user_data=self.user_data, use_role_name=True, use_user_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), dict( method='PUT', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.grant_role( self.role_data.role_name, user=self.user_data.name, domain=self.domain_data.domain_id, ) ) def test_grant_role_user_id_domain_exists(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, user_data=self.user_data, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), ] ) self.register_uris(uris) self.assertFalse( 
self.cloud.grant_role( self.role_data.role_id, user=self.user_data.user_id, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_grant_role_user_name_domain_exists(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, user_data=self.user_data, use_role_name=True, use_user_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.grant_role( self.role_data.role_name, user=self.user_data.name, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_grant_role_group_id_domain(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, group_data=self.group_data, use_role_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), dict( method='PUT', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.grant_role( self.role_data.role_name, group=self.group_data.group_id, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_grant_role_group_name_domain(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, group_data=self.group_data, use_role_name=True, use_group_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), dict( method='PUT', 
uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.grant_role( self.role_data.role_name, group=self.group_data.group_name, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_grant_role_group_id_domain_exists(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, group_data=self.group_data, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.grant_role( self.role_data.role_id, group=self.group_data.group_id, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_grant_role_group_name_domain_exists(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, group_data=self.group_data, use_role_name=True, use_group_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.grant_role( self.role_data.role_name, group=self.group_data.group_name, domain=self.domain_data.domain_id, ) ) self.assert_calls() # ==== Revoke def test_revoke_role_user_id_project(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, user_data=self.user_data, use_role_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), dict( 
method='DELETE', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.revoke_role( self.role_data.role_name, user=self.user_data.user_id, project=self.project_data.project_id, ) ) self.assert_calls() def test_revoke_role_user_name_project(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, user_data=self.user_data, use_role_name=True, use_user_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), dict( method='DELETE', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.revoke_role( self.role_data.role_name, user=self.user_data.name, project=self.project_data.project_id, ) ) def test_revoke_role_user_id_project_not_exists(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, user_data=self.user_data, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.revoke_role( self.role_data.role_id, user=self.user_data.user_id, project=self.project_data.project_id, ) ) self.assert_calls() def test_revoke_role_user_name_project_not_exists(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, user_data=self.user_data, use_role_name=True, use_user_name=True, ) uris.extend( [ dict( 
method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.revoke_role( self.role_data.role_name, user=self.user_data.name, project=self.project_data.project_id, ) ) self.assert_calls() def test_revoke_role_group_id_project(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, group_data=self.group_data, use_role_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), dict( method='DELETE', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.revoke_role( self.role_data.role_name, group=self.group_data.group_id, project=self.project_data.project_id, ) ) self.assert_calls() def test_revoke_role_group_name_project(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, group_data=self.group_data, use_role_name=True, use_group_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), dict( method='DELETE', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.revoke_role( self.role_data.role_name, group=self.group_data.group_name, 
project=self.project_data.project_id, ) ) self.assert_calls() def test_revoke_role_group_id_project_not_exists(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, group_data=self.group_data, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.revoke_role( self.role_data.role_id, group=self.group_data.group_id, project=self.project_data.project_id, ) ) self.assert_calls() def test_revoke_role_group_name_project_not_exists(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, group_data=self.group_data, use_role_name=True, use_group_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.revoke_role( self.role_data.role_name, group=self.group_data.group_name, project=self.project_data.project_id, ) ) self.assert_calls() # ==== Domain def test_revoke_role_user_id_domain(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, user_data=self.user_data, use_role_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), dict( method='DELETE', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.revoke_role( 
self.role_data.role_name, user=self.user_data.user_id, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_revoke_role_user_name_domain(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, user_data=self.user_data, use_role_name=True, use_user_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), dict( method='DELETE', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.revoke_role( self.role_data.role_name, user=self.user_data.name, domain=self.domain_data.domain_id, ) ) def test_revoke_role_user_id_domain_not_exists(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, user_data=self.user_data, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.revoke_role( self.role_data.role_id, user=self.user_data.user_id, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_revoke_role_user_name_domain_not_exists(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, user_data=self.user_data, use_role_name=True, use_user_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.revoke_role( 
self.role_data.role_name, user=self.user_data.name, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_revoke_role_group_id_domain(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, group_data=self.group_data, use_role_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), dict( method='DELETE', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.revoke_role( self.role_data.role_name, group=self.group_data.group_id, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_revoke_role_group_name_domain(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, group_data=self.group_data, use_role_name=True, use_group_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), dict( method='DELETE', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.revoke_role( self.role_data.role_name, group=self.group_data.group_name, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_revoke_role_group_id_domain_not_exists(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, group_data=self.group_data, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ 
self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.revoke_role( self.role_data.role_id, group=self.group_data.group_id, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_revoke_role_group_name_domain_not_exists(self): uris = self._get_mock_role_query_urls( self.role_data, domain_data=self.domain_data, group_data=self.group_data, use_role_name=True, use_group_name=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='domains', append=[ self.domain_data.domain_id, 'groups', self.group_data.group_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), ] ) self.register_uris(uris) self.assertFalse( self.cloud.revoke_role( self.role_data.role_name, group=self.group_data.group_name, domain=self.domain_data.domain_id, ) ) self.assert_calls() def test_grant_no_role(self): uris = self.__get( 'domain', self.domain_data, 'domain_name', [], use_name=True ) uris.extend( [ dict( method='GET', uri=self.get_mock_url( resource='roles', append=[self.role_data.role_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( resource='roles', qs_elements=[ 'name=' + self.role_data.role_name, ], ), status_code=200, json={'roles': []}, ), ] ) self.register_uris(uris) with testtools.ExpectedException( exceptions.SDKException, f'Role {self.role_data.role_name} not found', ): self.cloud.grant_role( self.role_data.role_name, group=self.group_data.group_name, domain=self.domain_data.domain_name, ) self.assert_calls() def test_revoke_no_role(self): uris = self.__get( 'domain', self.domain_data, 'domain_name', [], use_name=True ) uris.extend( [ dict( method='GET', uri=self.get_mock_url( resource='roles', append=[self.role_data.role_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( resource='roles', qs_elements=[ 'name=' + self.role_data.role_name, 
], ), status_code=200, json={'roles': []}, ), ] ) self.register_uris(uris) with testtools.ExpectedException( exceptions.SDKException, f'Role {self.role_data.role_name} not found', ): self.cloud.revoke_role( self.role_data.role_name, group=self.group_data.group_name, domain=self.domain_data.domain_name, ) self.assert_calls() def test_grant_no_user_or_group_specified(self): uris = self.__get( 'role', self.role_data, 'role_name', [], use_name=True ) self.register_uris(uris) with testtools.ExpectedException( exceptions.SDKException, 'Must specify either a user or a group', ): self.cloud.grant_role(self.role_data.role_name) self.assert_calls() def test_revoke_no_user_or_group_specified(self): uris = self.__get( 'role', self.role_data, 'role_name', [], use_name=True ) self.register_uris(uris) with testtools.ExpectedException( exceptions.SDKException, 'Must specify either a user or a group', ): self.cloud.revoke_role(self.role_data.role_name) self.assert_calls() def test_grant_no_user_or_group(self): uris = self.__get( 'role', self.role_data, 'role_name', [], use_name=True ) uris.extend( self.__user_mocks(self.user_data, use_name=True, is_found=False) ) self.register_uris(uris) with testtools.ExpectedException( exceptions.SDKException, 'Must specify either a user or a group', ): self.cloud.grant_role( self.role_data.role_name, user=self.user_data.name ) self.assert_calls() def test_revoke_no_user_or_group(self): uris = self.__get( 'role', self.role_data, 'role_name', [], use_name=True ) uris.extend( self.__user_mocks(self.user_data, use_name=True, is_found=False) ) self.register_uris(uris) with testtools.ExpectedException( exceptions.SDKException, 'Must specify either a user or a group', ): self.cloud.revoke_role( self.role_data.role_name, user=self.user_data.name ) self.assert_calls() def test_grant_both_user_and_group(self): uris = self.__get( 'role', self.role_data, 'role_name', [], use_name=True ) uris.extend(self.__user_mocks(self.user_data, use_name=True)) 
uris.extend( self.__get( 'group', self.group_data, 'group_name', [], use_name=True ) ) self.register_uris(uris) with testtools.ExpectedException( exceptions.SDKException, 'Specify either a group or a user, not both', ): self.cloud.grant_role( self.role_data.role_name, user=self.user_data.name, group=self.group_data.group_name, ) self.assert_calls() def test_revoke_both_user_and_group(self): uris = self.__get( 'role', self.role_data, 'role_name', [], use_name=True ) uris.extend(self.__user_mocks(self.user_data, use_name=True)) uris.extend( self.__get( 'group', self.group_data, 'group_name', [], use_name=True ) ) self.register_uris(uris) with testtools.ExpectedException( exceptions.SDKException, 'Specify either a group or a user, not both', ): self.cloud.revoke_role( self.role_data.role_name, user=self.user_data.name, group=self.group_data.group_name, ) def test_grant_both_project_and_domain(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, user_data=self.user_data, domain_data=self.domain_data, use_role_name=True, use_user_name=True, use_project_name=True, use_domain_name=True, use_domain_in_query=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=404, ), dict( method='PUT', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.grant_role( self.role_data.role_name, user=self.user_data.name, project=self.project_data.project_name, domain=self.domain_data.domain_name, ) ) def test_revoke_both_project_and_domain(self): uris = self._get_mock_role_query_urls( self.role_data, project_data=self.project_data, user_data=self.user_data, domain_data=self.domain_data, 
use_role_name=True, use_user_name=True, use_project_name=True, use_domain_name=True, use_domain_in_query=True, ) uris.extend( [ dict( method='HEAD', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), complete_qs=True, status_code=204, ), dict( method='DELETE', uri=self.get_mock_url( resource='projects', append=[ self.project_data.project_id, 'users', self.user_data.user_id, 'roles', self.role_data.role_id, ], ), status_code=200, ), ] ) self.register_uris(uris) self.assertTrue( self.cloud.revoke_role( self.role_data.role_name, user=self.user_data.name, project=self.project_data.project_name, domain=self.domain_data.domain_name, ) ) def test_grant_no_project_or_domain(self): uris = self._get_mock_role_query_urls( self.role_data, user_data=self.user_data, use_role_name=True, use_user_name=True, ) self.register_uris(uris) with testtools.ExpectedException( exceptions.SDKException, 'Must specify either a domain, project or system', ): self.cloud.grant_role( self.role_data.role_name, user=self.user_data.name ) self.assert_calls() def test_revoke_no_project_or_domain_or_system(self): uris = self._get_mock_role_query_urls( self.role_data, user_data=self.user_data, use_role_name=True, use_user_name=True, ) self.register_uris(uris) with testtools.ExpectedException( exceptions.SDKException, 'Must specify either a domain, project or system', ): self.cloud.revoke_role( self.role_data.role_name, user=self.user_data.name ) self.assert_calls() def test_grant_bad_domain_exception(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='domains', append=['baddomain'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( resource='domains', qs_elements=['name=baddomain'] ), status_code=404, ), ] ) with testtools.ExpectedException(exceptions.NotFoundException): self.cloud.grant_role( self.role_data.role_name, user=self.user_data.name, 
domain='baddomain', ) self.assert_calls() def test_revoke_bad_domain_exception(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( resource='domains', append=['baddomain'] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( resource='domains', qs_elements=['name=baddomain'] ), status_code=404, ), ] ) with testtools.ExpectedException(exceptions.NotFoundException): self.cloud.revoke_role( self.role_data.role_name, user=self.user_data.name, domain='baddomain', ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_router.py0000664000175000017500000005054200000000000024404 0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import copy import testtools from openstack import exceptions from openstack.network.v2 import port as _port from openstack.network.v2 import router as _router from openstack.tests.unit import base class TestRouter(base.TestCase): router_name = 'goofy' router_id = '57076620-dcfb-42ed-8ad6-79ccb4a79ed2' subnet_id = '1f1696eb-7f47-47f6-835c-4889bff88604' mock_router_rep = { 'admin_state_up': True, 'availability_zone_hints': [], 'availability_zones': [], 'description': '', 'distributed': False, 'external_gateway_info': None, 'flavor_id': None, 'ha': False, 'id': router_id, 'name': router_name, 'project_id': '861808a93da0484ea1767967c4df8a23', 'routes': [{"destination": "179.24.1.0/24", "nexthop": "172.24.3.99"}], 'status': 'ACTIVE', 'tenant_id': '861808a93da0484ea1767967c4df8a23', } mock_router_interface_rep = { 'network_id': '53aee281-b06d-47fc-9e1a-37f045182b8e', 'subnet_id': '1f1696eb-7f47-47f6-835c-4889bff88604', 'tenant_id': '861808a93da0484ea1767967c4df8a23', 'subnet_ids': [subnet_id], 'port_id': '23999891-78b3-4a6b-818d-d1b713f67848', 'id': '57076620-dcfb-42ed-8ad6-79ccb4a79ed2', 'request_ids': ['req-f1b0b1b4-ae51-4ef9-b371-0cc3c3402cf7'], } router_availability_zone_extension = { "alias": "router_availability_zone", "updated": "2015-01-01T10:00:00-00:00", "description": "Availability zone support for router.", "links": [], "name": "Router Availability Zone", } router_extraroute_extension = { "alias": "extraroute", "updated": "2015-01-01T10:00:00-00:00", "description": "extra routes extension for router.", "links": [], "name": "Extra Routes", } enabled_neutron_extensions = [ router_availability_zone_extension, router_extraroute_extension, ] def _compare_routers(self, exp, real): self.assertDictEqual( _router.Router(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def test_get_router(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers', self.router_name], ), status_code=404, ), dict( 
method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'], qs_elements=['name=%s' % self.router_name], ), json={'routers': [self.mock_router_rep]}, ), ] ) r = self.cloud.get_router(self.router_name) self.assertIsNotNone(r) self._compare_routers(self.mock_router_rep, r) self.assert_calls() def test_get_router_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers', 'mickey'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'], qs_elements=['name=mickey'], ), json={'routers': []}, ), ] ) r = self.cloud.get_router('mickey') self.assertIsNone(r) self.assert_calls() def test_create_router(self): self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'] ), json={'router': self.mock_router_rep}, validate=dict( json={ 'router': { 'name': self.router_name, 'admin_state_up': True, } } ), ) ] ) new_router = self.cloud.create_router( name=self.router_name, admin_state_up=True ) self._compare_routers(self.mock_router_rep, new_router) self.assert_calls() def test_create_router_specific_tenant(self): new_router_tenant_id = "project_id_value" mock_router_rep = copy.copy(self.mock_router_rep) mock_router_rep['tenant_id'] = new_router_tenant_id mock_router_rep['project_id'] = new_router_tenant_id self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'] ), json={'router': mock_router_rep}, validate=dict( json={ 'router': { 'name': self.router_name, 'admin_state_up': True, 'project_id': new_router_tenant_id, } } ), ) ] ) self.cloud.create_router( self.router_name, project_id=new_router_tenant_id ) self.assert_calls() def test_create_router_with_availability_zone_hints(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': 
self.enabled_neutron_extensions}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'] ), json={'router': self.mock_router_rep}, validate=dict( json={ 'router': { 'name': self.router_name, 'admin_state_up': True, 'availability_zone_hints': ['nova'], } } ), ), ] ) self.cloud.create_router( name=self.router_name, admin_state_up=True, availability_zone_hints=['nova'], ) self.assert_calls() def test_create_router_without_enable_snat(self): """Do not send enable_snat when not given.""" self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'] ), json={'router': self.mock_router_rep}, validate=dict( json={ 'router': { 'name': self.router_name, 'admin_state_up': True, } } ), ) ] ) self.cloud.create_router(name=self.router_name, admin_state_up=True) self.assert_calls() def test_create_router_with_enable_snat_True(self): """Send enable_snat when it is True.""" self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'] ), json={'router': self.mock_router_rep}, validate=dict( json={ 'router': { 'name': self.router_name, 'admin_state_up': True, 'external_gateway_info': {'enable_snat': True}, } } ), ) ] ) self.cloud.create_router( name=self.router_name, admin_state_up=True, enable_snat=True ) self.assert_calls() def test_create_router_with_enable_snat_False(self): """Send enable_snat when it is False.""" self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'] ), json={'router': self.mock_router_rep}, validate=dict( json={ 'router': { 'name': self.router_name, 'external_gateway_info': { 'enable_snat': False }, 'admin_state_up': True, } } ), ) ] ) self.cloud.create_router( name=self.router_name, admin_state_up=True, enable_snat=False ) self.assert_calls() def test_create_router_wrong_availability_zone_hints_type(self): azh_opts = "invalid" with testtools.ExpectedException( 
exceptions.SDKException, "Parameter 'availability_zone_hints' must be a list", ): self.cloud.create_router( name=self.router_name, admin_state_up=True, availability_zone_hints=azh_opts, ) def test_add_router_interface(self): self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'routers', self.router_id, 'add_router_interface', ], ), json={'port': self.mock_router_interface_rep}, validate=dict(json={'subnet_id': self.subnet_id}), ) ] ) self.cloud.add_router_interface( {'id': self.router_id}, subnet_id=self.subnet_id ) self.assert_calls() def test_remove_router_interface(self): self.register_uris( [ dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'routers', self.router_id, 'remove_router_interface', ], ), json={'port': self.mock_router_interface_rep}, validate=dict(json={'subnet_id': self.subnet_id}), ) ] ) self.cloud.remove_router_interface( {'id': self.router_id}, subnet_id=self.subnet_id ) self.assert_calls() def test_remove_router_interface_missing_argument(self): self.assertRaises( ValueError, self.cloud.remove_router_interface, {'id': '123'} ) def test_update_router(self): new_router_name = "mickey" new_routes = [] expected_router_rep = copy.copy(self.mock_router_rep) expected_router_rep['name'] = new_router_name expected_router_rep['routes'] = new_routes # validate_calls() asserts that these requests are done in order, # but the extensions call is only called if a non-None value is # passed in 'routes' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'extensions'] ), json={'extensions': self.enabled_neutron_extensions}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers', self.router_id], ), json=self.mock_router_rep, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers', self.router_id], ), json={'router': expected_router_rep}, validate=dict( 
json={ 'router': { 'name': new_router_name, 'routes': new_routes, } } ), ), ] ) new_router = self.cloud.update_router( self.router_id, name=new_router_name, routes=new_routes ) self._compare_routers(expected_router_rep, new_router) self.assert_calls() def test_delete_router(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers', self.router_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'], qs_elements=['name=%s' % self.router_name], ), json={'routers': [self.mock_router_rep]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers', self.router_id], ), json={}, ), ] ) self.assertTrue(self.cloud.delete_router(self.router_name)) self.assert_calls() def test_delete_router_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers', self.router_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'], qs_elements=['name=%s' % self.router_name], ), json={'routers': []}, ), ] ) self.assertFalse(self.cloud.delete_router(self.router_name)) self.assert_calls() def test_delete_router_multiple_found(self): router1 = dict(id='123', name='mickey') router2 = dict(id='456', name='mickey') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers', 'mickey'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'routers'], qs_elements=['name=mickey'], ), json={'routers': [router1, router2]}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_router, 'mickey' ) self.assert_calls() def _test_list_router_interfaces( self, router, interface_type, expected_result=None ): internal_ports = [ { 'id': 'internal_port_id', 'fixed_ips': [ { 'subnet_id': 
'internal_subnet_id', 'ip_address': "10.0.0.1", } ], 'device_id': self.router_id, 'device_owner': device_owner, } for device_owner in [ 'network:router_interface', 'network:ha_router_replicated_interface', 'network:router_interface_distributed', ] ] external_ports = [ { 'id': 'external_port_id', 'fixed_ips': [ { 'subnet_id': 'external_subnet_id', 'ip_address': "1.2.3.4", } ], 'device_id': self.router_id, 'device_owner': 'network:router_gateway', } ] if expected_result is None: if interface_type == "internal": expected_result = internal_ports elif interface_type == "external": expected_result = external_ports else: expected_result = internal_ports + external_ports mock_uri = dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'ports'], qs_elements=["device_id=%s" % self.router_id], ), json={'ports': (internal_ports + external_ports)}, ) self.register_uris([mock_uri]) ret = self.cloud.list_router_interfaces(router, interface_type) self.assertEqual( [_port.Port(**i).to_dict(computed=False) for i in expected_result], [i.to_dict(computed=False) for i in ret], ) self.assert_calls() router = { 'id': router_id, 'external_gateway_info': { 'external_fixed_ips': [ {'subnet_id': 'external_subnet_id', 'ip_address': '1.2.3.4'} ] }, } def test_list_router_interfaces_all(self): self._test_list_router_interfaces(self.router, interface_type=None) def test_list_router_interfaces_internal(self): self._test_list_router_interfaces( self.router, interface_type="internal" ) def test_list_router_interfaces_external(self): self._test_list_router_interfaces( self.router, interface_type="external" ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_security_groups.py0000664000175000017500000012345700000000000026340 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance 
with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import openstack.cloud from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base # TODO(mordred): Move id and name to using a getUniqueString() value neutron_grp_dict = fakes.make_fake_neutron_security_group( id='1', name='neutron-sec-group', description='Test Neutron security group', rules=[ dict( id='1', port_range_min=80, port_range_max=81, protocol='tcp', remote_ip_prefix='0.0.0.0/0', ) ], ) nova_grp_dict = fakes.make_fake_nova_security_group( id='2', name='nova-sec-group', description='Test Nova security group #1', rules=[ fakes.make_fake_nova_security_group_rule( id='2', from_port=8000, to_port=8001, ip_protocol='tcp', cidr='0.0.0.0/0', ), ], ) class TestSecurityGroups(base.TestCase): def setUp(self): super().setUp() self.has_neutron = True def fake_has_service(*args, **kwargs): return self.has_neutron self.cloud.has_service = fake_has_service def test_list_security_groups_neutron(self): project_id = 42 self.cloud.secgroup_source = 'neutron' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'], qs_elements=["project_id=%s" % project_id], ), json={'security_groups': [neutron_grp_dict]}, ) ] ) self.cloud.list_security_groups(filters={'project_id': project_id}) self.assert_calls() def test_list_security_groups_nova(self): self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups?project_id=42'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': []}, ), ] ) self.cloud.secgroup_source = 
'nova' self.has_neutron = False self.cloud.list_security_groups(filters={'project_id': 42}) self.assert_calls() def test_list_security_groups_none(self): self.cloud.secgroup_source = None self.has_neutron = False self.assertRaises( openstack.cloud.OpenStackCloudUnavailableFeature, self.cloud.list_security_groups, ) def test_delete_security_group_neutron(self): sg_id = neutron_grp_dict['id'] self.cloud.secgroup_source = 'neutron' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_groups': [neutron_grp_dict]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups', '%s' % sg_id], ), status_code=200, json={}, ), ] ) self.assertTrue(self.cloud.delete_security_group('1')) self.assert_calls() def test_delete_security_group_nova(self): self.cloud.secgroup_source = 'nova' self.has_neutron = False nova_return = [nova_grp_dict] self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': nova_return}, ), dict( method='DELETE', uri='{endpoint}/os-security-groups/2'.format( endpoint=fakes.COMPUTE_ENDPOINT ), ), ] ) self.cloud.delete_security_group('2') self.assert_calls() def test_delete_security_group_neutron_not_found(self): self.cloud.secgroup_source = 'neutron' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_groups': [neutron_grp_dict]}, ) ] ) self.assertFalse(self.cloud.delete_security_group('10')) self.assert_calls() def test_delete_security_group_nova_not_found(self): self.cloud.secgroup_source = 'nova' self.has_neutron = False nova_return = [nova_grp_dict] self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': nova_return}, ), ] ) 
self.assertFalse(self.cloud.delete_security_group('doesNotExist')) def test_delete_security_group_none(self): self.cloud.secgroup_source = None self.assertRaises( openstack.cloud.OpenStackCloudUnavailableFeature, self.cloud.delete_security_group, 'doesNotExist', ) def test_create_security_group_neutron(self): self.cloud.secgroup_source = 'neutron' group_name = self.getUniqueString() group_desc = self.getUniqueString('description') new_group = fakes.make_fake_neutron_security_group( id='2', name=group_name, description=group_desc, rules=[] ) self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_group': new_group}, validate=dict( json={ 'security_group': { 'name': group_name, 'description': group_desc, } } ), ) ] ) r = self.cloud.create_security_group(group_name, group_desc) self.assertEqual(group_name, r['name']) self.assertEqual(group_desc, r['description']) self.assertEqual(True, r['stateful']) self.assert_calls() def test_create_security_group_neutron_specific_tenant(self): self.cloud.secgroup_source = 'neutron' project_id = "861808a93da0484ea1767967c4df8a23" group_name = self.getUniqueString() group_desc = ( 'security group from' ' test_create_security_group_neutron_specific_tenant' ) new_group = fakes.make_fake_neutron_security_group( id='2', name=group_name, description=group_desc, project_id=project_id, rules=[], ) self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_group': new_group}, validate=dict( json={ 'security_group': { 'name': group_name, 'description': group_desc, 'tenant_id': project_id, } } ), ) ] ) r = self.cloud.create_security_group( group_name, group_desc, project_id ) self.assertEqual(group_name, r['name']) self.assertEqual(group_desc, r['description']) self.assertEqual(project_id, r['tenant_id']) self.assert_calls() def 
test_create_security_group_stateless_neutron(self): self.cloud.secgroup_source = 'neutron' group_name = self.getUniqueString() group_desc = self.getUniqueString('description') new_group = fakes.make_fake_neutron_security_group( id='2', name=group_name, description=group_desc, stateful=False, rules=[], ) self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_group': new_group}, validate=dict( json={ 'security_group': { 'name': group_name, 'description': group_desc, 'stateful': False, } } ), ) ] ) r = self.cloud.create_security_group( group_name, group_desc, stateful=False ) self.assertEqual(group_name, r['name']) self.assertEqual(group_desc, r['description']) self.assertEqual(False, r['stateful']) self.assert_calls() def test_create_security_group_nova(self): group_name = self.getUniqueString() self.has_neutron = False group_desc = self.getUniqueString('description') new_group = fakes.make_fake_nova_security_group( id='2', name=group_name, description=group_desc, rules=[] ) self.register_uris( [ dict( method='POST', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_group': new_group}, validate=dict( json={ 'security_group': { 'name': group_name, 'description': group_desc, } } ), ), ] ) self.cloud.secgroup_source = 'nova' r = self.cloud.create_security_group(group_name, group_desc) self.assertEqual(group_name, r['name']) self.assertEqual(group_desc, r['description']) self.assert_calls() def test_create_security_group_none(self): self.cloud.secgroup_source = None self.has_neutron = False self.assertRaises( openstack.cloud.OpenStackCloudUnavailableFeature, self.cloud.create_security_group, '', '', ) def test_update_security_group_neutron(self): self.cloud.secgroup_source = 'neutron' new_name = self.getUniqueString() sg_id = neutron_grp_dict['id'] update_return = neutron_grp_dict.copy() update_return['name'] = new_name 
update_return['stateful'] = False self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_groups': [neutron_grp_dict]}, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups', '%s' % sg_id], ), json={'security_group': update_return}, validate=dict( json={ 'security_group': { 'name': new_name, 'stateful': False, } } ), ), ] ) r = self.cloud.update_security_group( sg_id, name=new_name, stateful=False ) self.assertEqual(r['name'], new_name) self.assertEqual(r['stateful'], False) self.assert_calls() def test_update_security_group_nova(self): self.has_neutron = False new_name = self.getUniqueString() self.cloud.secgroup_source = 'nova' nova_return = [nova_grp_dict] update_return = nova_grp_dict.copy() update_return['name'] = new_name self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': nova_return}, ), dict( method='PUT', uri='{endpoint}/os-security-groups/2'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_group': update_return}, ), ] ) r = self.cloud.update_security_group( nova_grp_dict['id'], name=new_name ) self.assertEqual(r['name'], new_name) self.assert_calls() def test_update_security_group_bad_kwarg(self): self.assertRaises( TypeError, self.cloud.update_security_group, 'doesNotExist', bad_arg='', ) def test_create_security_group_rule_neutron(self): self.cloud.secgroup_source = 'neutron' args = dict( port_range_min=-1, port_range_max=40000, protocol='tcp', remote_ip_prefix='0.0.0.0/0', remote_group_id='456', remote_address_group_id='1234-5678', direction='egress', ethertype='IPv6', ) expected_args = copy.copy(args) # For neutron, -1 port should be converted to None expected_args['port_range_min'] = None expected_args['security_group_id'] = neutron_grp_dict['id'] expected_new_rule = copy.copy(expected_args) expected_new_rule['id'] = 
'1234' expected_new_rule['tenant_id'] = None expected_new_rule['project_id'] = expected_new_rule['tenant_id'] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_groups': [neutron_grp_dict]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-group-rules'], ), json={'security_group_rule': expected_new_rule}, validate=dict(json={'security_group_rule': expected_args}), ), ] ) new_rule = self.cloud.create_security_group_rule( secgroup_name_or_id=neutron_grp_dict['id'], **args ).to_dict(original_names=True) # NOTE(gtema): don't check location and not relevant properties # in new rule new_rule.pop('created_at') new_rule.pop('description') new_rule.pop('location') new_rule.pop('name') new_rule.pop('revision_number') new_rule.pop('tags') new_rule.pop('updated_at') self.assertEqual(expected_new_rule, new_rule) self.assert_calls() def test_create_security_group_rule_neutron_specific_tenant(self): self.cloud.secgroup_source = 'neutron' args = dict( port_range_min=-1, port_range_max=40000, protocol='tcp', remote_ip_prefix='0.0.0.0/0', remote_group_id='456', remote_address_group_id=None, direction='egress', ethertype='IPv6', project_id='861808a93da0484ea1767967c4df8a23', ) expected_args = copy.copy(args) # For neutron, -1 port should be converted to None expected_args['port_range_min'] = None expected_args['security_group_id'] = neutron_grp_dict['id'] expected_args['tenant_id'] = expected_args['project_id'] expected_args.pop('project_id') expected_new_rule = copy.copy(expected_args) expected_new_rule['id'] = '1234' expected_new_rule['project_id'] = expected_new_rule['tenant_id'] # This is not sent in body if == None so should not be in the # JSON; see SecurityGroupRule where it is removed. 
expected_args.pop('remote_address_group_id') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_groups': [neutron_grp_dict]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-group-rules'], ), json={'security_group_rule': expected_new_rule}, validate=dict(json={'security_group_rule': expected_args}), ), ] ) new_rule = self.cloud.create_security_group_rule( secgroup_name_or_id=neutron_grp_dict['id'], **args ).to_dict(original_names=True) # NOTE(slaweq): don't check location and properties in new rule new_rule.pop('created_at') new_rule.pop('description') new_rule.pop('location') new_rule.pop('name') new_rule.pop('revision_number') new_rule.pop('tags') new_rule.pop('updated_at') self.assertEqual(expected_new_rule, new_rule) self.assert_calls() def test_create_security_group_rule_nova(self): self.has_neutron = False self.cloud.secgroup_source = 'nova' nova_return = [nova_grp_dict] new_rule = fakes.make_fake_nova_security_group_rule( id='xyz', from_port=1, to_port=2000, ip_protocol='tcp', cidr='1.2.3.4/32', ) self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': nova_return}, ), dict( method='POST', uri='{endpoint}/os-security-group-rules'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_group_rule': new_rule}, validate=dict( json={ "security_group_rule": { "from_port": 1, "ip_protocol": "tcp", "to_port": 2000, "parent_group_id": "2", "cidr": "1.2.3.4/32", "group_id": "123", } } ), ), ] ) self.cloud.create_security_group_rule( '2', port_range_min=1, port_range_max=2000, protocol='tcp', remote_ip_prefix='1.2.3.4/32', remote_group_id='123', ) self.assert_calls() def test_create_security_group_rule_nova_no_ports(self): self.has_neutron = False self.cloud.secgroup_source = 'nova' new_rule = fakes.make_fake_nova_security_group_rule( id='xyz', 
from_port=1, to_port=65535, ip_protocol='tcp', cidr='1.2.3.4/32', ) nova_return = [nova_grp_dict] self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': nova_return}, ), dict( method='POST', uri='{endpoint}/os-security-group-rules'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_group_rule': new_rule}, validate=dict( json={ "security_group_rule": { "from_port": 1, "ip_protocol": "tcp", "to_port": 65535, "parent_group_id": "2", "cidr": "1.2.3.4/32", "group_id": "123", } } ), ), ] ) self.cloud.create_security_group_rule( '2', protocol='tcp', remote_ip_prefix='1.2.3.4/32', remote_group_id='123', ) self.assert_calls() def test_create_security_group_rule_none(self): self.has_neutron = False self.cloud.secgroup_source = None self.assertRaises( openstack.cloud.OpenStackCloudUnavailableFeature, self.cloud.create_security_group_rule, '', ) def test_delete_security_group_rule_neutron(self): rule_id = "xyz" self.cloud.secgroup_source = 'neutron' self.register_uris( [ dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'security-group-rules', '%s' % rule_id, ], ), json={}, ) ] ) self.assertTrue(self.cloud.delete_security_group_rule(rule_id)) self.assert_calls() def test_delete_security_group_rule_nova(self): self.has_neutron = False self.cloud.secgroup_source = 'nova' self.register_uris( [ dict( method='DELETE', uri='{endpoint}/os-security-group-rules/xyz'.format( endpoint=fakes.COMPUTE_ENDPOINT ), ), ] ) r = self.cloud.delete_security_group_rule('xyz') self.assertTrue(r) self.assert_calls() def test_delete_security_group_rule_none(self): self.has_neutron = False self.cloud.secgroup_source = None self.assertRaises( openstack.cloud.OpenStackCloudUnavailableFeature, self.cloud.delete_security_group_rule, '', ) def test_delete_security_group_rule_not_found(self): rule_id = "doesNotExist" self.cloud.secgroup_source = 'neutron' self.register_uris( [ 
dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_groups': [neutron_grp_dict]}, ) ] ) self.assertFalse(self.cloud.delete_security_group(rule_id)) self.assert_calls() def test_delete_security_group_rule_not_found_nova(self): self.has_neutron = False self.cloud.secgroup_source = 'nova' self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': [nova_grp_dict]}, ), ] ) r = self.cloud.delete_security_group('doesNotExist') self.assertFalse(r) self.assert_calls() def test_nova_egress_security_group_rule(self): self.has_neutron = False self.cloud.secgroup_source = 'nova' self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': [nova_grp_dict]}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_security_group_rule, secgroup_name_or_id='nova-sec-group', direction='egress', ) self.assert_calls() def test_list_server_security_groups_nova(self): self.has_neutron = False server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', server['id']] ), json=server, ), dict( method='GET', uri='{endpoint}/servers/{id}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=server['id'] ), json={'security_groups': [nova_grp_dict]}, ), ] ) groups = self.cloud.list_server_security_groups(server) self.assertEqual( groups[0]['rules'][0]['ip_range']['cidr'], nova_grp_dict['rules'][0]['ip_range']['cidr'], ) self.assert_calls() def test_list_server_security_groups_bad_source(self): self.has_neutron = False self.cloud.secgroup_source = 'invalid' server = dict(id='server_id') ret = self.cloud.list_server_security_groups(server) self.assertEqual([], ret) def 
test_add_security_group_to_server_nova(self): self.has_neutron = False self.cloud.secgroup_source = 'nova' self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT, ), json={'security_groups': [nova_grp_dict]}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri='%s/servers/%s/action' % (fakes.COMPUTE_ENDPOINT, '1234'), validate=dict( json={'addSecurityGroup': {'name': 'nova-sec-group'}} ), status_code=202, ), ] ) ret = self.cloud.add_server_security_groups( dict(id='1234'), 'nova-sec-group' ) self.assertTrue(ret) self.assert_calls() def test_add_security_group_to_server_neutron(self): # fake to get server by name, server-name must match fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') # use neutron for secgroup list and return an existing fake self.cloud.secgroup_source = 'neutron' self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [fake_server]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_groups': [neutron_grp_dict]}, ), dict( method='POST', uri='%s/servers/%s/action' % (fakes.COMPUTE_ENDPOINT, '1234'), validate=dict( json={ 'addSecurityGroup': {'name': 'neutron-sec-group'} } ), status_code=202, ), ] ) self.assertTrue( self.cloud.add_server_security_groups( 'server-name', 'neutron-sec-group' ) ) self.assert_calls() def test_remove_security_group_from_server_nova(self): self.has_neutron = False self.cloud.secgroup_source = 'nova' self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': [nova_grp_dict]}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', uri='%s/servers/%s/action' % (fakes.COMPUTE_ENDPOINT, '1234'), validate=dict( json={ 'removeSecurityGroup': {'name': 
'nova-sec-group'} } ), ), ] ) ret = self.cloud.remove_server_security_groups( dict(id='1234'), 'nova-sec-group' ) self.assertTrue(ret) self.assert_calls() def test_remove_security_group_from_server_neutron(self): # fake to get server by name, server-name must match fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') # use neutron for secgroup list and return an existing fake self.cloud.secgroup_source = 'neutron' validate = {'removeSecurityGroup': {'name': 'neutron-sec-group'}} self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [fake_server]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_groups': [neutron_grp_dict]}, ), dict( method='POST', uri='%s/servers/%s/action' % (fakes.COMPUTE_ENDPOINT, '1234'), validate=dict(json=validate), ), ] ) self.assertTrue( self.cloud.remove_server_security_groups( 'server-name', 'neutron-sec-group' ) ) self.assert_calls() def test_add_bad_security_group_to_server_nova(self): # fake to get server by name, server-name must match fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') # use nova for secgroup list and return an existing fake self.has_neutron = False self.cloud.secgroup_source = 'nova' self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri='{endpoint}/servers/detail'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'servers': [fake_server]}, ), dict( method='GET', uri='{endpoint}/os-security-groups'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'security_groups': [nova_grp_dict]}, ), ] ) ret = self.cloud.add_server_security_groups( 'server-name', 'unknown-sec-group' ) self.assertFalse(ret) self.assert_calls() def test_add_bad_security_group_to_server_neutron(self): # fake to get server by name, server-name must match fake_server = fakes.make_fake_server('1234', 
'server-name', 'ACTIVE') # use neutron for secgroup list and return an existing fake self.cloud.secgroup_source = 'neutron' self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [fake_server]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'security-groups'] ), json={'security_groups': [neutron_grp_dict]}, ), ] ) self.assertFalse( self.cloud.add_server_security_groups( 'server-name', 'unknown-sec-group' ) ) self.assert_calls() def test_add_security_group_to_bad_server(self): # fake to get server by name, server-name must match fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri='{endpoint}/servers/detail'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={'servers': [fake_server]}, ), ] ) ret = self.cloud.add_server_security_groups( 'unknown-server-name', 'nova-sec-group' ) self.assertFalse(ret) self.assert_calls() def test_get_security_group_by_id_neutron(self): self.cloud.secgroup_source = 'neutron' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'security-groups', neutron_grp_dict['id'], ], ), json={'security_group': neutron_grp_dict}, ) ] ) ret_sg = self.cloud.get_security_group_by_id(neutron_grp_dict['id']) self.assertEqual(neutron_grp_dict['id'], ret_sg['id']) self.assertEqual(neutron_grp_dict['name'], ret_sg['name']) self.assertEqual( neutron_grp_dict['description'], ret_sg['description'] ) self.assertEqual(neutron_grp_dict['stateful'], ret_sg['stateful']) self.assert_calls() def test_get_security_group_by_id_nova(self): self.register_uris( [ dict( method='GET', uri='{endpoint}/os-security-groups/{id}'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=nova_grp_dict['id'] ), json={'security_group': nova_grp_dict}, ), ] ) self.cloud.secgroup_source = 'nova' 
self.has_neutron = False ret_sg = self.cloud.get_security_group_by_id(nova_grp_dict['id']) self.assertEqual(nova_grp_dict['id'], ret_sg['id']) self.assertEqual(nova_grp_dict['name'], ret_sg['name']) self.assert_calls() def test_normalize_secgroups(self): nova_secgroup = dict( id='abc123', name='nova_secgroup', description='A Nova security group', rules=[ dict( id='123', from_port=80, to_port=81, ip_protocol='tcp', ip_range={'cidr': '0.0.0.0/0'}, parent_group_id='xyz123', ) ], ) expected = dict( id='abc123', name='nova_secgroup', description='A Nova security group', project_id='', tenant_id='', properties={}, location=dict( region_name='RegionOne', zone=None, project=dict( domain_name='default', id='1c36b64c840a42cd9e9b931a369337f0', domain_id=None, name='admin', ), cloud='_test_cloud_', ), security_group_rules=[ dict( id='123', direction='ingress', ethertype='IPv4', port_range_min=80, port_range_max=81, protocol='tcp', remote_ip_prefix='0.0.0.0/0', security_group_id='xyz123', project_id='', tenant_id='', properties={}, remote_group_id=None, location=dict( region_name='RegionOne', zone=None, project=dict( domain_name='default', id='1c36b64c840a42cd9e9b931a369337f0', domain_id=None, name='admin', ), cloud='_test_cloud_', ), ) ], ) # Set secgroup source to nova for this test as stateful parameter # is only valid for neutron security groups. 
self.cloud.secgroup_source = 'nova' retval = self.cloud._normalize_secgroup(nova_secgroup) self.cloud.secgroup_source = 'neutron' self.assertEqual(expected, retval) def test_normalize_secgroups_negone_port(self): nova_secgroup = dict( id='abc123', name='nova_secgroup', description='A Nova security group with -1 ports', rules=[ dict( id='123', from_port=-1, to_port=-1, ip_protocol='icmp', ip_range={'cidr': '0.0.0.0/0'}, parent_group_id='xyz123', ) ], ) retval = self.cloud._normalize_secgroup(nova_secgroup) self.assertIsNone(retval['security_group_rules'][0]['port_range_min']) self.assertIsNone(retval['security_group_rules'][0]['port_range_max']) def test_normalize_secgroup_rules(self): nova_rules = [ dict( id='123', from_port=80, to_port=81, ip_protocol='tcp', ip_range={'cidr': '0.0.0.0/0'}, parent_group_id='xyz123', ) ] expected = [ dict( id='123', direction='ingress', ethertype='IPv4', port_range_min=80, port_range_max=81, protocol='tcp', remote_ip_prefix='0.0.0.0/0', security_group_id='xyz123', tenant_id='', project_id='', remote_group_id=None, properties={}, location=dict( region_name='RegionOne', zone=None, project=dict( domain_name='default', id='1c36b64c840a42cd9e9b931a369337f0', domain_id=None, name='admin', ), cloud='_test_cloud_', ), ) ] retval = self.cloud._normalize_secgroup_rules(nova_rules) self.assertEqual(expected, retval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_server_console.py0000664000175000017500000000621100000000000026106 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack.tests import fakes from openstack.tests.unit import base class TestServerConsole(base.TestCase): def setUp(self): super().setUp() self.server_id = str(uuid.uuid4()) self.server_name = self.getUniqueString('name') self.server = fakes.make_fake_server( server_id=self.server_id, name=self.server_name ) self.output = self.getUniqueString('output') def test_get_server_console_dict(self): self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri='{endpoint}/servers/{id}/action'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=self.server_id ), json={"output": self.output}, validate=dict(json={'os-getConsoleOutput': {'length': 5}}), ), ] ) self.assertEqual( self.output, self.cloud.get_server_console(self.server, 5) ) self.assert_calls() def test_get_server_console_name_or_id(self): self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri='{endpoint}/servers/detail'.format( endpoint=fakes.COMPUTE_ENDPOINT ), json={"servers": [self.server]}, ), dict( method='POST', uri='{endpoint}/servers/{id}/action'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=self.server_id ), json={"output": self.output}, validate=dict(json={'os-getConsoleOutput': {}}), ), ] ) self.assertEqual( self.output, self.cloud.get_server_console(self.server['id']) ) self.assert_calls() def test_get_server_console_no_console(self): self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri='{endpoint}/servers/{id}/action'.format( endpoint=fakes.COMPUTE_ENDPOINT, id=self.server_id ), status_code=400, 
validate=dict(json={'os-getConsoleOutput': {}}), ), ] ) self.assertEqual('', self.cloud.get_server_console(self.server)) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_server_delete_metadata.py0000664000175000017500000000635500000000000027557 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_server_delete_metadata ---------------------------------- Tests for the `delete_server_metadata` command. """ import uuid from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestServerDeleteMetadata(base.TestCase): def setUp(self): super().setUp() self.server_id = str(uuid.uuid4()) self.server_name = self.getUniqueString('name') self.fake_server = fakes.make_fake_server( self.server_id, self.server_name ) def test_server_delete_metadata_with_exception(self): """ Test that a missing metadata throws an exception. 
""" self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [self.fake_server]}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=[ 'servers', self.fake_server['id'], 'metadata', 'key', ], ), status_code=404, ), ] ) self.assertRaises( exceptions.NotFoundException, self.cloud.delete_server_metadata, self.server_name, ['key'], ) self.assert_calls() def test_server_delete_metadata(self): self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [self.fake_server]}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=[ 'servers', self.fake_server['id'], 'metadata', 'key', ], ), status_code=200, ), ] ) self.cloud.delete_server_metadata(self.server_id, ['key']) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_server_group.py0000664000175000017500000000527000000000000025604 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.tests import fakes from openstack.tests.unit import base class TestServerGroup(base.TestCase): def setUp(self): super().setUp() self.group_id = uuid.uuid4().hex self.group_name = self.getUniqueString('server-group') self.policies = ['affinity'] self.fake_group = fakes.make_fake_server_group( self.group_id, self.group_name, self.policies ) def test_create_server_group(self): self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['os-server-groups'] ), json={'server_group': self.fake_group}, validate=dict( json={ 'server_group': { 'name': self.group_name, 'policies': self.policies, } } ), ), ] ) self.cloud.create_server_group( name=self.group_name, policies=self.policies ) self.assert_calls() def test_delete_server_group(self): self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-server-groups'] ), json={'server_groups': [self.fake_group]}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=['os-server-groups', self.group_id], ), json={'server_groups': [self.fake_group]}, ), ] ) self.assertTrue(self.cloud.delete_server_group(self.group_name)) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_server_set_metadata.py0000664000175000017500000000614400000000000027104 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ test_server_set_metadata ---------------------------------- Tests for the `set_server_metadata` command. """ import uuid from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestServerSetMetadata(base.TestCase): def setUp(self): super().setUp() self.server_id = str(uuid.uuid4()) self.server_name = self.getUniqueString('name') self.fake_server = fakes.make_fake_server( self.server_id, self.server_name ) def test_server_set_metadata_with_exception(self): self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [self.fake_server]}, ), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.fake_server['id'], 'metadata'], ), validate=dict(json={'metadata': {'meta': 'data'}}), json={}, status_code=400, ), ] ) self.assertRaises( exceptions.BadRequestException, self.cloud.set_server_metadata, self.server_name, {'meta': 'data'}, ) self.assert_calls() def test_server_set_metadata(self): metadata = {'meta': 'data'} self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [self.fake_server]}, ), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.fake_server['id'], 'metadata'], ), validate=dict(json={'metadata': metadata}), status_code=200, json={'metadata': metadata}, ), ] ) self.cloud.set_server_metadata(self.server_id, metadata) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_services.py0000664000175000017500000002662100000000000024710 0ustar00zuulzuul00000000000000# Copyright (c) 2015 
Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_cloud_services ---------------------------------- Tests Keystone services commands. """ from testtools import matchers from openstack import exceptions from openstack.tests.unit import base class CloudServices(base.TestCase): def setUp(self, cloud_config_fixture='clouds.yaml'): super().setUp(cloud_config_fixture) def get_mock_url( self, service_type='identity', interface='public', resource='services', append=None, base_url_append='v3', ): return super().get_mock_url( service_type, interface, resource, append, base_url_append ) def test_create_service_v3(self): service_data = self._get_service_data( name='a service', type='network', description='A test service' ) self.register_uris( [ dict( method='POST', uri=self.get_mock_url(), status_code=200, json=service_data.json_response_v3, validate=dict(json={'service': service_data.json_request}), ) ] ) service = self.cloud.create_service( name=service_data.service_name, service_type=service_data.service_type, description=service_data.description, ) self.assertThat( service.name, matchers.Equals(service_data.service_name) ) self.assertThat(service.id, matchers.Equals(service_data.service_id)) self.assertThat( service.description, matchers.Equals(service_data.description) ) self.assertThat( service.type, matchers.Equals(service_data.service_type) ) self.assert_calls() def test_update_service_v3(self): service_data = self._get_service_data( 
name='a service', type='network', description='A test service' ) request = service_data.json_request.copy() request['enabled'] = False resp = service_data.json_response_v3.copy() resp['enabled'] = False request.pop('description') request.pop('name') request.pop('type') self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={'services': [resp['service']]}, ), dict( method='PATCH', uri=self.get_mock_url(append=[service_data.service_id]), status_code=200, json=resp, validate=dict(json={'service': request}), ), ] ) service = self.cloud.update_service( service_data.service_id, enabled=False ) self.assertThat( service.name, matchers.Equals(service_data.service_name) ) self.assertThat(service.id, matchers.Equals(service_data.service_id)) self.assertThat( service.description, matchers.Equals(service_data.description) ) self.assertThat( service.type, matchers.Equals(service_data.service_type) ) self.assert_calls() def test_list_services(self): service_data = self._get_service_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'services': [service_data.json_response_v3['service']] }, ) ] ) services = self.cloud.list_services() self.assertThat(len(services), matchers.Equals(1)) self.assertThat( services[0].id, matchers.Equals(service_data.service_id) ) self.assertThat( services[0].name, matchers.Equals(service_data.service_name) ) self.assertThat( services[0].type, matchers.Equals(service_data.service_type) ) self.assert_calls() def test_get_service(self): service_data = self._get_service_data() service2_data = self._get_service_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'services': [ service_data.json_response_v3['service'], service2_data.json_response_v3['service'], ] }, ), dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'services': [ service_data.json_response_v3['service'], service2_data.json_response_v3['service'], ] 
}, ), dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'services': [ service_data.json_response_v3['service'], service2_data.json_response_v3['service'], ] }, ), dict(method='GET', uri=self.get_mock_url(), status_code=400), ] ) # Search by id service = self.cloud.get_service(name_or_id=service_data.service_id) self.assertThat(service.id, matchers.Equals(service_data.service_id)) # Search by name service = self.cloud.get_service(name_or_id=service_data.service_name) # test we are getting exactly 1 element self.assertThat(service.id, matchers.Equals(service_data.service_id)) # Not found service = self.cloud.get_service(name_or_id='INVALID SERVICE') self.assertIs(None, service) # Multiple matches # test we are getting an Exception self.assertRaises( exceptions.SDKException, self.cloud.get_service, name_or_id=None, filters={'type': 'type2'}, ) self.assert_calls() def test_search_services(self): service_data = self._get_service_data() service2_data = self._get_service_data(type=service_data.service_type) self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'services': [ service_data.json_response_v3['service'], service2_data.json_response_v3['service'], ] }, ), dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'services': [ service_data.json_response_v3['service'], service2_data.json_response_v3['service'], ] }, ), dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'services': [ service_data.json_response_v3['service'], service2_data.json_response_v3['service'], ] }, ), dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'services': [ service_data.json_response_v3['service'], service2_data.json_response_v3['service'], ] }, ), ] ) # Search by id services = self.cloud.search_services( name_or_id=service_data.service_id ) # test we are getting exactly 1 element self.assertThat(len(services), matchers.Equals(1)) self.assertThat( services[0].id, 
matchers.Equals(service_data.service_id) ) # Search by name services = self.cloud.search_services( name_or_id=service_data.service_name ) # test we are getting exactly 1 element self.assertThat(len(services), matchers.Equals(1)) self.assertThat( services[0].name, matchers.Equals(service_data.service_name) ) # Not found services = self.cloud.search_services(name_or_id='!INVALID!') self.assertThat(len(services), matchers.Equals(0)) # Multiple matches services = self.cloud.search_services( filters={'type': service_data.service_type} ) # test we are getting exactly 2 elements self.assertThat(len(services), matchers.Equals(2)) self.assertThat( services[0].id, matchers.Equals(service_data.service_id) ) self.assertThat( services[1].id, matchers.Equals(service2_data.service_id) ) self.assert_calls() def test_delete_service(self): service_data = self._get_service_data() self.register_uris( [ dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'services': [service_data.json_response_v3['service']] }, ), dict( method='DELETE', uri=self.get_mock_url(append=[service_data.service_id]), status_code=204, ), dict( method='GET', uri=self.get_mock_url(), status_code=200, json={ 'services': [service_data.json_response_v3['service']] }, ), dict( method='DELETE', uri=self.get_mock_url(append=[service_data.service_id]), status_code=204, ), ] ) # Delete by name self.cloud.delete_service(name_or_id=service_data.service_name) # Delete by id self.cloud.delete_service(service_data.service_id) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_shared_file_system.py0000664000175000017500000000334500000000000026734 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack.tests.unit import base IDENTIFIER = str(uuid.uuid4()) MANILA_AZ_DICT = { "id": IDENTIFIER, "name": "manila-zone-0", "created_at": "2021-01-21T20:13:55.000000", "updated_at": None, } class TestSharedFileSystem(base.TestCase): def setUp(self): super().setUp() self.use_manila() def test_list_availability_zones(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'shared-file-system', 'public', append=['v2', 'availability-zones'], ), json={'availability_zones': [MANILA_AZ_DICT]}, ), ] ) az_list = self.cloud.list_share_availability_zones() self.assertEqual(len(az_list), 1) self.assertEqual(MANILA_AZ_DICT['id'], az_list[0].id) self.assertEqual(MANILA_AZ_DICT['name'], az_list[0].name) self.assertEqual(MANILA_AZ_DICT['created_at'], az_list[0].created_at) self.assertEqual(MANILA_AZ_DICT['updated_at'], az_list[0].updated_at) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_stack.py0000664000175000017500000010174500000000000024173 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import tempfile import testtools from openstack import exceptions from openstack.orchestration.v1 import stack from openstack.tests import fakes from openstack.tests.unit import base class TestStack(base.TestCase): def setUp(self): super().setUp() self.stack_id = self.getUniqueString('id') self.stack_name = self.getUniqueString('name') self.stack_tag = self.getUniqueString('tag') self.stack = fakes.make_fake_stack(self.stack_id, self.stack_name) def _compare_stacks(self, exp, real): self.assertDictEqual( stack.Stack(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def test_list_stacks(self): fake_stacks = [ self.stack, fakes.make_fake_stack( self.getUniqueString('id'), self.getUniqueString('name') ), ] self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT ), json={"stacks": fake_stacks}, ), ] ) stacks = self.cloud.list_stacks() [self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)] self.assert_calls() def test_list_stacks_filters(self): fake_stacks = [ self.stack, fakes.make_fake_stack( self.getUniqueString('id'), self.getUniqueString('name') ), ] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'orchestration', 'public', append=['stacks'], qs_elements=['name=a', 'status=b'], ), json={"stacks": fake_stacks}, ), ] ) stacks = self.cloud.list_stacks(name='a', status='b') [self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)] self.assert_calls() def test_list_stacks_exception(self): self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT ), status_code=404, ) ] ) with testtools.ExpectedException(exceptions.NotFoundException): self.cloud.list_stacks() self.assert_calls() def test_search_stacks(self): fake_stacks = [ self.stack, fakes.make_fake_stack( self.getUniqueString('id'), 
self.getUniqueString('name') ), ] self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT ), json={"stacks": fake_stacks}, ), ] ) stacks = self.cloud.search_stacks() [self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)] self.assert_calls() def test_search_stacks_filters(self): fake_stacks = [ self.stack, fakes.make_fake_stack( self.getUniqueString('id'), self.getUniqueString('name'), status='CREATE_FAILED', ), ] self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT ), json={"stacks": fake_stacks}, ), ] ) filters = {'status': 'FAILED'} stacks = self.cloud.search_stacks(filters=filters) [self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)] self.assert_calls() def test_search_stacks_exception(self): self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT ), status_code=404, ) ] ) with testtools.ExpectedException(exceptions.NotFoundException): self.cloud.search_stacks() def test_delete_stack(self): resolve = 'resolve_outputs=False' self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks/{name}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, resolve=resolve, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ), json={"stack": self.stack}, ), dict( method='DELETE', uri='{endpoint}/stacks/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id ), ), ] ) self.assertTrue(self.cloud.delete_stack(self.stack_name)) self.assert_calls() def test_delete_stack_not_found(self): resolve = 'resolve_outputs=False' 
self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks/stack_name?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, resolve=resolve ), status_code=404, ), ] ) self.assertFalse(self.cloud.delete_stack('stack_name')) self.assert_calls() def test_delete_stack_exception(self): resolve = 'resolve_outputs=False' self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks/{id}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, resolve=resolve, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ), json={"stack": self.stack}, ), dict( method='DELETE', uri='{endpoint}/stacks/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id ), status_code=400, reason="ouch", ), ] ) with testtools.ExpectedException(exceptions.BadRequestException): self.cloud.delete_stack(self.stack_id) self.assert_calls() def test_delete_stack_by_name_wait(self): marker_event = fakes.make_fake_stack_event( self.stack_id, self.stack_name, status='CREATE_COMPLETE', resource_name='name', ) marker_qs = 'marker={e_id}&sort_dir=asc'.format( e_id=marker_event['id'] ) resolve = 'resolve_outputs=False' self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks/{name}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, resolve=resolve, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, 
resolve=resolve, ), json={"stack": self.stack}, ), dict( method='GET', uri='{endpoint}/stacks/{name}/events?{qs}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, qs='limit=1&sort_dir=desc', ), complete_qs=True, json={"events": [marker_event]}, ), dict( method='DELETE', uri='{endpoint}/stacks/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/events?{qs}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, qs=marker_qs, ), complete_qs=True, json={ "events": [ fakes.make_fake_stack_event( self.stack_id, self.stack_name, status='DELETE_COMPLETE', resource_name='name', ), ] }, ), dict( method='GET', uri='{endpoint}/stacks/{name}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, resolve=resolve, ), status_code=404, ), ] ) self.assertTrue(self.cloud.delete_stack(self.stack_name, wait=True)) self.assert_calls() def test_delete_stack_by_id_wait(self): marker_event = fakes.make_fake_stack_event( self.stack_id, self.stack_name, status='CREATE_COMPLETE', resource_name='name', ) marker_qs = 'marker={e_id}&sort_dir=asc'.format( e_id=marker_event['id'] ) resolve = 'resolve_outputs=False' self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks/{id}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, resolve=resolve, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ), json={"stack": self.stack}, ), dict( method='GET', uri='{endpoint}/stacks/{id}/events?{qs}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, qs='limit=1&sort_dir=desc', ), complete_qs=True, 
json={"events": [marker_event]}, ), dict( method='DELETE', uri='{endpoint}/stacks/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id ), ), dict( method='GET', uri='{endpoint}/stacks/{id}/events?{qs}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, qs=marker_qs, ), complete_qs=True, json={ "events": [ fakes.make_fake_stack_event( self.stack_id, self.stack_name, status='DELETE_COMPLETE', ), ] }, ), dict( method='GET', uri='{endpoint}/stacks/{id}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, resolve=resolve, ), status_code=404, ), ] ) self.assertTrue(self.cloud.delete_stack(self.stack_id, wait=True)) self.assert_calls() def test_delete_stack_wait_failed(self): failed_stack = self.stack.copy() failed_stack['stack_status'] = 'DELETE_FAILED' marker_event = fakes.make_fake_stack_event( self.stack_id, self.stack_name, status='CREATE_COMPLETE' ) marker_qs = 'marker={e_id}&sort_dir=asc'.format( e_id=marker_event['id'] ) resolve = 'resolve_outputs=False' self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks/{id}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, resolve=resolve, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ), json={"stack": self.stack}, ), dict( method='GET', uri='{endpoint}/stacks/{id}/events?{qs}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, qs='limit=1&sort_dir=desc', ), complete_qs=True, json={"events": [marker_event]}, ), dict( method='DELETE', uri='{endpoint}/stacks/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id ), ), dict( method='GET', 
uri='{endpoint}/stacks/{id}/events?{qs}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, qs=marker_qs, ), complete_qs=True, json={ "events": [ fakes.make_fake_stack_event( self.stack_id, self.stack_name, status='DELETE_COMPLETE', ), ] }, ), dict( method='GET', uri='{endpoint}/stacks/{id}?resolve_outputs=False'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, resolve=resolve, ), json={"stack": failed_stack}, ), ] ) with testtools.ExpectedException(exceptions.SDKException): self.cloud.delete_stack(self.stack_id, wait=True) self.assert_calls() def test_create_stack(self): test_template = tempfile.NamedTemporaryFile(delete=False) test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) test_template.close() self.register_uris( [ dict( method='POST', uri='{endpoint}/stacks'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT ), json={"stack": self.stack}, validate=dict( json={ 'disable_rollback': False, 'parameters': {}, 'stack_name': self.stack_name, 'tags': self.stack_tag, 'template': fakes.FAKE_TEMPLATE_CONTENT, 'timeout_mins': 60, } ), ), dict( method='GET', uri='{endpoint}/stacks/{name}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ), json={"stack": self.stack}, ), ] ) self.cloud.create_stack( self.stack_name, tags=self.stack_tag, 
template_file=test_template.name, ) self.assert_calls() def test_create_stack_wait(self): test_template = tempfile.NamedTemporaryFile(delete=False) test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) test_template.close() self.register_uris( [ dict( method='POST', uri='{endpoint}/stacks'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT ), json={"stack": self.stack}, validate=dict( json={ 'disable_rollback': False, 'parameters': {}, 'stack_name': self.stack_name, 'tags': self.stack_tag, 'template': fakes.FAKE_TEMPLATE_CONTENT, 'timeout_mins': 60, } ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/events?sort_dir=asc'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, ), json={ "events": [ fakes.make_fake_stack_event( self.stack_id, self.stack_name, status='CREATE_COMPLETE', resource_name='name', ), ] }, ), dict( method='GET', uri='{endpoint}/stacks/{name}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ), json={"stack": self.stack}, ), ] ) self.cloud.create_stack( self.stack_name, tags=self.stack_tag, template_file=test_template.name, wait=True, ) self.assert_calls() def test_update_stack(self): test_template = tempfile.NamedTemporaryFile(delete=False) test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) test_template.close() self.register_uris( [ dict( method='PUT', uri='{endpoint}/stacks/{name}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, ), validate=dict( json={ 'disable_rollback': False, 'parameters': {}, 'tags': self.stack_tag, 'template': fakes.FAKE_TEMPLATE_CONTENT, 'timeout_mins': 60, } ), json={}, ), dict( method='GET', uri='{endpoint}/stacks/{name}'.format( 
endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ), json={"stack": self.stack}, ), ] ) self.cloud.update_stack( self.stack_name, tags=self.stack_tag, template_file=test_template.name, ) self.assert_calls() def test_update_stack_wait(self): marker_event = fakes.make_fake_stack_event( self.stack_id, self.stack_name, status='CREATE_COMPLETE', resource_name='name', ) marker_qs = 'marker={e_id}&sort_dir=asc'.format( e_id=marker_event['id'] ) test_template = tempfile.NamedTemporaryFile(delete=False) test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) test_template.close() self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks/{name}/events?{qs}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, qs='limit=1&sort_dir=desc', ), json={"events": [marker_event]}, ), dict( method='PUT', uri='{endpoint}/stacks/{name}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, ), validate=dict( json={ 'disable_rollback': False, 'parameters': {}, 'tags': self.stack_tag, 'template': fakes.FAKE_TEMPLATE_CONTENT, 'timeout_mins': 60, } ), json={}, ), dict( method='GET', uri='{endpoint}/stacks/{name}/events?{qs}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, qs=marker_qs, ), json={ "events": [ fakes.make_fake_stack_event( self.stack_id, self.stack_name, status='UPDATE_COMPLETE', resource_name='name', ), ] }, ), dict( method='GET', uri='{endpoint}/stacks/{name}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ) ), ), dict( 
method='GET', uri='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ), json={"stack": self.stack}, ), ] ) self.cloud.update_stack( self.stack_name, tags=self.stack_tag, template_file=test_template.name, wait=True, ) self.assert_calls() def test_get_stack(self): self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks/{name}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ), json={"stack": self.stack}, ), ] ) res = self.cloud.get_stack(self.stack_name) self.assertIsNotNone(res) self.assertEqual(self.stack['stack_name'], res['name']) self.assertEqual(self.stack['stack_status'], res['stack_status']) self.assertEqual('CREATE_COMPLETE', res['status']) self.assert_calls() def test_get_stack_in_progress(self): in_progress = self.stack.copy() in_progress['stack_status'] = 'CREATE_IN_PROGRESS' self.register_uris( [ dict( method='GET', uri='{endpoint}/stacks/{name}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, name=self.stack_name, ), status_code=302, headers=dict( location='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ) ), ), dict( method='GET', uri='{endpoint}/stacks/{name}/{id}'.format( endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id, name=self.stack_name, ), json={"stack": in_progress}, ), ] ) res = self.cloud.get_stack(self.stack_name) self.assertIsNotNone(res) self.assertEqual(in_progress['stack_name'], res.name) self.assertEqual(in_progress['stack_status'], res['stack_status']) self.assertEqual('CREATE_IN_PROGRESS', res['status']) self.assert_calls() 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_subnet.py0000664000175000017500000007437200000000000024373 0ustar00zuulzuul00000000000000# Copyright 2017 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import testtools from openstack import exceptions from openstack.network.v2 import subnet as _subnet from openstack.tests.unit import base class TestSubnet(base.TestCase): network_name = 'network_name' subnet_name = 'subnet_name' subnet_id = '1f1696eb-7f47-47f6-835c-4889bff88604' subnet_cidr = '192.168.199.0/24' subnetpool_cidr = '172.16.0.0/28' prefix_length = 28 mock_network_rep = { 'id': '881d1bb7-a663-44c0-8f9f-ee2765b74486', 'name': network_name, } mock_subnet_rep = { 'allocation_pools': [ {'start': '192.168.199.2', 'end': '192.168.199.254'} ], 'cidr': subnet_cidr, 'created_at': '2017-04-24T20:22:23Z', 'description': '', 'dns_nameservers': [], 'enable_dhcp': False, 'gateway_ip': '192.168.199.1', 'host_routes': [], 'id': subnet_id, 'ip_version': 4, 'ipv6_address_mode': None, 'ipv6_ra_mode': None, 'name': subnet_name, 'network_id': mock_network_rep['id'], 'project_id': '861808a93da0484ea1767967c4df8a23', 'revision_number': 2, 'service_types': [], 'subnetpool_id': None, 'tags': [], } mock_subnetpool_rep = { 'id': 'f49a1319-423a-4ee6-ba54-1d95a4f6cc68', 'prefixes': ['172.16.0.0/16'], } def _compare_subnets(self, exp, real): 
self.assertDictEqual( _subnet.Subnet(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def test_get_subnet(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', self.subnet_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'], qs_elements=['name=%s' % self.subnet_name], ), json={'subnets': [self.mock_subnet_rep]}, ), ] ) r = self.cloud.get_subnet(self.subnet_name) self.assertIsNotNone(r) self._compare_subnets(self.mock_subnet_rep, r) self.assert_calls() def test_get_subnet_by_id(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', self.subnet_id], ), json={'subnet': self.mock_subnet_rep}, ) ] ) r = self.cloud.get_subnet_by_id(self.subnet_id) self.assertIsNotNone(r) self._compare_subnets(self.mock_subnet_rep, r) self.assert_calls() def test_create_subnet(self): pool = [{'start': '192.168.199.2', 'end': '192.168.199.254'}] dns = ['8.8.8.8'] routes = [{"destination": "0.0.0.0/0", "nexthop": "123.456.78.9"}] mock_subnet_rep = copy.copy(self.mock_subnet_rep) mock_subnet_rep['allocation_pools'] = pool mock_subnet_rep['dns_nameservers'] = dns mock_subnet_rep['host_routes'] = routes self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', self.network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % self.network_name], ), json={'networks': [self.mock_network_rep]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnet': mock_subnet_rep}, validate=dict( json={ 'subnet': { 'cidr': self.subnet_cidr, 'enable_dhcp': False, 'ip_version': 4, 'network_id': self.mock_network_rep['id'], 'allocation_pools': pool, 'dns_nameservers': dns, 'host_routes': routes, } } 
), ), ] ) subnet = self.cloud.create_subnet( self.network_name, self.subnet_cidr, allocation_pools=pool, dns_nameservers=dns, host_routes=routes, ) self._compare_subnets(mock_subnet_rep, subnet) self.assert_calls() def test_create_subnet_string_ip_version(self): '''Allow ip_version as a string''' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', self.network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % self.network_name], ), json={'networks': [self.mock_network_rep]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnet': self.mock_subnet_rep}, validate=dict( json={ 'subnet': { 'cidr': self.subnet_cidr, 'enable_dhcp': False, 'ip_version': 4, 'network_id': self.mock_network_rep['id'], } } ), ), ] ) subnet = self.cloud.create_subnet( self.network_name, self.subnet_cidr, ip_version='4' ) self._compare_subnets(self.mock_subnet_rep, subnet) self.assert_calls() def test_create_subnet_bad_ip_version(self): '''String ip_versions must be convertable to int''' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', self.network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % self.network_name], ), json={'networks': [self.mock_network_rep]}, ), ] ) with testtools.ExpectedException( exceptions.SDKException, "ip_version must be an integer" ): self.cloud.create_subnet( self.network_name, self.subnet_cidr, ip_version='4x' ) self.assert_calls() def test_create_subnet_without_gateway_ip(self): pool = [{'start': '192.168.199.2', 'end': '192.168.199.254'}] dns = ['8.8.8.8'] mock_subnet_rep = copy.copy(self.mock_subnet_rep) mock_subnet_rep['allocation_pools'] = pool mock_subnet_rep['dns_nameservers'] = dns 
mock_subnet_rep['gateway_ip'] = None self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', self.network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % self.network_name], ), json={'networks': [self.mock_network_rep]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnet': mock_subnet_rep}, validate=dict( json={ 'subnet': { 'cidr': self.subnet_cidr, 'enable_dhcp': False, 'ip_version': 4, 'network_id': self.mock_network_rep['id'], 'allocation_pools': pool, 'gateway_ip': None, 'dns_nameservers': dns, } } ), ), ] ) subnet = self.cloud.create_subnet( self.network_name, self.subnet_cidr, allocation_pools=pool, dns_nameservers=dns, disable_gateway_ip=True, ) self._compare_subnets(mock_subnet_rep, subnet) self.assert_calls() def test_create_subnet_with_gateway_ip(self): pool = [{'start': '192.168.199.8', 'end': '192.168.199.254'}] gateway = '192.168.199.2' dns = ['8.8.8.8'] mock_subnet_rep = copy.copy(self.mock_subnet_rep) mock_subnet_rep['allocation_pools'] = pool mock_subnet_rep['dns_nameservers'] = dns mock_subnet_rep['gateway_ip'] = gateway self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', self.network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % self.network_name], ), json={'networks': [self.mock_network_rep]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnet': mock_subnet_rep}, validate=dict( json={ 'subnet': { 'cidr': self.subnet_cidr, 'enable_dhcp': False, 'ip_version': 4, 'network_id': self.mock_network_rep['id'], 'allocation_pools': pool, 'gateway_ip': gateway, 'dns_nameservers': dns, } } ), ), ] ) subnet = 
self.cloud.create_subnet( self.network_name, self.subnet_cidr, allocation_pools=pool, dns_nameservers=dns, gateway_ip=gateway, ) self._compare_subnets(mock_subnet_rep, subnet) self.assert_calls() def test_create_subnet_conflict_gw_ops(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', 'kooky'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=kooky'], ), json={'networks': [self.mock_network_rep]}, ), ] ) gateway = '192.168.200.3' self.assertRaises( exceptions.SDKException, self.cloud.create_subnet, 'kooky', self.subnet_cidr, gateway_ip=gateway, disable_gateway_ip=True, ) self.assert_calls() def test_create_subnet_bad_network(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', 'duck'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=duck'], ), json={'networks': [self.mock_network_rep]}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_subnet, 'duck', self.subnet_cidr, ) self.assert_calls() def test_create_subnet_non_unique_network(self): net1 = dict(id='123', name=self.network_name) net2 = dict(id='456', name=self.network_name) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', self.network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % self.network_name], ), json={'networks': [net1, net2]}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_subnet, self.network_name, self.subnet_cidr, ) self.assert_calls() def test_create_subnet_from_subnetpool_with_prefixlen(self): pool = [{'start': '172.16.0.2', 'end': '172.16.0.15'}] id = 
'143296eb-7f47-4755-835c-488123475604' gateway = '172.16.0.1' dns = ['8.8.8.8'] routes = [{"destination": "0.0.0.0/0", "nexthop": "123.456.78.9"}] mock_subnet_rep = copy.copy(self.mock_subnet_rep) mock_subnet_rep['allocation_pools'] = pool mock_subnet_rep['dns_nameservers'] = dns mock_subnet_rep['host_routes'] = routes mock_subnet_rep['gateway_ip'] = gateway mock_subnet_rep['subnetpool_id'] = self.mock_subnetpool_rep['id'] mock_subnet_rep['cidr'] = self.subnetpool_cidr mock_subnet_rep['id'] = id self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', self.network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % self.network_name], ), json={'networks': [self.mock_network_rep]}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnet': mock_subnet_rep}, validate=dict( json={ 'subnet': { 'enable_dhcp': False, 'ip_version': 4, 'network_id': self.mock_network_rep['id'], 'allocation_pools': pool, 'dns_nameservers': dns, 'use_default_subnetpool': True, 'prefixlen': self.prefix_length, 'host_routes': routes, } } ), ), ] ) subnet = self.cloud.create_subnet( self.network_name, allocation_pools=pool, dns_nameservers=dns, use_default_subnetpool=True, prefixlen=self.prefix_length, host_routes=routes, ) mock_subnet_rep.update( {'prefixlen': self.prefix_length, 'use_default_subnetpool': True} ) self._compare_subnets(mock_subnet_rep, subnet) self.assert_calls() def test_create_subnet_from_specific_subnetpool(self): pool = [{'start': '172.16.0.2', 'end': '172.16.0.15'}] id = '143296eb-7f47-4755-835c-488123475604' gateway = '172.16.0.1' dns = ['8.8.8.8'] routes = [{"destination": "0.0.0.0/0", "nexthop": "123.456.78.9"}] mock_subnet_rep = copy.copy(self.mock_subnet_rep) mock_subnet_rep['allocation_pools'] = pool mock_subnet_rep['dns_nameservers'] = dns 
mock_subnet_rep['host_routes'] = routes mock_subnet_rep['gateway_ip'] = gateway mock_subnet_rep['subnetpool_id'] = self.mock_subnetpool_rep['id'] mock_subnet_rep['cidr'] = self.subnetpool_cidr mock_subnet_rep['id'] = id self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks', self.network_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'], qs_elements=['name=%s' % self.network_name], ), json={'networks': [self.mock_network_rep]}, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=[ 'v2.0', 'subnetpools', self.mock_subnetpool_rep['id'], ], ), json={"subnetpool": self.mock_subnetpool_rep}, ), dict( method='POST', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'] ), json={'subnet': mock_subnet_rep}, validate=dict( json={ 'subnet': { 'enable_dhcp': False, 'ip_version': 4, 'network_id': self.mock_network_rep['id'], 'allocation_pools': pool, 'dns_nameservers': dns, 'subnetpool_id': self.mock_subnetpool_rep[ 'id' ], 'prefixlen': self.prefix_length, 'host_routes': routes, } } ), ), ] ) subnet = self.cloud.create_subnet( self.network_name, allocation_pools=pool, dns_nameservers=dns, subnetpool_name_or_id=self.mock_subnetpool_rep['id'], prefixlen=self.prefix_length, host_routes=routes, ) mock_subnet_rep.update( {'prefixlen': self.prefix_length, 'use_default_subnetpool': None} ) self._compare_subnets(mock_subnet_rep, subnet) self.assert_calls() def test_delete_subnet(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', self.subnet_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'], qs_elements=['name=%s' % self.subnet_name], ), json={'subnets': [self.mock_subnet_rep]}, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', 
self.subnet_id], ), json={}, ), ] ) self.assertTrue(self.cloud.delete_subnet(self.subnet_name)) self.assert_calls() def test_delete_subnet_not_found(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', 'goofy'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'], qs_elements=['name=goofy'], ), json={'subnets': []}, ), ] ) self.assertFalse(self.cloud.delete_subnet('goofy')) self.assert_calls() def test_delete_subnet_multiple_found(self): subnet1 = dict(id='123', name=self.subnet_name) subnet2 = dict(id='456', name=self.subnet_name) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', self.subnet_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets'], qs_elements=['name=%s' % self.subnet_name], ), json={'subnets': [subnet1, subnet2]}, ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.delete_subnet, self.subnet_name, ) self.assert_calls() def test_delete_subnet_using_id(self): subnet1 = dict(id='123', name=self.subnet_name) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', subnet1['id']], ), json=subnet1, ), dict( method='DELETE', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', subnet1['id']], ), json={}, ), ] ) self.assertTrue(self.cloud.delete_subnet(subnet1['id'])) self.assert_calls() def test_update_subnet(self): expected_subnet = copy.copy(self.mock_subnet_rep) expected_subnet['name'] = 'goofy' self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', self.subnet_id], ), json=self.mock_subnet_rep, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', self.subnet_id], ), json={'subnet': expected_subnet}, 
validate=dict(json={'subnet': {'name': 'goofy'}}), ), ] ) subnet = self.cloud.update_subnet(self.subnet_id, subnet_name='goofy') self._compare_subnets(expected_subnet, subnet) self.assert_calls() def test_update_subnet_gateway_ip(self): expected_subnet = copy.copy(self.mock_subnet_rep) gateway = '192.168.199.3' expected_subnet['gateway_ip'] = gateway self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', self.subnet_id], ), json=self.mock_subnet_rep, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', self.subnet_id], ), json={'subnet': expected_subnet}, validate=dict(json={'subnet': {'gateway_ip': gateway}}), ), ] ) subnet = self.cloud.update_subnet(self.subnet_id, gateway_ip=gateway) self._compare_subnets(expected_subnet, subnet) self.assert_calls() def test_update_subnet_disable_gateway_ip(self): expected_subnet = copy.copy(self.mock_subnet_rep) expected_subnet['gateway_ip'] = None self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', self.subnet_id], ), json=self.mock_subnet_rep, ), dict( method='PUT', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'subnets', self.subnet_id], ), json={'subnet': expected_subnet}, validate=dict(json={'subnet': {'gateway_ip': None}}), ), ] ) subnet = self.cloud.update_subnet( self.subnet_id, disable_gateway_ip=True ) self._compare_subnets(expected_subnet, subnet) self.assert_calls() def test_update_subnet_conflict_gw_ops(self): self.assertRaises( exceptions.SDKException, self.cloud.update_subnet, self.subnet_id, gateway_ip="192.168.199.3", disable_gateway_ip=True, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_update_server.py0000664000175000017500000001112600000000000025727 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 
2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_update_server ---------------------------------- Tests for the `update_server` command. """ import uuid from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestUpdateServer(base.TestCase): def setUp(self): super().setUp() self.server_id = str(uuid.uuid4()) self.server_name = self.getUniqueString('name') self.updated_server_name = self.getUniqueString('name2') self.fake_server = fakes.make_fake_server( self.server_id, self.server_name ) def test_update_server_with_update_exception(self): """ Test that an exception in the update raises an exception in update_server. 
""" self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=['name=%s' % self.server_name], ), json={'servers': [self.fake_server]}, ), dict( method='PUT', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id] ), status_code=400, validate=dict( json={'server': {'name': self.updated_server_name}} ), ), ] ) self.assertRaises( exceptions.SDKException, self.cloud.update_server, self.server_name, name=self.updated_server_name, ) self.assert_calls() def test_update_server_name(self): """ Test that update_server updates the name without raising any exception """ fake_update_server = fakes.make_fake_server( self.server_id, self.updated_server_name ) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_name], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'], qs_elements=['name=%s' % self.server_name], ), json={'servers': [self.fake_server]}, ), dict( method='PUT', uri=self.get_mock_url( 'compute', 'public', append=['servers', self.server_id] ), json={'server': fake_update_server}, validate=dict( json={'server': {'name': self.updated_server_name}} ), ), dict( method='GET', uri=self.get_mock_url( 'network', 'public', append=['v2.0', 'networks'] ), json={'networks': []}, ), ] ) self.assertEqual( self.updated_server_name, self.cloud.update_server( self.server_name, name=self.updated_server_name )['name'], ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_usage.py0000664000175000017500000000556400000000000024174 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid from openstack.tests.unit import base class TestUsage(base.TestCase): def test_get_usage(self): project = self.mock_for_keystone_projects( project_count=1, id_get=True )[0] start = end = datetime.datetime.now() self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['os-simple-tenant-usage', project.project_id], qs_elements=[ f'start={start.isoformat()}', f'end={end.isoformat()}', ], ), json={ "tenant_usage": { "server_usages": [ { "ended_at": None, "flavor": "m1.tiny", "hours": 1.0, "instance_id": uuid.uuid4().hex, "local_gb": 1, "memory_mb": 512, "name": "instance-2", "started_at": "2012-10-08T20:10:44.541277", "state": "active", "tenant_id": "6f70656e737461636b20342065766572", # noqa: E501 "uptime": 3600, "vcpus": 1, } ], "start": "2012-10-08T20:10:44.587336", "stop": "2012-10-08T21:10:44.587336", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0, } }, ), ] ) self.cloud.get_compute_usage(project.project_id, start, end) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_users.py0000664000175000017500000001734300000000000024227 0ustar00zuulzuul00000000000000# Licensed under 
the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import testtools from openstack import exceptions from openstack.tests.unit import base class TestUsers(base.TestCase): def _get_keystone_mock_url( self, resource, append=None, v3=True, qs_elements=None ): base_url_append = None if v3: base_url_append = 'v3' return self.get_mock_url( service_type='identity', resource=resource, append=append, base_url_append=base_url_append, qs_elements=qs_elements, ) def _get_user_list(self, user_data): uri = self._get_keystone_mock_url(resource='users') return { 'users': [ user_data.json_response['user'], ], 'links': { 'self': uri, 'previous': None, 'next': None, }, } def test_create_user_v3(self): user_data = self._get_user_data( domain_id=uuid.uuid4().hex, description=self.getUniqueString('description'), ) self.register_uris( [ dict( method='POST', uri=self._get_keystone_mock_url(resource='users'), status_code=200, json=user_data.json_response, validate=dict(json=user_data.json_request), ), ] ) user = self.cloud.create_user( name=user_data.name, email=user_data.email, password=user_data.password, description=user_data.description, domain_id=user_data.domain_id, ) self.assertEqual(user_data.name, user.name) self.assertEqual(user_data.email, user.email) self.assertEqual(user_data.description, user.description) self.assertEqual(user_data.user_id, user.id) self.assert_calls() def test_create_user_v3_no_domain(self): user_data = self._get_user_data( domain_id=uuid.uuid4().hex, email='test@example.com' ) with 
testtools.ExpectedException( exceptions.SDKException, "User or project creation requires an explicit" " domain_id argument.", ): self.cloud.create_user( name=user_data.name, email=user_data.email, password=user_data.password, ) def test_delete_user(self): user_data = self._get_user_data(domain_id=uuid.uuid4().hex) user_resource_uri = self._get_keystone_mock_url( resource='users', append=[user_data.user_id] ) self.register_uris( [ dict( method='GET', uri=self._get_keystone_mock_url( resource='users', qs_elements=['name=%s' % user_data.name], ), status_code=200, json=self._get_user_list(user_data), ), dict(method='DELETE', uri=user_resource_uri, status_code=204), ] ) self.cloud.delete_user(user_data.name) self.assert_calls() def test_delete_user_not_found(self): self.register_uris( [ dict( method='GET', uri=self._get_keystone_mock_url(resource='users'), status_code=200, json={'users': []}, ) ] ) self.assertFalse(self.cloud.delete_user(self.getUniqueString())) def test_add_user_to_group(self): user_data = self._get_user_data() group_data = self._get_group_data() self.register_uris( [ dict( method='GET', uri=self._get_keystone_mock_url(resource='users'), status_code=200, json=self._get_user_list(user_data), ), dict( method='GET', uri=self._get_keystone_mock_url(resource='groups'), status_code=200, json={'groups': [group_data.json_response['group']]}, ), dict( method='PUT', uri=self._get_keystone_mock_url( resource='groups', append=[ group_data.group_id, 'users', user_data.user_id, ], ), status_code=200, ), ] ) self.cloud.add_user_to_group(user_data.user_id, group_data.group_id) self.assert_calls() def test_is_user_in_group(self): user_data = self._get_user_data() group_data = self._get_group_data() self.register_uris( [ dict( method='GET', uri=self._get_keystone_mock_url(resource='users'), status_code=200, json=self._get_user_list(user_data), ), dict( method='GET', uri=self._get_keystone_mock_url(resource='groups'), status_code=200, json={'groups': 
[group_data.json_response['group']]}, ), dict( method='HEAD', uri=self._get_keystone_mock_url( resource='groups', append=[ group_data.group_id, 'users', user_data.user_id, ], ), status_code=204, ), ] ) self.assertTrue( self.cloud.is_user_in_group(user_data.user_id, group_data.group_id) ) self.assert_calls() def test_remove_user_from_group(self): user_data = self._get_user_data() group_data = self._get_group_data() self.register_uris( [ dict( method='GET', uri=self._get_keystone_mock_url(resource='users'), json=self._get_user_list(user_data), ), dict( method='GET', uri=self._get_keystone_mock_url(resource='groups'), status_code=200, json={'groups': [group_data.json_response['group']]}, ), dict( method='DELETE', uri=self._get_keystone_mock_url( resource='groups', append=[ group_data.group_id, 'users', user_data.user_id, ], ), status_code=204, ), ] ) self.cloud.remove_user_from_group( user_data.user_id, group_data.group_id ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_volume.py0000664000175000017500000005641000000000000024373 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import testtools from openstack.block_storage.v3 import volume from openstack.cloud import meta from openstack.compute.v2 import volume_attachment from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestVolume(base.TestCase): def _compare_volumes(self, exp, real): self.assertDictEqual( volume.Volume(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def _compare_volume_attachments(self, exp, real): self.assertDictEqual( volume_attachment.VolumeAttachment(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def test_attach_volume(self): server = dict(id='server001') vol = { 'id': 'volume001', 'status': 'available', 'name': '', 'attachments': [], } volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) rattach = { 'server_id': server['id'], 'device': 'device001', 'volumeId': volume['id'], 'id': 'attachmentId', } self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=[ 'servers', server['id'], 'os-volume_attachments', ], ), json={'volumeAttachment': rattach}, validate=dict( json={'volumeAttachment': {'volumeId': vol['id']}} ), ), ] ) ret = self.cloud.attach_volume(server, volume, wait=False) self._compare_volume_attachments(rattach, ret) self.assert_calls() def test_attach_volume_exception(self): server = dict(id='server001') vol = { 'id': 'volume001', 'status': 'available', 'name': '', 'attachments': [], } volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=[ 'servers', server['id'], 'os-volume_attachments', ], ), status_code=404, validate=dict( json={'volumeAttachment': {'volumeId': vol['id']}} ), ), ] ) with testtools.ExpectedException( exceptions.NotFoundException, ): self.cloud.attach_volume(server, volume, wait=False) self.assert_calls() def test_attach_volume_wait(self): server = 
dict(id='server001') vol = { 'id': 'volume001', 'status': 'available', 'name': '', 'attachments': [], } volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) vol['attachments'] = [ {'server_id': server['id'], 'device': 'device001'} ] vol['status'] = 'in-use' attached_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) rattach = { 'server_id': server['id'], 'device': 'device001', 'volumeId': volume['id'], 'id': 'attachmentId', } self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=[ 'servers', server['id'], 'os-volume_attachments', ], ), json={'volumeAttachment': rattach}, validate=dict( json={'volumeAttachment': {'volumeId': vol['id']}} ), ), self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', vol['id']] ), json={'volume': volume}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', vol['id']] ), json={'volume': attached_volume}, ), ] ) # defaults to wait=True ret = self.cloud.attach_volume(server, volume) self._compare_volume_attachments(rattach, ret) self.assert_calls() def test_attach_volume_wait_error(self): server = dict(id='server001') vol = { 'id': 'volume001', 'status': 'available', 'name': '', 'attachments': [], } volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) vol['status'] = 'error' errored_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) rattach = { 'server_id': server['id'], 'device': 'device001', 'volumeId': volume['id'], 'id': 'attachmentId', } self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'compute', 'public', append=[ 'servers', server['id'], 'os-volume_attachments', ], ), json={'volumeAttachment': rattach}, validate=dict( json={'volumeAttachment': {'volumeId': vol['id']}} ), ), self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', 
volume['id']] ), json={'volume': errored_volume}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume['id']] ), json={'volume': errored_volume}, ), ] ) with testtools.ExpectedException(exceptions.ResourceFailure): self.cloud.attach_volume(server, volume) self.assert_calls() def test_attach_volume_not_available(self): server = dict(id='server001') volume = dict(id='volume001', status='error', attachments=[]) with testtools.ExpectedException( exceptions.SDKException, "Volume %s is not available. Status is '%s'" % (volume['id'], volume['status']), ): self.cloud.attach_volume(server, volume) self.assertEqual(0, len(self.adapter.request_history)) def test_attach_volume_already_attached(self): device_id = 'device001' server = dict(id='server001') volume = dict( id='volume001', attachments=[{'server_id': 'server001', 'device': device_id}], ) with testtools.ExpectedException( exceptions.SDKException, "Volume %s already attached to server %s on device %s" % (volume['id'], server['id'], device_id), ): self.cloud.attach_volume(server, volume) self.assertEqual(0, len(self.adapter.request_history)) def test_detach_volume(self): server = dict(id='server001') volume = dict( id='volume001', attachments=[{'server_id': 'server001', 'device': 'device001'}], ) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', server['id']] ), json={'server': server}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=[ 'servers', server['id'], 'os-volume_attachments', volume['id'], ], ), ), ] ) self.cloud.detach_volume(server, volume, wait=False) self.assert_calls() def test_detach_volume_exception(self): server = dict(id='server001') volume = dict( id='volume001', attachments=[{'server_id': 'server001', 'device': 'device001'}], ) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 
'public', append=['servers', server['id']] ), json={'server': server}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=[ 'servers', server['id'], 'os-volume_attachments', volume['id'], ], ), status_code=404, ), ] ) with testtools.ExpectedException( exceptions.NotFoundException, ): self.cloud.detach_volume(server, volume, wait=False) self.assert_calls() def test_detach_volume_wait(self): server = dict(id='server001') attachments = [{'server_id': 'server001', 'device': 'device001'}] vol = { 'id': 'volume001', 'status': 'attached', 'name': '', 'attachments': attachments, } volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) vol['status'] = 'available' vol['attachments'] = [] avail_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', server['id']] ), json={'server': server}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=[ 'servers', server['id'], 'os-volume_attachments', volume.id, ], ), ), self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', 'detail'] ), json={'volumes': [avail_volume]}, ), ] ) self.cloud.detach_volume(server, volume) self.assert_calls() def test_detach_volume_wait_error(self): server = dict(id='server001') attachments = [{'server_id': 'server001', 'device': 'device001'}] vol = { 'id': 'volume001', 'status': 'attached', 'name': '', 'attachments': attachments, } volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) vol['status'] = 'error' vol['attachments'] = [] errored_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', server['id']] ), json={'server': server}, ), dict( method='DELETE', uri=self.get_mock_url( 'compute', 'public', append=[ 
'servers', server['id'], 'os-volume_attachments', volume.id, ], ), ), self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', 'detail'] ), json={'volumes': [errored_volume]}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', errored_volume['id']], ), json={'volume': errored_volume}, ), ] ) with testtools.ExpectedException(exceptions.ResourceFailure): self.cloud.detach_volume(server, volume) self.assert_calls() def test_delete_volume_deletes(self): vol = { 'id': 'volume001', 'status': 'attached', 'name': '', 'attachments': [], } volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) self.register_uris( [ self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume.id] ), json={'volumes': [volume]}, ), dict( method='DELETE', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume.id] ), ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume.id] ), status_code=404, ), ] ) self.assertTrue(self.cloud.delete_volume(volume['id'])) self.assert_calls() def test_delete_volume_gone_away(self): vol = { 'id': 'volume001', 'status': 'attached', 'name': '', 'attachments': [], } volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) self.register_uris( [ self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume.id] ), json=volume, ), dict( method='DELETE', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume.id] ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume.id] ), status_code=404, ), ] ) self.assertTrue(self.cloud.delete_volume(volume['id'])) self.assert_calls() def test_delete_volume_force(self): vol = { 'id': 'volume001', 'status': 'attached', 'name': '', 'attachments': [], } volume = 
meta.obj_to_munch(fakes.FakeVolume(**vol)) self.register_uris( [ self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume['id']] ), json={'volumes': [volume]}, ), dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume.id, 'action'], ), validate=dict(json={'os-force_delete': None}), ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume['id']] ), status_code=404, ), ] ) self.assertTrue(self.cloud.delete_volume(volume['id'], force=True)) self.assert_calls() def test_set_volume_bootable(self): vol = { 'id': 'volume001', 'status': 'attached', 'name': '', 'attachments': [], } volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) self.register_uris( [ self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', 'detail'] ), json={'volumes': [volume]}, ), dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume.id, 'action'], ), json={'os-set_bootable': {'bootable': True}}, ), ] ) self.cloud.set_volume_bootable(volume['id']) self.assert_calls() def test_set_volume_bootable_false(self): vol = { 'id': 'volume001', 'status': 'attached', 'name': '', 'attachments': [], } volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) self.register_uris( [ self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', 'detail'] ), json={'volumes': [volume]}, ), dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', volume.id, 'action'], ), json={'os-set_bootable': {'bootable': False}}, ), ] ) self.cloud.set_volume_bootable(volume['id']) self.assert_calls() def test_get_volume_by_id(self): vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1')) self.register_uris( [ self.get_cinder_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 
'volumev3', 'public', append=['volumes', '01'] ), json={'volume': vol1}, ), ] ) self._compare_volumes(vol1, self.cloud.get_volume_by_id('01')) self.assert_calls() def test_create_volume(self): vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1')) self.register_uris( [ self.get_cinder_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes'] ), json={'volume': vol1}, validate=dict( json={ 'volume': { 'size': 50, 'name': 'vol1', } } ), ), ] ) self.cloud.create_volume(50, name='vol1') self.assert_calls() def test_create_bootable_volume(self): vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1')) self.register_uris( [ self.get_cinder_discovery_mock_dict(), dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes'] ), json={'volume': vol1}, validate=dict( json={ 'volume': { 'size': 50, 'name': 'vol1', } } ), ), dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['volumes', '01', 'action'], ), validate=dict( json={'os-set_bootable': {'bootable': True}} ), ), ] ) self.cloud.create_volume(50, name='vol1', bootable=True) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_volume_access.py0000664000175000017500000002404700000000000025715 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import testtools from openstack import exceptions from openstack.tests.unit import base class TestVolumeAccess(base.TestCase): def setUp(self): super().setUp() self.use_cinder() def test_list_volume_types(self): volume_type = dict( id='voltype01', description='volume type description', name='name', is_public=False, ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['types'] ), json={'volume_types': [volume_type]}, ) ] ) self.assertTrue(self.cloud.list_volume_types()) self.assert_calls() def test_get_volume_type(self): volume_type = dict( id='voltype01', description='volume type description', name='name', is_public=False, ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['types'] ), json={'volume_types': [volume_type]}, ) ] ) volume_type_got = self.cloud.get_volume_type(volume_type['name']) self.assertEqual(volume_type_got.id, volume_type['id']) def test_get_volume_type_access(self): volume_type = dict( id='voltype01', description='volume type description', name='name', is_public=False, ) volume_type_access = [ dict(volume_type_id='voltype01', name='name', project_id='prj01'), dict(volume_type_id='voltype01', name='name', project_id='prj02'), ] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['types'] ), json={'volume_types': [volume_type]}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=[ 'types', volume_type['id'], 'os-volume-type-access', ], ), json={'volume_type_access': volume_type_access}, ), ] ) self.assertEqual( len(self.cloud.get_volume_type_access(volume_type['name'])), 2 ) self.assert_calls() def test_remove_volume_type_access(self): volume_type = dict( id='voltype01', description='volume type description', name='name', is_public=False, ) project_001 = dict( volume_type_id='voltype01', name='name', project_id='prj01' ) project_002 = dict( volume_type_id='voltype01', name='name', 
project_id='prj02' ) volume_type_access = [project_001, project_002] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['types'] ), json={'volume_types': [volume_type]}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=[ 'types', volume_type['id'], 'os-volume-type-access', ], ), json={'volume_type_access': volume_type_access}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['types'] ), json={'volume_types': [volume_type]}, ), dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['types', volume_type['id'], 'action'], ), json={ 'removeProjectAccess': { 'project': project_001['project_id'] } }, validate=dict( json={ 'removeProjectAccess': { 'project': project_001['project_id'] } } ), ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['types'] ), json={'volume_types': [volume_type]}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=[ 'types', volume_type['id'], 'os-volume-type-access', ], ), json={'volume_type_access': [project_001]}, ), ] ) self.assertEqual( len(self.cloud.get_volume_type_access(volume_type['name'])), 2 ) self.cloud.remove_volume_type_access( volume_type['name'], project_001['project_id'] ) self.assertEqual( len(self.cloud.get_volume_type_access(volume_type['name'])), 1 ) self.assert_calls() def test_add_volume_type_access(self): volume_type = dict( id='voltype01', description='volume type description', name='name', is_public=False, ) project_001 = dict( volume_type_id='voltype01', name='name', project_id='prj01' ) project_002 = dict( volume_type_id='voltype01', name='name', project_id='prj02' ) volume_type_access = [project_001, project_002] self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['types'] ), json={'volume_types': [volume_type]}, ), dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['types', 
volume_type['id'], 'action'], ), json={ 'addProjectAccess': { 'project': project_002['project_id'] } }, validate=dict( json={ 'addProjectAccess': { 'project': project_002['project_id'] } } ), ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['types'] ), json={'volume_types': [volume_type]}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=[ 'types', volume_type['id'], 'os-volume-type-access', ], ), json={'volume_type_access': volume_type_access}, ), ] ) self.cloud.add_volume_type_access( volume_type['name'], project_002['project_id'] ) self.assertEqual( len(self.cloud.get_volume_type_access(volume_type['name'])), 2 ) self.assert_calls() def test_add_volume_type_access_missing(self): volume_type = dict( id='voltype01', description='volume type description', name='name', is_public=False, ) project_001 = dict( volume_type_id='voltype01', name='name', project_id='prj01' ) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['types'] ), json={'volume_types': [volume_type]}, ) ] ) with testtools.ExpectedException( exceptions.SDKException, "VolumeType not found: MISSING", ): self.cloud.add_volume_type_access( "MISSING", project_001['project_id'] ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_volume_backups.py0000664000175000017500000002321500000000000026100 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v3 import backup from openstack.tests.unit import base class TestVolumeBackups(base.TestCase): def setUp(self): super().setUp() self.use_cinder() def _compare_backups(self, exp, real): self.assertDictEqual( backup.Backup(**exp).to_dict(computed=False), real.to_dict(computed=False), ) def test_search_volume_backups(self): name = 'Volume1' vol1 = {'name': name, 'availability_zone': 'az1'} vol2 = {'name': name, 'availability_zone': 'az1'} vol3 = {'name': 'Volume2', 'availability_zone': 'az2'} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', 'detail'] ), json={"backups": [vol1, vol2, vol3]}, ) ] ) result = self.cloud.search_volume_backups( name, {'availability_zone': 'az1'} ) self.assertEqual(len(result), 2) for a, b in zip([vol1, vol2], result): self._compare_backups(a, b) self.assert_calls() def test_get_volume_backup(self): name = 'Volume1' vol1 = {'name': name, 'availability_zone': 'az1'} vol2 = {'name': name, 'availability_zone': 'az2'} vol3 = {'name': 'Volume2', 'availability_zone': 'az1'} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', 'detail'] ), json={"backups": [vol1, vol2, vol3]}, ) ] ) result = self.cloud.get_volume_backup( name, {'availability_zone': 'az1'} ) self._compare_backups(vol1, result) self.assert_calls() def test_list_volume_backups(self): backup = { 'id': '6ff16bdf-44d5-4bf9-b0f3-687549c76414', 'status': 'available', } search_opts = {'status': 'available'} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', 'detail'], qs_elements=['='.join(i) for i in search_opts.items()], ), json={"backups": [backup]}, ) ] ) result = self.cloud.list_volume_backups(True, search_opts) self.assertEqual(len(result), 1) self._compare_backups(backup, result[0]) 
self.assert_calls() def test_delete_volume_backup_wait(self): backup_id = '6ff16bdf-44d5-4bf9-b0f3-687549c76414' backup = {'id': backup_id, 'status': 'available'} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', 'detail'] ), json={"backups": [backup]}, ), dict( method='DELETE', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', backup_id] ), ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', backup_id] ), json={"backup": backup}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', backup_id] ), status_code=404, ), ] ) self.cloud.delete_volume_backup(backup_id, False, True, 1) self.assert_calls() def test_delete_volume_backup_force(self): backup_id = '6ff16bdf-44d5-4bf9-b0f3-687549c76414' backup = {'id': backup_id, 'status': 'available'} self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', 'detail'] ), json={"backups": [backup]}, ), dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', backup_id, 'action'], ), json={'os-force_delete': None}, validate=dict(json={'os-force_delete': None}), ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', backup_id] ), json={"backup": backup}, ), dict( method='GET', uri=self.get_mock_url( 'volumev3', 'public', append=['backups', backup_id] ), status_code=404, ), ] ) self.cloud.delete_volume_backup(backup_id, True, True, 1) self.assert_calls() def test_create_volume_backup(self): volume_id = '1234' backup_name = 'bak1' bak1 = { 'id': '5678', 'volume_id': volume_id, 'status': 'available', 'name': backup_name, } self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['backups'] ), json={'backup': bak1}, validate=dict( json={ 'backup': { 'name': backup_name, 'volume_id': volume_id, 'description': None, 'force': False, 'snapshot_id': None, 
'incremental': False, } } ), ), ] ) self.cloud.create_volume_backup(volume_id, name=backup_name) self.assert_calls() def test_create_incremental_volume_backup(self): volume_id = '1234' backup_name = 'bak1' bak1 = { 'id': '5678', 'volume_id': volume_id, 'status': 'available', 'name': backup_name, } self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['backups'] ), json={'backup': bak1}, validate=dict( json={ 'backup': { 'name': backup_name, 'volume_id': volume_id, 'description': None, 'force': False, 'snapshot_id': None, 'incremental': True, } } ), ), ] ) self.cloud.create_volume_backup( volume_id, name=backup_name, incremental=True ) self.assert_calls() def test_create_volume_backup_from_snapshot(self): volume_id = '1234' backup_name = 'bak1' snapshot_id = '5678' bak1 = { 'id': '5678', 'volume_id': volume_id, 'status': 'available', 'name': 'bak1', } self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'volumev3', 'public', append=['backups'] ), json={'backup': bak1}, validate=dict( json={ 'backup': { 'name': backup_name, 'volume_id': volume_id, 'description': None, 'force': False, 'snapshot_id': snapshot_id, 'incremental': False, } } ), ), ] ) self.cloud.create_volume_backup( volume_id, name=backup_name, snapshot_id=snapshot_id ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/cloud/test_zone.py0000664000175000017500000002242700000000000024040 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from openstack import exceptions from openstack.tests.unit import base zone_dict = { 'name': 'example.net.', 'type': 'PRIMARY', 'email': 'test@example.net', 'description': 'Example zone', 'ttl': 3600, 'id': '1', } class ZoneTestWrapper: def __init__(self, ut, attrs): self.remote_res = attrs self.ut = ut def get_create_response_json(self): return self.remote_res def get_get_response_json(self): return self.remote_res def __getitem__(self, key): """Dict access to be able to access properties easily""" return self.remote_res[key] def cmp(self, other): ut = self.ut me = self.remote_res for k, v in me.items(): # Go over known attributes. 
We might of course compare others, # but not necessary here ut.assertEqual(v, other[k]) class TestZone(base.TestCase): def setUp(self): super().setUp() self.use_designate() def test_create_zone(self): fake_zone = ZoneTestWrapper(self, zone_dict) self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones'] ), json=fake_zone.get_create_response_json(), validate=dict( json={ 'description': zone_dict['description'], 'email': zone_dict['email'], 'name': zone_dict['name'], 'ttl': zone_dict['ttl'], 'type': 'PRIMARY', } ), ) ] ) z = self.cloud.create_zone( name=zone_dict['name'], zone_type=zone_dict['type'], email=zone_dict['email'], description=zone_dict['description'], ttl=zone_dict['ttl'], masters=None, ) fake_zone.cmp(z) self.assert_calls() def test_create_zone_exception(self): self.register_uris( [ dict( method='POST', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones'] ), status_code=500, ) ] ) self.assertRaises( exceptions.SDKException, self.cloud.create_zone, 'example.net.' 
) self.assert_calls() def test_update_zone(self): fake_zone = ZoneTestWrapper(self, zone_dict) new_ttl = 7200 updated_zone_dict = copy.copy(zone_dict) updated_zone_dict['ttl'] = new_ttl updated_zone = ZoneTestWrapper(self, updated_zone_dict) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id']], ), json=fake_zone.get_get_response_json(), ), dict( method='PATCH', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id']], ), json=updated_zone.get_get_response_json(), validate=dict(json={"ttl": new_ttl}), ), ] ) z = self.cloud.update_zone(fake_zone['id'], ttl=new_ttl) updated_zone.cmp(z) self.assert_calls() def test_delete_zone(self): fake_zone = ZoneTestWrapper(self, zone_dict) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id']], ), json=fake_zone.get_get_response_json(), ), dict( method='DELETE', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id']], ), status_code=202, ), ] ) self.assertTrue(self.cloud.delete_zone(fake_zone['id'])) self.assert_calls() def test_get_zone_by_id(self): fake_zone = ZoneTestWrapper(self, zone_dict) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['id']], ), json=fake_zone.get_get_response_json(), ) ] ) res = self.cloud.get_zone(fake_zone['id']) fake_zone.cmp(res) self.assert_calls() def test_get_zone_by_name(self): fake_zone = ZoneTestWrapper(self, zone_dict) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', fake_zone['name']], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones'], qs_elements=[ 'name={name}'.format(name=fake_zone['name']) ], ), json={"zones": [fake_zone.get_get_response_json()]}, ), ] ) res = self.cloud.get_zone(fake_zone['name']) fake_zone.cmp(res) 
self.assert_calls() def test_get_zone_not_found_returns_false(self): self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones', 'nonexistingzone.net.'], ), status_code=404, ), dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones'], qs_elements=['name=nonexistingzone.net.'], ), json={"zones": []}, ), ] ) zone = self.cloud.get_zone('nonexistingzone.net.') self.assertFalse(zone) self.assert_calls() def test_list_zones(self): fake_zone = ZoneTestWrapper(self, zone_dict) self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones'] ), json={ 'zones': [fake_zone.get_get_response_json()], 'links': { 'next': self.get_mock_url( 'dns', 'public', append=['v2', 'zones/'], qs_elements=['limit=1', 'marker=asd'], ), 'self': self.get_mock_url( 'dns', 'public', append=['v2', 'zones/'], qs_elements=['limit=1'], ), }, 'metadata': {'total_count': 2}, }, ), dict( method='GET', uri=self.get_mock_url( 'dns', 'public', append=['v2', 'zones/'], qs_elements=['limit=1', 'marker=asd'], ), json={'zones': [fake_zone.get_get_response_json()]}, ), ] ) res = self.cloud.list_zones() # updated_rs.cmp(res) self.assertEqual(2, len(res)) self.assert_calls() ././@PaxHeader0000000000000000000000000000003100000000000011447 xustar000000000000000025 mtime=1725296385.4254 openstacksdk-4.0.0/openstack/tests/unit/clustering/0000775000175000017500000000000000000000000022516 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/__init__.py0000664000175000017500000000000000000000000024615 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/test_version.py0000664000175000017500000000260600000000000025620 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.clustering import version from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'status': '3', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.429402 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/0000775000175000017500000000000000000000000023044 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/__init__.py0000664000175000017500000000000000000000000025143 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_action.py0000664000175000017500000000624100000000000025735 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.clustering.v1 import action from openstack.tests.unit import base FAKE_CLUSTER_ID = 'ffaed25e-46f5-4089-8e20-b3b4722fd597' FAKE_ID = '633bd3c6-520b-420f-8e6a-dc2a47022b53' FAKE_NAME = 'node_create_c3783474' FAKE = { 'id': FAKE_ID, 'name': FAKE_NAME, 'target': 'c378e474-d091-43a3-b083-e19719291358', 'action': 'NODE_CREATE', 'cause': 'RPC Request', 'owner': None, 'user': '3747afc360b64702a53bdd64dc1b8976', 'project': '42d9e9663331431f97b75e25136307ff', 'domain': '204ccccd267b40aea871750116b5b184', 'interval': -1, 'start_time': 1453414055.48672, 'end_time': 1453414055.48672, 'timeout': 3600, 'status': 'SUCCEEDED', 'status_reason': 'Action completed successfully.', 'inputs': {}, 'outputs': {}, 'depends_on': [], 'depended_by': [], 'created_at': '2015-10-10T12:46:36.000000', 'updated_at': '2016-10-10T12:46:36.000000', 'cluster_id': FAKE_CLUSTER_ID, } class TestAction(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = action.Action() self.assertEqual('action', sot.resource_key) self.assertEqual('actions', sot.resources_key) self.assertEqual('/actions', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_commit) def test_instantiate(self): sot = action.Action(**FAKE) self.assertEqual(FAKE['id'], sot.id) 
self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['target'], sot.target_id) self.assertEqual(FAKE['action'], sot.action) self.assertEqual(FAKE['cause'], sot.cause) self.assertEqual(FAKE['owner'], sot.owner_id) self.assertEqual(FAKE['user'], sot.user_id) self.assertEqual(FAKE['project'], sot.project_id) self.assertEqual(FAKE['domain'], sot.domain_id) self.assertEqual(FAKE['interval'], sot.interval) self.assertEqual(FAKE['start_time'], sot.start_at) self.assertEqual(FAKE['end_time'], sot.end_at) self.assertEqual(FAKE['timeout'], sot.timeout) self.assertEqual(FAKE['status'], sot.status) self.assertEqual(FAKE['status_reason'], sot.status_reason) self.assertEqual(FAKE['inputs'], sot.inputs) self.assertEqual(FAKE['outputs'], sot.outputs) self.assertEqual(FAKE['depends_on'], sot.depends_on) self.assertEqual(FAKE['depended_by'], sot.depended_by) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['updated_at'], sot.updated_at) self.assertEqual(FAKE['cluster_id'], sot.cluster_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_build_info.py0000664000175000017500000000230500000000000026567 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.clustering.v1 import build_info from openstack.tests.unit import base FAKE = { 'api': { 'revision': '1.0.0', }, 'engine': { 'revision': '1.0.0', }, } class TestBuildInfo(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = build_info.BuildInfo() self.assertEqual('/build-info', sot.base_path) self.assertEqual('build_info', sot.resource_key) self.assertTrue(sot.allow_fetch) def test_instantiate(self): sot = build_info.BuildInfo(**FAKE) self.assertEqual(FAKE['api'], sot.api) self.assertEqual(FAKE['engine'], sot.engine) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_cluster.py0000664000175000017500000002421400000000000026141 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.clustering.v1 import cluster from openstack.tests.unit import base FAKE_ID = '092d0955-2645-461a-b8fa-6a44655cdb2c' FAKE_NAME = 'test_cluster' FAKE = { 'id': 'IDENTIFIER', 'config': {'key1': 'value1', 'key2': 'value2'}, 'desired_capacity': 1, 'max_size': 3, 'min_size': 0, 'name': FAKE_NAME, 'profile_id': 'myserver', 'profile_only': True, 'metadata': {}, 'dependents': {}, 'timeout': None, 'init_at': '2015-10-10T12:46:36.000000', 'created_at': '2015-10-10T12:46:36.000000', 'updated_at': '2016-10-10T12:46:36.000000', } FAKE_CREATE_RESP = { 'cluster': { 'action': 'a679c926-908f-49e7-a822-06ca371e64e1', 'init_at': '2015-10-10T12:46:36.000000', 'created_at': '2015-10-10T12:46:36.000000', 'updated_at': '2016-10-10T12:46:36.000000', 'data': {}, 'desired_capacity': 1, 'domain': None, 'id': FAKE_ID, 'init_time': None, 'max_size': 3, 'metadata': {}, 'min_size': 0, 'name': 'test_cluster', 'nodes': [], 'policies': [], 'profile_id': '560a8f9d-7596-4a32-85e8-03645fa7be13', 'profile_name': 'myserver', 'project': '333acb15a43242f4a609a27cb097a8f2', 'status': 'INIT', 'status_reason': 'Initializing', 'timeout': None, 'user': '6d600911ff764e54b309ce734c89595e', 'dependents': {}, } } class TestCluster(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = cluster.Cluster() self.assertEqual('cluster', sot.resource_key) self.assertEqual('clusters', sot.resources_key) self.assertEqual('/clusters', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_instantiate(self): sot = cluster.Cluster(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['profile_id'], sot.profile_id) self.assertEqual(FAKE['min_size'], sot.min_size) self.assertEqual(FAKE['max_size'], sot.max_size) self.assertEqual(FAKE['desired_capacity'], sot.desired_capacity) 
self.assertEqual(FAKE['config'], sot.config) self.assertEqual(FAKE['timeout'], sot.timeout) self.assertEqual(FAKE['metadata'], sot.metadata) self.assertEqual(FAKE['init_at'], sot.init_at) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['updated_at'], sot.updated_at) self.assertEqual(FAKE['dependents'], sot.dependents) self.assertTrue(sot.is_profile_only) self.assertDictEqual( { "limit": "limit", "marker": "marker", "name": "name", "status": "status", "sort": "sort", "global_project": "global_project", }, sot._query_mapping._mapping, ) def test_scale_in(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.scale_in(sess, 3)) url = 'clusters/%s/actions' % sot.id body = {'scale_in': {'count': 3}} sess.post.assert_called_once_with(url, json=body) def test_scale_out(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.scale_out(sess, 3)) url = 'clusters/%s/actions' % sot.id body = {'scale_out': {'count': 3}} sess.post.assert_called_once_with(url, json=body) def test_resize(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.resize(sess, foo='bar', zoo=5)) url = 'clusters/%s/actions' % sot.id body = {'resize': {'foo': 'bar', 'zoo': 5}} sess.post.assert_called_once_with(url, json=body) def test_add_nodes(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.add_nodes(sess, ['node-33'])) url = 'clusters/%s/actions' % sot.id body = {'add_nodes': {'nodes': ['node-33']}} sess.post.assert_called_once_with(url, json=body) def test_del_nodes(self): sot = 
cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.del_nodes(sess, ['node-11'])) url = 'clusters/%s/actions' % sot.id body = {'del_nodes': {'nodes': ['node-11']}} sess.post.assert_called_once_with(url, json=body) def test_del_nodes_with_params(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) params = { 'destroy_after_deletion': True, } self.assertEqual('', sot.del_nodes(sess, ['node-11'], **params)) url = 'clusters/%s/actions' % sot.id body = { 'del_nodes': { 'nodes': ['node-11'], 'destroy_after_deletion': True, } } sess.post.assert_called_once_with(url, json=body) def test_replace_nodes(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.replace_nodes(sess, {'node-22': 'node-44'})) url = 'clusters/%s/actions' % sot.id body = {'replace_nodes': {'nodes': {'node-22': 'node-44'}}} sess.post.assert_called_once_with(url, json=body) def test_policy_attach(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) params = { 'enabled': True, } self.assertEqual('', sot.policy_attach(sess, 'POLICY', **params)) url = 'clusters/%s/actions' % sot.id body = { 'policy_attach': { 'policy_id': 'POLICY', 'enabled': True, } } sess.post.assert_called_once_with(url, json=body) def test_policy_detach(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.policy_detach(sess, 'POLICY')) url = 'clusters/%s/actions' % sot.id body = {'policy_detach': {'policy_id': 'POLICY'}} sess.post.assert_called_once_with(url, json=body) def 
test_policy_update(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) params = {'enabled': False} self.assertEqual('', sot.policy_update(sess, 'POLICY', **params)) url = 'clusters/%s/actions' % sot.id body = {'policy_update': {'policy_id': 'POLICY', 'enabled': False}} sess.post.assert_called_once_with(url, json=body) def test_check(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.check(sess)) url = 'clusters/%s/actions' % sot.id body = {'check': {}} sess.post.assert_called_once_with(url, json=body) def test_recover(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.recover(sess)) url = 'clusters/%s/actions' % sot.id body = {'recover': {}} sess.post.assert_called_once_with(url, json=body) def test_operation(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.op(sess, 'dance', style='tango')) url = 'clusters/%s/ops' % sot.id body = {'dance': {'style': 'tango'}} sess.post.assert_called_once_with(url, json=body) def test_force_delete(self): sot = cluster.Cluster(**FAKE) resp = mock.Mock() fake_action_id = 'f1de9847-2382-4272-8e73-cab0bc194663' resp.headers = {'Location': fake_action_id} resp.json = mock.Mock(return_value={"foo": "bar"}) resp.status_code = 200 sess = mock.Mock() sess.delete = mock.Mock(return_value=resp) res = sot.force_delete(sess) self.assertEqual(fake_action_id, res.id) url = 'clusters/%s' % sot.id body = {'force': True} sess.delete.assert_called_once_with(url, json=body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_cluster_attr.py0000664000175000017500000000265700000000000027202 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.clustering.v1 import cluster_attr as ca from openstack.tests.unit import base FAKE = { 'cluster_id': '633bd3c6-520b-420f-8e6a-dc2a47022b53', 'path': 'path.to.attr', 'id': 'c378e474-d091-43a3-b083-e19719291358', 'value': 'fake value', } class TestClusterAttr(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = ca.ClusterAttr() self.assertEqual('cluster_attributes', sot.resources_key) self.assertEqual( '/clusters/%(cluster_id)s/attrs/%(path)s', sot.base_path ) self.assertTrue(sot.allow_list) def test_instantiate(self): sot = ca.ClusterAttr(**FAKE) self.assertEqual(FAKE['cluster_id'], sot.cluster_id) self.assertEqual(FAKE['path'], sot.path) self.assertEqual(FAKE['id'], sot.node_id) self.assertEqual(FAKE['value'], sot.attr_value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_cluster_policy.py0000664000175000017500000000431700000000000027522 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.clustering.v1 import cluster_policy from openstack.tests.unit import base FAKE = { 'cluster_id': '99e39f4b-1990-4237-a556-1518f0f0c9e7', 'cluster_name': 'test_cluster', 'data': {'purpose': 'unknown'}, 'enabled': True, 'policy_id': 'ac5415bd-f522-4160-8be0-f8853e4bc332', 'policy_name': 'dp01', 'policy_type': 'senlin.poicy.deletion-1.0', } class TestClusterPolicy(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = cluster_policy.ClusterPolicy() self.assertEqual('cluster_policy', sot.resource_key) self.assertEqual('cluster_policies', sot.resources_key) self.assertEqual('/clusters/%(cluster_id)s/policies', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) self.assertDictEqual( { "policy_name": "policy_name", "policy_type": "policy_type", "is_enabled": "enabled", "sort": "sort", "limit": "limit", "marker": "marker", }, sot._query_mapping._mapping, ) def test_instantiate(self): sot = cluster_policy.ClusterPolicy(**FAKE) self.assertEqual(FAKE['policy_id'], sot.id) self.assertEqual(FAKE['cluster_id'], sot.cluster_id) self.assertEqual(FAKE['cluster_name'], sot.cluster_name) self.assertEqual(FAKE['data'], sot.data) self.assertTrue(sot.is_enabled) self.assertEqual(FAKE['policy_id'], sot.policy_id) self.assertEqual(FAKE['policy_name'], sot.policy_name) self.assertEqual(FAKE['policy_type'], sot.policy_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_event.py0000664000175000017500000000445700000000000025610 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.clustering.v1 import event from openstack.tests.unit import base FAKE = { 'action': 'NODE_CREATE', 'cluster_id': None, 'id': 'ffaed25e-46f5-4089-8e20-b3b4722fd597', 'level': '20', 'oid': 'efff1c11-2ada-47da-bedd-2c9af4fd099a', 'oname': 'node_create_b4a49016', 'otype': 'NODEACTION', 'project': '42d9e9663331431f97b75e25136307ff', 'status': 'START', 'status_reason': 'The action was abandoned.', 'timestamp': '2016-10-10T12:46:36.000000', 'user': '5e5bf8027826429c96af157f68dc9072', 'meta_data': { "action": {"created_at": "2019-07-13T13:18:18Z", "outputs": {}} }, } class TestEvent(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = event.Event() self.assertEqual('event', sot.resource_key) self.assertEqual('events', sot.resources_key) self.assertEqual('/events', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) def test_instantiate(self): sot = event.Event(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['action'], sot.action) self.assertEqual(FAKE['cluster_id'], sot.cluster_id) self.assertEqual(FAKE['level'], sot.level) self.assertEqual(FAKE['oid'], sot.obj_id) self.assertEqual(FAKE['oname'], sot.obj_name) self.assertEqual(FAKE['otype'], sot.obj_type) self.assertEqual(FAKE['project'], sot.project_id) 
self.assertEqual(FAKE['status'], sot.status) self.assertEqual(FAKE['status_reason'], sot.status_reason) self.assertEqual(FAKE['timestamp'], sot.generated_at) self.assertEqual(FAKE['user'], sot.user_id) self.assertEqual(FAKE['meta_data'], sot.meta_data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_node.py0000664000175000017500000001331000000000000025400 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.clustering.v1 import node from openstack.tests.unit import base FAKE_ID = '123d0955-0099-aabb-b8fa-6a44655ceeff' FAKE_NAME = 'test_node' FAKE = { 'id': FAKE_ID, 'cluster_id': 'clusterA', 'metadata': {'key1': 'value1'}, 'name': FAKE_NAME, 'profile_id': 'myserver', 'domain': '204ccccd267b40aea871750116b5b184', 'user': '3747afc360b64702a53bdd64dc1b8976', 'project': '42d9e9663331431f97b75e25136307ff', 'index': 1, 'role': 'master', 'dependents': {}, 'created_at': '2015-10-10T12:46:36.000000', 'updated_at': '2016-10-10T12:46:36.000000', 'init_at': '2015-10-10T12:46:36.000000', 'tainted': True, } class TestNode(base.TestCase): def test_basic(self): sot = node.Node() self.assertEqual('node', sot.resource_key) self.assertEqual('nodes', sot.resources_key) self.assertEqual('/nodes', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_instantiate(self): sot = node.Node(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['profile_id'], sot.profile_id) self.assertEqual(FAKE['cluster_id'], sot.cluster_id) self.assertEqual(FAKE['user'], sot.user_id) self.assertEqual(FAKE['project'], sot.project_id) self.assertEqual(FAKE['domain'], sot.domain_id) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['index'], sot.index) self.assertEqual(FAKE['role'], sot.role) self.assertEqual(FAKE['metadata'], sot.metadata) self.assertEqual(FAKE['init_at'], sot.init_at) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['updated_at'], sot.updated_at) self.assertEqual(FAKE['dependents'], sot.dependents) self.assertEqual(FAKE['tainted'], sot.tainted) def test_check(self): sot = node.Node(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.check(sess)) url = 'nodes/%s/actions' % 
sot.id body = {'check': {}} sess.post.assert_called_once_with(url, json=body) def test_recover(self): sot = node.Node(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.recover(sess)) url = 'nodes/%s/actions' % sot.id body = {'recover': {}} sess.post.assert_called_once_with(url, json=body) def test_operation(self): sot = node.Node(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.op(sess, 'dance', style='tango')) url = 'nodes/%s/ops' % sot.id sess.post.assert_called_once_with( url, json={'dance': {'style': 'tango'}} ) def test_adopt_preview(self): sot = node.Node.new() resp = mock.Mock() resp.headers = {} resp.json = mock.Mock(return_value={"foo": "bar"}) sess = mock.Mock() sess.post = mock.Mock(return_value=resp) attrs = { 'identity': 'fake-resource-id', 'overrides': {}, 'type': 'os.nova.server-1.0', 'snapshot': False, } res = sot.adopt(sess, True, **attrs) self.assertEqual({"foo": "bar"}, res) sess.post.assert_called_once_with("nodes/adopt-preview", json=attrs) def test_adopt(self): sot = node.Node.new() resp = mock.Mock() resp.headers = {} resp.json = mock.Mock(return_value={"foo": "bar"}) resp.status_code = 200 sess = mock.Mock() sess.post = mock.Mock(return_value=resp) res = sot.adopt(sess, False, param="value") self.assertEqual(sot, res) sess.post.assert_called_once_with( "nodes/adopt", json={"param": "value"} ) def test_force_delete(self): sot = node.Node(**FAKE) resp = mock.Mock() fake_action_id = 'f1de9847-2382-4272-8e73-cab0bc194663' resp.headers = {'Location': fake_action_id} resp.json = mock.Mock(return_value={"foo": "bar"}) resp.status_code = 200 sess = mock.Mock() sess.delete = mock.Mock(return_value=resp) res = sot.force_delete(sess) self.assertEqual(fake_action_id, res.id) url = 'nodes/%s' % sot.id body = {'force': True} 
sess.delete.assert_called_once_with(url, json=body) class TestNodeDetail(base.TestCase): def test_basic(self): sot = node.NodeDetail() self.assertEqual('/nodes/%(node_id)s?show_details=True', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_policy.py0000664000175000017500000000557100000000000025764 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.clustering.v1 import policy from openstack.tests.unit import base FAKE_ID = 'ac5415bd-f522-4160-8be0-f8853e4bc332' FAKE_NAME = 'test_policy' FAKE = { 'id': FAKE_ID, 'name': FAKE_NAME, 'spec': { 'type': 'senlin.policy.deletion', 'version': '1.0', 'properties': { 'criteria': 'OLDEST_FIRST', 'grace_period': 60, 'reduce_desired_capacity': False, 'destroy_after_deletion': True, }, }, 'project': '42d9e9663331431f97b75e25136307ff', 'domain': '204ccccd267b40aea871750116b5b184', 'user': '3747afc360b64702a53bdd64dc1b8976', 'type': 'senlin.policy.deletion-1.0', 'created_at': '2015-10-10T12:46:36.000000', 'updated_at': '2016-10-10T12:46:36.000000', 'data': {}, } class TestPolicy(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = policy.Policy() self.assertEqual('policy', sot.resource_key) self.assertEqual('policies', sot.resources_key) self.assertEqual('/policies', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_instantiate(self): sot = policy.Policy(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['spec'], sot.spec) self.assertEqual(FAKE['project'], sot.project_id) self.assertEqual(FAKE['domain'], sot.domain_id) self.assertEqual(FAKE['user'], sot.user_id) self.assertEqual(FAKE['data'], sot.data) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['updated_at'], sot.updated_at) class TestPolicyValidate(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = policy.PolicyValidate() self.assertEqual('policy', sot.resource_key) self.assertEqual('policies', sot.resources_key) self.assertEqual('/policies/validate', sot.base_path) self.assertTrue(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_policy_type.py0000664000175000017500000000265300000000000027023 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.clustering.v1 import policy_type from openstack.tests.unit import base FAKE = { 'name': 'FAKE_POLICY_TYPE', 'schema': {'foo': 'bar'}, 'support_status': {'1.0': [{'status': 'supported', 'since': '2016.10'}]}, } class TestPolicyType(base.TestCase): def test_basic(self): sot = policy_type.PolicyType() self.assertEqual('policy_type', sot.resource_key) self.assertEqual('policy_types', sot.resources_key) self.assertEqual('/policy-types', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) def test_instantiate(self): sot = policy_type.PolicyType(**FAKE) self.assertEqual(FAKE['name'], sot._get_id(sot)) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['schema'], sot.schema) self.assertEqual(FAKE['support_status'], sot.support_status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_profile.py0000664000175000017500000000600100000000000026112 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.clustering.v1 import profile from openstack.tests.unit import base FAKE_ID = '9b127538-a675-4271-ab9b-f24f54cfe173' FAKE_NAME = 'test_profile' FAKE = { 'metadata': {}, 'name': FAKE_NAME, 'id': FAKE_ID, 'spec': { 'type': 'os.nova.server', 'version': 1.0, 'properties': { 'flavor': 1, 'image': 'cirros-0.3.2-x86_64-uec', 'key_name': 'oskey', 'name': 'cirros_server', }, }, 'project': '42d9e9663331431f97b75e25136307ff', 'domain': '204ccccd267b40aea871750116b5b184', 'user': '3747afc360b64702a53bdd64dc1b8976', 'type': 'os.nova.server', 'created_at': '2015-10-10T12:46:36.000000', 'updated_at': '2016-10-10T12:46:36.000000', } class TestProfile(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = profile.Profile() self.assertEqual('profile', sot.resource_key) self.assertEqual('profiles', sot.resources_key) self.assertEqual('/profiles', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) def test_instantiate(self): sot = profile.Profile(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['metadata'], sot.metadata) self.assertEqual(FAKE['spec'], sot.spec) self.assertEqual(FAKE['project'], sot.project_id) self.assertEqual(FAKE['domain'], sot.domain_id) self.assertEqual(FAKE['user'], sot.user_id) self.assertEqual(FAKE['type'], sot.type) self.assertEqual(FAKE['created_at'], sot.created_at) 
self.assertEqual(FAKE['updated_at'], sot.updated_at) class TestProfileValidate(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = profile.ProfileValidate() self.assertEqual('profile', sot.resource_key) self.assertEqual('profiles', sot.resources_key) self.assertEqual('/profiles/validate', sot.base_path) self.assertTrue(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) self.assertEqual('PUT', sot.commit_method) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_profile_type.py0000664000175000017500000000361500000000000027163 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.clustering.v1 import profile_type from openstack.tests.unit import base FAKE = { 'name': 'FAKE_PROFILE_TYPE', 'schema': {'foo': 'bar'}, 'support_status': { '1.0': [ { 'status': 'supported', 'since': '2016.10', } ] }, } class TestProfileType(base.TestCase): def test_basic(self): sot = profile_type.ProfileType() self.assertEqual('profile_type', sot.resource_key) self.assertEqual('profile_types', sot.resources_key) self.assertEqual('/profile-types', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) def test_instantiate(self): sot = profile_type.ProfileType(**FAKE) self.assertEqual(FAKE['name'], sot._get_id(sot)) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['schema'], sot.schema) self.assertEqual(FAKE['support_status'], sot.support_status) def test_ops(self): sot = profile_type.ProfileType(**FAKE) resp = mock.Mock() resp.json = mock.Mock(return_value='') sess = mock.Mock() sess.get = mock.Mock(return_value=resp) self.assertEqual('', sot.type_ops(sess)) url = 'profile-types/%s/ops' % sot.id sess.get.assert_called_once_with(url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_proxy.py0000664000175000017500000004055100000000000025643 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.clustering.v1 import _proxy from openstack.clustering.v1 import action from openstack.clustering.v1 import build_info from openstack.clustering.v1 import cluster from openstack.clustering.v1 import cluster_attr from openstack.clustering.v1 import cluster_policy from openstack.clustering.v1 import event from openstack.clustering.v1 import node from openstack.clustering.v1 import policy from openstack.clustering.v1 import policy_type from openstack.clustering.v1 import profile from openstack.clustering.v1 import profile_type from openstack.clustering.v1 import receiver from openstack.clustering.v1 import service from openstack import proxy as proxy_base from openstack.tests.unit import test_proxy_base class TestClusterProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_build_info_get(self): self.verify_get( self.proxy.get_build_info, build_info.BuildInfo, method_args=[], expected_kwargs={'requires_id': False}, ) def test_profile_types(self): self.verify_list(self.proxy.profile_types, profile_type.ProfileType) def test_profile_type_get(self): self.verify_get(self.proxy.get_profile_type, profile_type.ProfileType) def test_policy_types(self): self.verify_list(self.proxy.policy_types, policy_type.PolicyType) def test_policy_type_get(self): self.verify_get(self.proxy.get_policy_type, policy_type.PolicyType) def test_profile_create(self): self.verify_create(self.proxy.create_profile, profile.Profile) def test_profile_validate(self): self.verify_create( self.proxy.validate_profile, profile.ProfileValidate ) def test_profile_delete(self): self.verify_delete(self.proxy.delete_profile, profile.Profile, False) def test_profile_delete_ignore(self): self.verify_delete(self.proxy.delete_profile, profile.Profile, True) def test_profile_find(self): self.verify_find(self.proxy.find_profile, profile.Profile) def test_profile_get(self): self.verify_get(self.proxy.get_profile, 
profile.Profile) def test_profiles(self): self.verify_list( self.proxy.profiles, profile.Profile, method_kwargs={'limit': 2}, expected_kwargs={'limit': 2}, ) def test_profile_update(self): self.verify_update(self.proxy.update_profile, profile.Profile) def test_cluster_create(self): self.verify_create(self.proxy.create_cluster, cluster.Cluster) def test_cluster_delete(self): self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, False) def test_cluster_delete_ignore(self): self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, True) def test_cluster_force_delete(self): self._verify( "openstack.clustering.v1.cluster.Cluster.force_delete", self.proxy.delete_cluster, method_args=["value", False, True], expected_args=[self.proxy], ) def test_cluster_find(self): self.verify_find(self.proxy.find_cluster, cluster.Cluster) def test_cluster_get(self): self.verify_get(self.proxy.get_cluster, cluster.Cluster) def test_clusters(self): self.verify_list( self.proxy.clusters, cluster.Cluster, method_kwargs={'limit': 2}, expected_kwargs={'limit': 2}, ) def test_cluster_update(self): self.verify_update(self.proxy.update_cluster, cluster.Cluster) def test_services(self): self.verify_list(self.proxy.services, service.Service) @mock.patch.object(proxy_base.Proxy, '_find') def test_resize_cluster(self, mock_find): mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') mock_find.return_value = mock_cluster self._verify( "openstack.clustering.v1.cluster.Cluster.resize", self.proxy.resize_cluster, method_args=["FAKE_CLUSTER"], method_kwargs={'k1': 'v1', 'k2': 'v2'}, expected_args=[self.proxy], expected_kwargs={'k1': 'v1', 'k2': 'v2'}, ) mock_find.assert_called_once_with( cluster.Cluster, "FAKE_CLUSTER", ignore_missing=False ) def test_resize_cluster_with_obj(self): mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') self._verify( "openstack.clustering.v1.cluster.Cluster.resize", self.proxy.resize_cluster, method_args=[mock_cluster], method_kwargs={'k1': 'v1', 'k2': 'v2'}, 
expected_args=[self.proxy], expected_kwargs={'k1': 'v1', 'k2': 'v2'}, ) def test_collect_cluster_attrs(self): self.verify_list( self.proxy.collect_cluster_attrs, cluster_attr.ClusterAttr, method_args=['FAKE_ID', 'path.to.attr'], expected_args=[], expected_kwargs={'cluster_id': 'FAKE_ID', 'path': 'path.to.attr'}, ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_cluster_check(self, mock_get): mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') mock_get.return_value = mock_cluster self._verify( "openstack.clustering.v1.cluster.Cluster.check", self.proxy.check_cluster, method_args=["FAKE_CLUSTER"], expected_args=[self.proxy], ) mock_get.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER") @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_cluster_recover(self, mock_get): mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') mock_get.return_value = mock_cluster self._verify( "openstack.clustering.v1.cluster.Cluster.recover", self.proxy.recover_cluster, method_args=["FAKE_CLUSTER"], expected_args=[self.proxy], ) mock_get.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER") def test_node_create(self): self.verify_create(self.proxy.create_node, node.Node) def test_node_delete(self): self.verify_delete(self.proxy.delete_node, node.Node, False) def test_node_delete_ignore(self): self.verify_delete(self.proxy.delete_node, node.Node, True) def test_node_force_delete(self): self._verify( "openstack.clustering.v1.node.Node.force_delete", self.proxy.delete_node, method_args=["value", False, True], expected_args=[self.proxy], ) def test_node_find(self): self.verify_find(self.proxy.find_node, node.Node) def test_node_get(self): self.verify_get(self.proxy.get_node, node.Node) def test_node_get_with_details(self): self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_node, method_args=['NODE_ID'], method_kwargs={'details': True}, expected_args=[node.NodeDetail], expected_kwargs={'node_id': 'NODE_ID', 'requires_id': False}, ) def 
test_nodes(self): self.verify_list( self.proxy.nodes, node.Node, method_kwargs={'limit': 2}, expected_kwargs={'limit': 2}, ) def test_node_update(self): self.verify_update(self.proxy.update_node, node.Node) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_node_check(self, mock_get): mock_node = node.Node.new(id='FAKE_NODE') mock_get.return_value = mock_node self._verify( "openstack.clustering.v1.node.Node.check", self.proxy.check_node, method_args=["FAKE_NODE"], expected_args=[self.proxy], ) mock_get.assert_called_once_with(node.Node, "FAKE_NODE") @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_node_recover(self, mock_get): mock_node = node.Node.new(id='FAKE_NODE') mock_get.return_value = mock_node self._verify( "openstack.clustering.v1.node.Node.recover", self.proxy.recover_node, method_args=["FAKE_NODE"], expected_args=[self.proxy], ) mock_get.assert_called_once_with(node.Node, "FAKE_NODE") @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_node_adopt(self, mock_get): mock_node = node.Node.new() mock_get.return_value = mock_node self._verify( "openstack.clustering.v1.node.Node.adopt", self.proxy.adopt_node, method_kwargs={"preview": False, "foo": "bar"}, expected_args=[self.proxy], expected_kwargs={"preview": False, "foo": "bar"}, ) mock_get.assert_called_once_with(node.Node, None) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_node_adopt_preview(self, mock_get): mock_node = node.Node.new() mock_get.return_value = mock_node self._verify( "openstack.clustering.v1.node.Node.adopt", self.proxy.adopt_node, method_kwargs={"preview": True, "foo": "bar"}, expected_args=[self.proxy], expected_kwargs={"preview": True, "foo": "bar"}, ) mock_get.assert_called_once_with(node.Node, None) def test_policy_create(self): self.verify_create(self.proxy.create_policy, policy.Policy) def test_policy_validate(self): self.verify_create(self.proxy.validate_policy, policy.PolicyValidate) def test_policy_delete(self): 
self.verify_delete(self.proxy.delete_policy, policy.Policy, False) def test_policy_delete_ignore(self): self.verify_delete(self.proxy.delete_policy, policy.Policy, True) def test_policy_find(self): self.verify_find(self.proxy.find_policy, policy.Policy) def test_policy_get(self): self.verify_get(self.proxy.get_policy, policy.Policy) def test_policies(self): self.verify_list( self.proxy.policies, policy.Policy, method_kwargs={'limit': 2}, expected_kwargs={'limit': 2}, ) def test_policy_update(self): self.verify_update(self.proxy.update_policy, policy.Policy) def test_cluster_policies(self): self.verify_list( self.proxy.cluster_policies, cluster_policy.ClusterPolicy, method_args=["FAKE_CLUSTER"], expected_args=[], expected_kwargs={"cluster_id": "FAKE_CLUSTER"}, ) def test_get_cluster_policy(self): fake_policy = cluster_policy.ClusterPolicy.new(id="FAKE_POLICY") fake_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') # ClusterPolicy object as input self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_cluster_policy, method_args=[fake_policy, "FAKE_CLUSTER"], expected_args=[cluster_policy.ClusterPolicy, fake_policy], expected_kwargs={'cluster_id': 'FAKE_CLUSTER'}, expected_result=fake_policy, ) # Policy ID as input self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_cluster_policy, method_args=["FAKE_POLICY", "FAKE_CLUSTER"], expected_args=[cluster_policy.ClusterPolicy, "FAKE_POLICY"], expected_kwargs={"cluster_id": "FAKE_CLUSTER"}, ) # Cluster object as input self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_cluster_policy, method_args=["FAKE_POLICY", fake_cluster], expected_args=[cluster_policy.ClusterPolicy, "FAKE_POLICY"], expected_kwargs={"cluster_id": fake_cluster}, ) def test_receiver_create(self): self.verify_create(self.proxy.create_receiver, receiver.Receiver) def test_receiver_update(self): self.verify_update(self.proxy.update_receiver, receiver.Receiver) def test_receiver_delete(self): self.verify_delete( self.proxy.delete_receiver, 
receiver.Receiver, False ) def test_receiver_delete_ignore(self): self.verify_delete(self.proxy.delete_receiver, receiver.Receiver, True) def test_receiver_find(self): self.verify_find(self.proxy.find_receiver, receiver.Receiver) def test_receiver_get(self): self.verify_get(self.proxy.get_receiver, receiver.Receiver) def test_receivers(self): self.verify_list( self.proxy.receivers, receiver.Receiver, method_kwargs={'limit': 2}, expected_kwargs={'limit': 2}, ) def test_action_get(self): self.verify_get(self.proxy.get_action, action.Action) def test_actions(self): self.verify_list( self.proxy.actions, action.Action, method_kwargs={'limit': 2}, expected_kwargs={'limit': 2}, ) def test_action_update(self): self.verify_update(self.proxy.update_action, action.Action) def test_event_get(self): self.verify_get(self.proxy.get_event, event.Event) def test_events(self): self.verify_list( self.proxy.events, event.Event, method_kwargs={'limit': 2}, expected_kwargs={'limit': 2}, ) @mock.patch("openstack.resource.wait_for_status") def test_wait_for(self, mock_wait): mock_resource = mock.Mock() mock_wait.return_value = mock_resource self.proxy.wait_for_status(mock_resource, 'ACTIVE') mock_wait.assert_called_once_with( self.proxy, mock_resource, 'ACTIVE', [], 2, 120 ) @mock.patch("openstack.resource.wait_for_status") def test_wait_for_params(self, mock_wait): mock_resource = mock.Mock() mock_wait.return_value = mock_resource self.proxy.wait_for_status(mock_resource, 'ACTIVE', ['ERROR'], 1, 2) mock_wait.assert_called_once_with( self.proxy, mock_resource, 'ACTIVE', ['ERROR'], 1, 2 ) @mock.patch("openstack.resource.wait_for_delete") def test_wait_for_delete(self, mock_wait): mock_resource = mock.Mock() mock_wait.return_value = mock_resource self.proxy.wait_for_delete(mock_resource) mock_wait.assert_called_once_with(self.proxy, mock_resource, 2, 120) @mock.patch("openstack.resource.wait_for_delete") def test_wait_for_delete_params(self, mock_wait): mock_resource = mock.Mock() 
mock_wait.return_value = mock_resource self.proxy.wait_for_delete(mock_resource, 1, 2) mock_wait.assert_called_once_with(self.proxy, mock_resource, 1, 2) def test_get_cluster_metadata(self): self._verify( "openstack.clustering.v1.cluster.Cluster.fetch_metadata", self.proxy.get_cluster_metadata, method_args=["value"], expected_args=[self.proxy], expected_result=cluster.Cluster(id="value", metadata={}), ) def test_set_cluster_metadata(self): kwargs = {"a": "1", "b": "2"} id = "an_id" self._verify( "openstack.clustering.v1.cluster.Cluster.set_metadata", self.proxy.set_cluster_metadata, method_args=[id], method_kwargs=kwargs, method_result=cluster.Cluster.existing(id=id, metadata=kwargs), expected_args=[self.proxy], expected_kwargs={'metadata': kwargs}, expected_result=cluster.Cluster.existing(id=id, metadata=kwargs), ) def test_delete_cluster_metadata(self): self._verify( "openstack.clustering.v1.cluster.Cluster.delete_metadata_item", self.proxy.delete_cluster_metadata, expected_result=None, method_args=["value", ["key"]], expected_args=[self.proxy, "key"], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_receiver.py0000664000175000017500000000461600000000000026270 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.clustering.v1 import receiver from openstack.tests.unit import base FAKE_ID = 'ae63a10b-4a90-452c-aef1-113a0b255ee3' FAKE_NAME = 'test_receiver' FAKE = { 'id': FAKE_ID, 'name': FAKE_NAME, 'type': 'webhook', 'cluster_id': 'FAKE_CLUSTER', 'action': 'CLUSTER_RESIZE', 'created_at': '2015-10-10T12:46:36.000000', 'updated_at': '2016-10-10T12:46:36.000000', 'actor': {}, 'params': {'adjustment_type': 'CHANGE_IN_CAPACITY', 'adjustment': 2}, 'channel': { 'alarm_url': 'http://host:port/webhooks/AN_ID/trigger?V=1', }, 'user': 'FAKE_USER', 'project': 'FAKE_PROJECT', 'domain': '', } class TestReceiver(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = receiver.Receiver() self.assertEqual('receiver', sot.resource_key) self.assertEqual('receivers', sot.resources_key) self.assertEqual('/receivers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_instantiate(self): sot = receiver.Receiver(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['type'], sot.type) self.assertEqual(FAKE['cluster_id'], sot.cluster_id) self.assertEqual(FAKE['action'], sot.action) self.assertEqual(FAKE['params'], sot.params) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['updated_at'], sot.updated_at) self.assertEqual(FAKE['user'], sot.user_id) self.assertEqual(FAKE['project'], sot.project_id) self.assertEqual(FAKE['domain'], sot.domain_id) self.assertEqual(FAKE['channel'], sot.channel) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/clustering/v1/test_service.py0000664000175000017500000000347600000000000026127 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.clustering.v1 import service from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'binary': 'senlin-engine', 'host': 'host1', 'status': 'enabled', 'state': 'up', 'disabled_reason': None, 'updated_at': '2016-10-10T12:46:36.000000', } class TestService(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.sess = mock.Mock() self.sess.put = mock.Mock(return_value=self.resp) def test_basic(self): sot = service.Service() self.assertEqual('service', sot.resource_key) self.assertEqual('services', sot.resources_key) self.assertEqual('/services', sot.base_path) self.assertTrue(sot.allow_list) def test_make_it(self): sot = service.Service(**EXAMPLE) self.assertEqual(EXAMPLE['host'], sot.host) self.assertEqual(EXAMPLE['binary'], sot.binary) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['state'], sot.state) self.assertEqual(EXAMPLE['disabled_reason'], sot.disabled_reason) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.429402 openstacksdk-4.0.0/openstack/tests/unit/common/0000775000175000017500000000000000000000000021627 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/common/__init__.py0000664000175000017500000000000000000000000023726 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/common/test_metadata.py0000664000175000017500000001551100000000000025023 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.common import metadata from openstack import exceptions from openstack import resource from openstack.tests.unit import base from openstack.tests.unit.test_resource import FakeResponse IDENTIFIER = 'IDENTIFIER' class TestMetadata(base.TestCase): def setUp(self): super().setUp() self.service_name = "service" self.base_path = "base_path" self.metadata_result = {"metadata": {"go": "cubs", "boo": "sox"}} self.meta_result = {"meta": {"oh": "yeah"}} class Test(resource.Resource, metadata.MetadataMixin): service = self.service_name base_path = self.base_path resources_key = 'resources' allow_create = True allow_fetch = True allow_head = True allow_commit = True allow_delete = True allow_list = True self.test_class = Test self.request = mock.Mock(spec=resource._Request) self.request.url = "uri" self.request.body = "body" self.request.headers = "headers" self.response = FakeResponse({}) self.sot = Test.new(id="id") self.sot._prepare_request = mock.Mock(return_value=self.request) 
self.sot._translate_response = mock.Mock() self.session = mock.Mock(spec=adapter.Adapter) self.session.get = mock.Mock(return_value=self.response) self.session.put = mock.Mock(return_value=self.response) self.session.post = mock.Mock(return_value=self.response) self.session.delete = mock.Mock(return_value=self.response) def test_metadata_attribute(self): res = self.sot self.assertTrue(hasattr(res, 'metadata')) def test_get_metadata(self): res = self.sot mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.return_value = {'metadata': {'foo': 'bar'}} self.session.get.side_effect = [mock_response] result = res.fetch_metadata(self.session) # Check metadata attribute is updated self.assertDictEqual({'foo': 'bar'}, result.metadata) # Check passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/metadata' self.session.get.assert_called_once_with(url) def test_set_metadata(self): res = self.sot result = res.set_metadata(self.session, {'foo': 'bar'}) # Check metadata attribute is updated self.assertDictEqual({'foo': 'bar'}, res.metadata) # Check passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/metadata' self.session.post.assert_called_once_with( url, json={'metadata': {'foo': 'bar'}} ) def test_replace_metadata(self): res = self.sot result = res.replace_metadata(self.session, {'foo': 'bar'}) # Check metadata attribute is updated self.assertDictEqual({'foo': 'bar'}, res.metadata) # Check passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/metadata' self.session.put.assert_called_once_with( url, json={'metadata': {'foo': 'bar'}} ) def test_delete_all_metadata(self): res = self.sot # Set some initial value to check removal res.metadata = {'foo': 'bar'} result = res.delete_metadata(self.session) # Check metadata attribute is updated self.assertEqual({}, res.metadata) # Check passed resource is 
returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/metadata' self.session.put.assert_called_once_with(url, json={'metadata': {}}) def test_get_metadata_item(self): res = self.sot mock_response = mock.Mock() mock_response.status_code = 200 mock_response.json.return_value = {'meta': {'foo': 'bar'}} self.session.get.side_effect = [mock_response] result = res.get_metadata_item(self.session, 'foo') # Check tags attribute is updated self.assertEqual({'foo': 'bar'}, res.metadata) # Check the passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/metadata/foo' self.session.get.assert_called_once_with(url) def test_delete_single_item(self): res = self.sot res.metadata = {'foo': 'bar', 'foo2': 'bar2'} result = res.delete_metadata_item(self.session, 'foo2') # Check metadata attribute is updated self.assertEqual({'foo': 'bar'}, res.metadata) # Check passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/metadata/foo2' self.session.delete.assert_called_once_with(url) def test_delete_signle_item_empty(self): res = self.sot result = res.delete_metadata_item(self.session, 'foo2') # Check metadata attribute is updated self.assertEqual({}, res.metadata) # Check passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/metadata/foo2' self.session.delete.assert_called_once_with(url) def test_get_metadata_item_not_exists(self): res = self.sot mock_response = mock.Mock() mock_response.status_code = 404 mock_response.content = None self.session.get.side_effect = [mock_response] # ensure we get 404 self.assertRaises( exceptions.NotFoundException, res.get_metadata_item, self.session, 'dummy', ) def test_set_metadata_item(self): res = self.sot # Set some initial value to check add res.metadata = {'foo': 'bar'} result = res.set_metadata_item(self.session, 'foo', 'black') # Check metadata attribute is updated self.assertEqual({'foo': 
'black'}, res.metadata) # Check passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/metadata/foo' self.session.put.assert_called_once_with( url, json={'meta': {'foo': 'black'}} ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/common/test_quota_set.py0000664000175000017500000001133000000000000025242 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock from keystoneauth1 import adapter from openstack.common import quota_set as _qs from openstack.tests.unit import base BASIC_EXAMPLE = { "backup_gigabytes": 1000, "backups": 10, "gigabytes___DEFAULT__": -1, } USAGE_EXAMPLE = { "backup_gigabytes": {"in_use": 0, "limit": 1000, "reserved": 0}, "backups": {"in_use": 0, "limit": 10, "reserved": 0}, "gigabytes___DEFAULT__": {"in_use": 0, "limit": -1, "reserved": 0}, } class TestQuotaSet(base.TestCase): def setUp(self): super().setUp() self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = 1 self.sess._get_connection = mock.Mock(return_value=self.cloud) self.sess.retriable_status_codes = set() def test_basic(self): sot = _qs.QuotaSet() self.assertEqual('quota_set', sot.resource_key) self.assertIsNone(sot.resources_key) self.assertEqual('/os-quota-sets/%(project_id)s', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertFalse(sot.allow_list) self.assertTrue(sot.allow_commit) self.assertDictEqual( {"usage": "usage", "limit": "limit", "marker": "marker"}, sot._query_mapping._mapping, ) def test_make_basic(self): sot = _qs.QuotaSet(**BASIC_EXAMPLE) self.assertEqual(BASIC_EXAMPLE['backups'], sot.backups) def test_get(self): sot = _qs.QuotaSet(project_id='proj') resp = mock.Mock() resp.body = {'quota_set': copy.deepcopy(BASIC_EXAMPLE)} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 resp.headers = {} self.sess.get = mock.Mock(return_value=resp) sot.fetch(self.sess) self.sess.get.assert_called_with( '/os-quota-sets/proj', microversion=1, params={}, skip_cache=False ) self.assertEqual(BASIC_EXAMPLE['backups'], sot.backups) self.assertEqual({}, sot.reservation) self.assertEqual({}, sot.usage) def test_get_usage(self): sot = _qs.QuotaSet(project_id='proj') resp = mock.Mock() resp.body = {'quota_set': copy.deepcopy(USAGE_EXAMPLE)} resp.json = mock.Mock(return_value=resp.body) 
resp.status_code = 200 resp.headers = {} self.sess.get = mock.Mock(return_value=resp) sot.fetch(self.sess, usage=True) self.sess.get.assert_called_with( '/os-quota-sets/proj', microversion=1, params={'usage': True}, skip_cache=False, ) self.assertEqual(USAGE_EXAMPLE['backups']['limit'], sot.backups) def test_update_quota(self): # Use QuotaSet as if it was returned by get(usage=True) sot = _qs.QuotaSet.existing( project_id='proj', reservation={'a': 'b'}, usage={'c': 'd'}, foo='bar', ) resp = mock.Mock() resp.body = {'quota_set': copy.deepcopy(BASIC_EXAMPLE)} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 resp.headers = {} self.sess.put = mock.Mock(return_value=resp) sot._update(reservation={'b': 'd'}, backups=15, something_else=20) sot.commit(self.sess) self.sess.put.assert_called_with( '/os-quota-sets/proj', microversion=1, headers={}, json={'quota_set': {'backups': 15, 'something_else': 20}}, ) def test_delete_quota(self): # Use QuotaSet as if it was returned by get(usage=True) sot = _qs.QuotaSet.existing( project_id='proj', reservation={'a': 'b'}, usage={'c': 'd'}, foo='bar', ) resp = mock.Mock() resp.body = None resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 resp.headers = {} self.sess.delete = mock.Mock(return_value=resp) sot.delete(self.sess) self.sess.delete.assert_called_with( '/os-quota-sets/proj', microversion=1, headers={}, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/common/test_tag.py0000664000175000017500000001342600000000000024021 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.common import tag from openstack import exceptions from openstack import resource from openstack.tests.unit import base from openstack.tests.unit.test_resource import FakeResponse class TestTagMixin(base.TestCase): def setUp(self): super().setUp() self.service_name = "service" self.base_path = "base_path" class Test(resource.Resource, tag.TagMixin): service = self.service_name base_path = self.base_path resources_key = 'resources' allow_create = True allow_fetch = True allow_head = True allow_commit = True allow_delete = True allow_list = True self.test_class = Test self.request = mock.Mock(spec=resource._Request) self.request.url = "uri" self.request.body = "body" self.request.headers = "headers" self.response = FakeResponse({}) self.sot = Test.new(id="id", tags=[]) self.sot._prepare_request = mock.Mock(return_value=self.request) self.sot._translate_response = mock.Mock() self.session = mock.Mock(spec=adapter.Adapter) self.session.get = mock.Mock(return_value=self.response) self.session.put = mock.Mock(return_value=self.response) self.session.delete = mock.Mock(return_value=self.response) def test_tags_attribute(self): res = self.sot self.assertTrue(hasattr(res, 'tags')) self.assertIsInstance(res.tags, list) def test_fetch_tags(self): res = self.sot sess = self.session mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.return_value = {'tags': ['blue1', 'green1']} sess.get.side_effect = [mock_response] result = res.fetch_tags(sess) # Check 
tags attribute is updated self.assertEqual(['blue1', 'green1'], res.tags) # Check the passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/tags' sess.get.assert_called_once_with(url) def test_set_tags(self): res = self.sot sess = self.session # Set some initial value to check rewrite res.tags.extend(['blue_old', 'green_old']) result = res.set_tags(sess, ['blue', 'green']) # Check tags attribute is updated self.assertEqual(['blue', 'green'], res.tags) # Check the passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/tags' sess.put.assert_called_once_with(url, json={'tags': ['blue', 'green']}) def test_remove_all_tags(self): res = self.sot sess = self.session # Set some initial value to check removal res.tags.extend(['blue_old', 'green_old']) result = res.remove_all_tags(sess) # Check tags attribute is updated self.assertEqual([], res.tags) # Check the passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/tags' sess.delete.assert_called_once_with(url) def test_remove_single_tag(self): res = self.sot sess = self.session res.tags.extend(['blue', 'dummy']) result = res.remove_tag(sess, 'dummy') # Check tags attribute is updated self.assertEqual(['blue'], res.tags) # Check the passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/tags/dummy' sess.delete.assert_called_once_with(url) def test_check_tag_exists(self): res = self.sot sess = self.session sess.get.side_effect = [FakeResponse(None, 202)] result = res.check_tag(sess, 'blue') # Check tags attribute is updated self.assertEqual([], res.tags) # Check the passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/tags/blue' sess.get.assert_called_once_with(url) def test_check_tag_not_exists(self): res = self.sot sess = self.session mock_response = mock.Mock() mock_response.status_code = 404 
mock_response.links = {} mock_response.content = None sess.get.side_effect = [mock_response] # ensure we get 404 self.assertRaises( exceptions.NotFoundException, res.check_tag, sess, 'dummy', ) def test_add_tag(self): res = self.sot sess = self.session # Set some initial value to check add res.tags.extend(['blue', 'green']) result = res.add_tag(sess, 'lila') # Check tags attribute is updated self.assertEqual(['blue', 'green', 'lila'], res.tags) # Check the passed resource is returned self.assertEqual(res, result) url = self.base_path + '/' + res.id + '/tags/lila' sess.put.assert_called_once_with(url) def test_tagged_resource_always_created_with_empty_tag_list(self): res = self.sot self.assertIsNotNone(res.tags) self.assertEqual(res.tags, list()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.433404 openstacksdk-4.0.0/openstack/tests/unit/compute/0000775000175000017500000000000000000000000022013 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/__init__.py0000664000175000017500000000000000000000000024112 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/test_version.py0000664000175000017500000000272100000000000025113 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.compute import version from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'status': '3', 'updated': '4', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['updated'], sot.updated) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4374058 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/0000775000175000017500000000000000000000000022342 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/__init__.py0000664000175000017500000000000000000000000024441 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_aggregate.py0000664000175000017500000000730700000000000025710 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.compute.v2 import aggregate from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "name": "m-family", "availability_zone": None, "deleted": False, "created_at": "2018-07-06T14:58:16.000000", "updated_at": None, "hosts": ["oscomp-m001", "oscomp-m002", "oscomp-m003"], "deleted_at": None, "id": 4, "uuid": IDENTIFIER, "metadata": {"type": "public", "family": "m-family"}, } class TestAggregate(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = EXAMPLE.copy() self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.status_code = 200 self.resp.headers = {'Accept': ''} self.sess = mock.Mock(spec=adapter.Adapter) self.sess.post = mock.Mock(return_value=self.resp) def test_basic(self): sot = aggregate.Aggregate() self.assertEqual('aggregate', sot.resource_key) self.assertEqual('aggregates', sot.resources_key) self.assertEqual('/os-aggregates', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = aggregate.Aggregate(**EXAMPLE) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['availability_zone'], sot.availability_zone) self.assertEqual(EXAMPLE['deleted'], sot.is_deleted) self.assertEqual(EXAMPLE['deleted_at'], sot.deleted_at) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) 
self.assertEqual(EXAMPLE['hosts'], sot.hosts) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['uuid'], sot.uuid) self.assertDictEqual(EXAMPLE['metadata'], sot.metadata) def test_add_host(self): sot = aggregate.Aggregate(**EXAMPLE) sot.add_host(self.sess, 'host1') url = 'os-aggregates/4/action' body = {"add_host": {"host": "host1"}} self.sess.post.assert_called_with(url, json=body, microversion=None) def test_remove_host(self): sot = aggregate.Aggregate(**EXAMPLE) sot.remove_host(self.sess, 'host1') url = 'os-aggregates/4/action' body = {"remove_host": {"host": "host1"}} self.sess.post.assert_called_with(url, json=body, microversion=None) def test_set_metadata(self): sot = aggregate.Aggregate(**EXAMPLE) sot.set_metadata(self.sess, {"key: value"}) url = 'os-aggregates/4/action' body = {"set_metadata": {"metadata": {"key: value"}}} self.sess.post.assert_called_with(url, json=body, microversion=None) def test_precache_image(self): sot = aggregate.Aggregate(**EXAMPLE) sot.precache_images(self.sess, ['1']) url = 'os-aggregates/4/images' body = {"cache": ['1']} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_availability_zone.py0000664000175000017500000000255700000000000027471 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.compute.v2 import availability_zone as az from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' BASIC_EXAMPLE = { 'id': IDENTIFIER, 'zoneState': 'available', 'hosts': 'host1', 'zoneName': 'zone1', } class TestAvailabilityZone(base.TestCase): def test_basic(self): sot = az.AvailabilityZone() self.assertEqual('availabilityZoneInfo', sot.resources_key) self.assertEqual('/os-availability-zone', sot.base_path) self.assertTrue(sot.allow_list) def test_make_basic(self): sot = az.AvailabilityZone(**BASIC_EXAMPLE) self.assertEqual(BASIC_EXAMPLE['id'], sot.id) self.assertEqual(BASIC_EXAMPLE['zoneState'], sot.state) self.assertEqual(BASIC_EXAMPLE['hosts'], sot.hosts) self.assertEqual(BASIC_EXAMPLE['zoneName'], sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_extension.py0000664000175000017500000000326200000000000025772 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.compute.v2 import extension from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'alias': '1', 'description': '2', 'links': [], 'name': '4', 'namespace': '5', 'updated': '2015-03-09T12:14:57.233772', } class TestExtension(base.TestCase): def test_basic(self): sot = extension.Extension() self.assertEqual('extension', sot.resource_key) self.assertEqual('extensions', sot.resources_key) self.assertEqual('/extensions', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = extension.Extension(**EXAMPLE) self.assertEqual(EXAMPLE['alias'], sot.alias) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['namespace'], sot.namespace) self.assertEqual(EXAMPLE['updated'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_flavor.py0000664000175000017500000002111600000000000025245 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from keystoneauth1 import adapter from openstack.compute.v2 import flavor from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' BASIC_EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'name': '3', 'description': 'Testing flavor', 'disk': 4, 'os-flavor-access:is_public': True, 'ram': 6, 'vcpus': 7, 'swap': 8, 'OS-FLV-EXT-DATA:ephemeral': 9, 'OS-FLV-DISABLED:disabled': False, 'rxtx_factor': 11.0, } DEFAULTS_EXAMPLE = { 'links': '2', 'original_name': IDENTIFIER, 'description': 'Testing flavor', } class TestFlavor(base.TestCase): def setUp(self): super().setUp() self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = 1 self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_basic(self): sot = flavor.Flavor() self.assertEqual('flavor', sot.resource_key) self.assertEqual('flavors', sot.resources_key) self.assertEqual('/flavors', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_commit) self.assertDictEqual( { "sort_key": "sort_key", "sort_dir": "sort_dir", "min_disk": "minDisk", "min_ram": "minRam", "limit": "limit", "marker": "marker", "is_public": "is_public", }, sot._query_mapping._mapping, ) def test_make_basic(self): sot = flavor.Flavor(**BASIC_EXAMPLE) self.assertEqual(BASIC_EXAMPLE['id'], sot.id) self.assertEqual(BASIC_EXAMPLE['name'], sot.name) self.assertEqual(BASIC_EXAMPLE['description'], sot.description) self.assertEqual(BASIC_EXAMPLE['disk'], sot.disk) self.assertEqual( BASIC_EXAMPLE['os-flavor-access:is_public'], sot.is_public ) self.assertEqual(BASIC_EXAMPLE['ram'], sot.ram) self.assertEqual(BASIC_EXAMPLE['vcpus'], sot.vcpus) self.assertEqual(BASIC_EXAMPLE['swap'], sot.swap) self.assertEqual( BASIC_EXAMPLE['OS-FLV-EXT-DATA:ephemeral'], sot.ephemeral ) self.assertEqual( BASIC_EXAMPLE['OS-FLV-DISABLED:disabled'], sot.is_disabled ) 
self.assertEqual(BASIC_EXAMPLE['rxtx_factor'], sot.rxtx_factor) def test_make_basic_swap(self): sot = flavor.Flavor(id=IDENTIFIER, swap="") self.assertEqual(0, sot.swap) sot1 = flavor.Flavor(id=IDENTIFIER, swap=0) self.assertEqual(0, sot1.swap) def test_make_defaults(self): sot = flavor.Flavor(**DEFAULTS_EXAMPLE) self.assertEqual(DEFAULTS_EXAMPLE['original_name'], sot.name) self.assertEqual(0, sot.disk) self.assertEqual(True, sot.is_public) self.assertEqual(0, sot.ram) self.assertEqual(0, sot.vcpus) self.assertEqual(0, sot.swap) self.assertEqual(0, sot.ephemeral) self.assertEqual(IDENTIFIER, sot.id) def test_flavor_id(self): id = 'fake_id' sot = flavor.Flavor(id=id) self.assertEqual(sot.id, id) sot = flavor.Flavor(name=id) self.assertEqual(sot.id, id) self.assertEqual(sot.name, id) sot = flavor.Flavor(original_name=id) self.assertEqual(sot.id, id) self.assertEqual(sot.original_name, id) def test_add_tenant_access(self): sot = flavor.Flavor(**BASIC_EXAMPLE) resp = mock.Mock() resp.body = None resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.post = mock.Mock(return_value=resp) sot.add_tenant_access(self.sess, 'fake_tenant') self.sess.post.assert_called_with( 'flavors/IDENTIFIER/action', json={'addTenantAccess': {'tenant': 'fake_tenant'}}, headers={'Accept': ''}, ) def test_remove_tenant_access(self): sot = flavor.Flavor(**BASIC_EXAMPLE) resp = mock.Mock() resp.body = None resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.post = mock.Mock(return_value=resp) sot.remove_tenant_access(self.sess, 'fake_tenant') self.sess.post.assert_called_with( 'flavors/IDENTIFIER/action', json={'removeTenantAccess': {'tenant': 'fake_tenant'}}, headers={'Accept': ''}, ) def test_get_flavor_access(self): sot = flavor.Flavor(**BASIC_EXAMPLE) resp = mock.Mock() resp.body = { 'flavor_access': [ {'flavor_id': 'fake_flavor', 'tenant_id': 'fake_tenant'} ] } resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 
self.sess.get = mock.Mock(return_value=resp) rsp = sot.get_access(self.sess) self.sess.get.assert_called_with( 'flavors/IDENTIFIER/os-flavor-access', ) self.assertEqual(resp.body['flavor_access'], rsp) def test_fetch_extra_specs(self): sot = flavor.Flavor(**BASIC_EXAMPLE) resp = mock.Mock() resp.body = {'extra_specs': {'a': 'b', 'c': 'd'}} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.get = mock.Mock(return_value=resp) rsp = sot.fetch_extra_specs(self.sess) self.sess.get.assert_called_with( 'flavors/IDENTIFIER/os-extra_specs', microversion=self.sess.default_microversion, ) self.assertEqual(resp.body['extra_specs'], rsp.extra_specs) self.assertIsInstance(rsp, flavor.Flavor) def test_create_extra_specs(self): sot = flavor.Flavor(**BASIC_EXAMPLE) specs = {'a': 'b', 'c': 'd'} resp = mock.Mock() resp.body = {'extra_specs': specs} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.post = mock.Mock(return_value=resp) rsp = sot.create_extra_specs(self.sess, specs) self.sess.post.assert_called_with( 'flavors/IDENTIFIER/os-extra_specs', json={'extra_specs': specs}, microversion=self.sess.default_microversion, ) self.assertEqual(resp.body['extra_specs'], rsp.extra_specs) self.assertIsInstance(rsp, flavor.Flavor) def test_get_extra_specs_property(self): sot = flavor.Flavor(**BASIC_EXAMPLE) resp = mock.Mock() resp.body = {'a': 'b'} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.get = mock.Mock(return_value=resp) rsp = sot.get_extra_specs_property(self.sess, 'a') self.sess.get.assert_called_with( 'flavors/IDENTIFIER/os-extra_specs/a', microversion=self.sess.default_microversion, ) self.assertEqual('b', rsp) def test_update_extra_specs_property(self): sot = flavor.Flavor(**BASIC_EXAMPLE) resp = mock.Mock() resp.body = {'a': 'b'} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.put = mock.Mock(return_value=resp) rsp = sot.update_extra_specs_property(self.sess, 
'a', 'b') self.sess.put.assert_called_with( 'flavors/IDENTIFIER/os-extra_specs/a', json={'a': 'b'}, microversion=self.sess.default_microversion, ) self.assertEqual('b', rsp) def test_delete_extra_specs_property(self): sot = flavor.Flavor(**BASIC_EXAMPLE) resp = mock.Mock() resp.body = None resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.delete = mock.Mock(return_value=resp) rsp = sot.delete_extra_specs_property(self.sess, 'a') self.sess.delete.assert_called_with( 'flavors/IDENTIFIER/os-extra_specs/a', microversion=self.sess.default_microversion, ) self.assertIsNone(rsp) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_hypervisor.py0000664000175000017500000001347400000000000026176 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock from keystoneauth1 import adapter from openstack.compute.v2 import hypervisor from openstack import exceptions from openstack.tests.unit import base EXAMPLE = { "cpu_info": { "arch": "x86_64", "model": "Nehalem", "vendor": "Intel", "features": ["pge", "clflush"], "topology": {"cores": 1, "threads": 1, "sockets": 4}, }, "state": "up", "status": "enabled", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb", }, ], "host_ip": "1.1.1.1", "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d", "uptime": ( " 08:32:11 up 93 days, 18:25, 12 users, " "load average: 0.20, 0.12, 0.14" ), "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", "id": "5d343e1d-938e-4284-b98b-6a2b5406ba76", "disabled_reason": None, }, # deprecated attributes "vcpus_used": 0, "local_gb_used": 0, "vcpus": 8, "memory_mb_used": 512, "memory_mb": 7980, "current_workload": 0, "running_vms": 0, "free_disk_gb": 157, "disk_available_least": 140, "local_gb": 157, "free_ram_mb": 7468, } class TestHypervisor(base.TestCase): def setUp(self): super().setUp() self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = 1 self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_basic(self): sot = hypervisor.Hypervisor() self.assertEqual('hypervisor', sot.resource_key) self.assertEqual('hypervisors', sot.resources_key) self.assertEqual('/os-hypervisors', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'hypervisor_hostname_pattern': 'hypervisor_hostname_pattern', 'limit': 'limit', 'marker': 'marker', 'with_servers': 'with_servers', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = hypervisor.Hypervisor(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) 
self.assertEqual(EXAMPLE['cpu_info'], sot.cpu_info) self.assertEqual(EXAMPLE['host_ip'], sot.host_ip) self.assertEqual(EXAMPLE['hypervisor_type'], sot.hypervisor_type) self.assertEqual(EXAMPLE['hypervisor_version'], sot.hypervisor_version) self.assertEqual(EXAMPLE['hypervisor_hostname'], sot.name) self.assertEqual(EXAMPLE['service'], sot.service_details) self.assertEqual(EXAMPLE['servers'], sot.servers) self.assertEqual(EXAMPLE['state'], sot.state) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['uptime'], sot.uptime) # Verify deprecated attributes self.assertEqual(EXAMPLE['vcpus_used'], sot.vcpus_used) self.assertEqual(EXAMPLE['hypervisor_type'], sot.hypervisor_type) self.assertEqual(EXAMPLE['local_gb_used'], sot.local_disk_used) self.assertEqual(EXAMPLE['vcpus'], sot.vcpus) self.assertEqual(EXAMPLE['vcpus_used'], sot.vcpus_used) self.assertEqual(EXAMPLE['memory_mb_used'], sot.memory_used) self.assertEqual(EXAMPLE['memory_mb'], sot.memory_size) self.assertEqual(EXAMPLE['current_workload'], sot.current_workload) self.assertEqual(EXAMPLE['running_vms'], sot.running_vms) self.assertEqual(EXAMPLE['free_disk_gb'], sot.local_disk_free) self.assertEqual(EXAMPLE['disk_available_least'], sot.disk_available) self.assertEqual(EXAMPLE['local_gb'], sot.local_disk_size) self.assertEqual(EXAMPLE['free_ram_mb'], sot.memory_free) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_get_uptime(self, mv_mock): sot = hypervisor.Hypervisor(**copy.deepcopy(EXAMPLE)) rsp = { "hypervisor": { "hypervisor_hostname": "fake-mini", "id": sot.id, "state": "up", "status": "enabled", "uptime": "08:32:11 up 93 days, 18:25, 12 users", } } resp = mock.Mock() resp.body = copy.deepcopy(rsp) resp.json = mock.Mock(return_value=resp.body) resp.headers = {} resp.status_code = 200 self.sess.get = mock.Mock(return_value=resp) hyp = sot.get_uptime(self.sess) self.sess.get.assert_called_with( f'os-hypervisors/{sot.id}/uptime', 
microversion=self.sess.default_microversion, ) self.assertEqual(rsp['hypervisor']['uptime'], hyp.uptime) self.assertEqual(rsp['hypervisor']['status'], sot.status) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) def test_get_uptime_after_2_88(self, mv_mock): sot = hypervisor.Hypervisor(**copy.deepcopy(EXAMPLE)) self.assertRaises(exceptions.SDKException, sot.get_uptime, self.sess) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_image.py0000664000175000017500000000521700000000000025042 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.compute.v2 import image from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'name': '3', 'created': '2015-03-09T12:14:57.233772', 'metadata': {'key': '2'}, 'minDisk': 3, 'minRam': 4, 'progress': 5, 'status': '6', 'updated': '2015-03-09T12:15:57.233772', 'OS-EXT-IMG-SIZE:size': 8, } class TestImage(base.TestCase): def test_basic(self): sot = image.Image() self.assertEqual('image', sot.resource_key) self.assertEqual('images', sot.resources_key) self.assertEqual('/images', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { "server": "server", "name": "name", "status": "status", "type": "type", "min_disk": "minDisk", "min_ram": "minRam", "changes_since": "changes-since", "limit": "limit", "marker": "marker", }, sot._query_mapping._mapping, ) def test_make_basic(self): sot = image.Image(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['created'], sot.created_at) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['metadata'], sot.metadata) self.assertEqual(EXAMPLE['minDisk'], sot.min_disk) self.assertEqual(EXAMPLE['minRam'], sot.min_ram) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['progress'], sot.progress) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['updated'], sot.updated_at) self.assertEqual(EXAMPLE['OS-EXT-IMG-SIZE:size'], sot.size) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_keypair.py0000664000175000017500000000424600000000000025425 0ustar00zuulzuul00000000000000# Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.compute.v2 import keypair from openstack.tests.unit import base EXAMPLE = { 'created_at': 'some_time', 'deleted': False, 'fingerprint': '1', 'name': '2', 'public_key': '3', 'private_key': '4', 'type': 'ssh', 'user_id': '5', } class TestKeypair(base.TestCase): def test_basic(self): sot = keypair.Keypair() self.assertEqual('keypair', sot.resource_key) self.assertEqual('keypairs', sot.resources_key) self.assertEqual('/os-keypairs', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( {'limit': 'limit', 'marker': 'marker', 'user_id': 'user_id'}, sot._query_mapping._mapping, ) def test_make_it(self): sot = keypair.Keypair(**EXAMPLE) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['deleted'], sot.is_deleted) self.assertEqual(EXAMPLE['fingerprint'], sot.fingerprint) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['public_key'], sot.public_key) self.assertEqual(EXAMPLE['private_key'], sot.private_key) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['user_id'], sot.user_id) def test_make_it_defaults(self): EXAMPLE_DEFAULT = EXAMPLE.copy() EXAMPLE_DEFAULT.pop('type') sot = keypair.Keypair(**EXAMPLE_DEFAULT) self.assertEqual(EXAMPLE['type'], sot.type) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_limits.py0000664000175000017500000001750100000000000025260 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from keystoneauth1 import adapter from openstack.compute.v2 import limits from openstack.tests.unit import base ABSOLUTE_LIMITS = { "maxImageMeta": 128, "maxSecurityGroupRules": 20, "maxSecurityGroups": 10, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalFloatingIpsUsed": 1, "totalSecurityGroupsUsed": 2, "totalRAMUsed": 4, "totalInstancesUsed": 5, "totalServerGroupsUsed": 6, "totalCoresUsed": 7, } RATE_LIMIT = { "limit": [ { "next-available": "2012-11-27T17:22:18Z", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "POST", }, ], "regex": ".*", "uri": "*", } LIMITS_BODY = {"limits": {"absolute": ABSOLUTE_LIMITS, "rate": [RATE_LIMIT]}} class TestAbsoluteLimits(base.TestCase): def test_basic(self): sot = limits.AbsoluteLimits() self.assertIsNone(sot.resource_key) self.assertIsNone(sot.resources_key) self.assertEqual("", sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = 
limits.AbsoluteLimits(**ABSOLUTE_LIMITS) self.assertEqual(ABSOLUTE_LIMITS["maxImageMeta"], sot.image_meta) self.assertEqual( ABSOLUTE_LIMITS["maxSecurityGroupRules"], sot.security_group_rules ) self.assertEqual( ABSOLUTE_LIMITS["maxSecurityGroups"], sot.security_groups ) self.assertEqual(ABSOLUTE_LIMITS["maxServerMeta"], sot.server_meta) self.assertEqual(ABSOLUTE_LIMITS["maxTotalCores"], sot.total_cores) self.assertEqual( ABSOLUTE_LIMITS["maxTotalFloatingIps"], sot.floating_ips ) self.assertEqual(ABSOLUTE_LIMITS["maxTotalInstances"], sot.instances) self.assertEqual(ABSOLUTE_LIMITS["maxTotalKeypairs"], sot.keypairs) self.assertEqual(ABSOLUTE_LIMITS["maxTotalRAMSize"], sot.total_ram) self.assertEqual(ABSOLUTE_LIMITS["maxServerGroups"], sot.server_groups) self.assertEqual( ABSOLUTE_LIMITS["maxServerGroupMembers"], sot.server_group_members ) self.assertEqual( ABSOLUTE_LIMITS["totalFloatingIpsUsed"], sot.floating_ips_used ) self.assertEqual( ABSOLUTE_LIMITS["totalSecurityGroupsUsed"], sot.security_groups_used, ) self.assertEqual(ABSOLUTE_LIMITS["totalRAMUsed"], sot.total_ram_used) self.assertEqual( ABSOLUTE_LIMITS["totalInstancesUsed"], sot.instances_used ) self.assertEqual( ABSOLUTE_LIMITS["totalServerGroupsUsed"], sot.server_groups_used ) self.assertEqual( ABSOLUTE_LIMITS["totalCoresUsed"], sot.total_cores_used ) class TestRateLimits(base.TestCase): def test_basic(self): sot = limits.RateLimits() self.assertIsNone(sot.resource_key) self.assertIsNone(sot.resources_key) self.assertEqual("", sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = limits.RateLimits(**RATE_LIMIT) self.assertEqual(RATE_LIMIT["regex"], sot.regex) self.assertEqual(RATE_LIMIT["uri"], sot.uri) self.assertIsInstance(sot.limits[0], limits.RateLimit) class TestLimits(base.TestCase): def test_basic(self): sot = limits.Limits() 
self.assertEqual("limits", sot.resource_key) self.assertEqual("/limits", sot.base_path) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'tenant_id': 'tenant_id', 'project_id': 'tenant_id', 'reserved': 'reserved', }, sot._query_mapping._mapping, ) def test_get(self): sess = mock.Mock(spec=adapter.Adapter) sess.default_microversion = None resp = mock.Mock() sess.get.return_value = resp resp.json.return_value = copy.deepcopy(LIMITS_BODY) resp.headers = {} resp.status_code = 200 sot = limits.Limits().fetch(sess) self.assertEqual( ABSOLUTE_LIMITS["maxImageMeta"], sot.absolute.image_meta ) self.assertEqual( ABSOLUTE_LIMITS["maxSecurityGroupRules"], sot.absolute.security_group_rules, ) self.assertEqual( ABSOLUTE_LIMITS["maxSecurityGroups"], sot.absolute.security_groups ) self.assertEqual( ABSOLUTE_LIMITS["maxServerMeta"], sot.absolute.server_meta ) self.assertEqual( ABSOLUTE_LIMITS["maxTotalCores"], sot.absolute.total_cores ) self.assertEqual( ABSOLUTE_LIMITS["maxTotalFloatingIps"], sot.absolute.floating_ips ) self.assertEqual( ABSOLUTE_LIMITS["maxTotalInstances"], sot.absolute.instances ) self.assertEqual( ABSOLUTE_LIMITS["maxTotalKeypairs"], sot.absolute.keypairs ) self.assertEqual( ABSOLUTE_LIMITS["maxTotalRAMSize"], sot.absolute.total_ram ) self.assertEqual( ABSOLUTE_LIMITS["maxServerGroups"], sot.absolute.server_groups ) self.assertEqual( ABSOLUTE_LIMITS["maxServerGroupMembers"], sot.absolute.server_group_members, ) self.assertEqual( ABSOLUTE_LIMITS["totalFloatingIpsUsed"], sot.absolute.floating_ips_used, ) self.assertEqual( ABSOLUTE_LIMITS["totalSecurityGroupsUsed"], sot.absolute.security_groups_used, ) self.assertEqual( ABSOLUTE_LIMITS["totalRAMUsed"], sot.absolute.total_ram_used ) self.assertEqual( ABSOLUTE_LIMITS["totalInstancesUsed"], sot.absolute.instances_used ) 
self.assertEqual( ABSOLUTE_LIMITS["totalServerGroupsUsed"], sot.absolute.server_groups_used, ) self.assertEqual( ABSOLUTE_LIMITS["totalCoresUsed"], sot.absolute.total_cores_used ) self.assertEqual(RATE_LIMIT["uri"], sot.rate[0].uri) self.assertEqual(RATE_LIMIT["regex"], sot.rate[0].regex) self.assertIsInstance(sot.rate[0].limits[0], limits.RateLimit) dsot = sot.to_dict() self.assertIsInstance(dsot['rate'][0], dict) self.assertIsInstance(dsot['absolute'], dict) self.assertEqual(RATE_LIMIT["uri"], dsot['rate'][0]['uri']) self.assertEqual( ABSOLUTE_LIMITS["totalSecurityGroupsUsed"], dsot['absolute']['security_groups_used'], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_migration.py0000664000175000017500000000645100000000000025752 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.compute.v2 import migration from openstack.tests.unit import base EXAMPLE = { 'uuid': '42341d4b-346a-40d0-83c6-5f4f6892b650', 'instance_uuid': '9128d044-7b61-403e-b766-7547076ff6c1', 'user_id': '78348f0e-97ee-4d70-ad34-189692673ea2', 'project_id': '9842f0f7-1229-4355-afe7-15ebdbb8c3d8', 'created_at': '2016-06-23T14:42:02.000000', 'updated_at': '2016-06-23T14:42:02.000000', 'status': 'migrating', 'source_compute': 'compute10', 'source_node': 'node10', 'dest_host': '5.6.7.8', 'dest_compute': 'compute20', 'dest_node': 'node20', 'migration_type': 'resize', 'old_instance_type_id': 5, 'new_instance_type_id': 6, } class TestMigration(base.TestCase): def test_basic(self): sot = migration.Migration() self.assertIsNone(sot.resource_key) # we don't support fetch self.assertEqual('migrations', sot.resources_key) self.assertEqual('/os-migrations', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'host': 'host', 'status': 'status', 'migration_type': 'migration_type', 'source_compute': 'source_compute', 'user_id': 'user_id', 'project_id': 'project_id', 'changes_since': 'changes-since', 'changes_before': 'changes-before', 'server_id': 'instance_uuid', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = migration.Migration(**EXAMPLE) self.assertEqual(EXAMPLE['uuid'], sot.id) self.assertEqual(EXAMPLE['instance_uuid'], sot.server_id) self.assertEqual(EXAMPLE['user_id'], sot.user_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['source_compute'], sot.source_compute) self.assertEqual(EXAMPLE['source_node'], sot.source_node) self.assertEqual(EXAMPLE['dest_host'], 
sot.dest_host) self.assertEqual(EXAMPLE['dest_compute'], sot.dest_compute) self.assertEqual(EXAMPLE['dest_node'], sot.dest_node) self.assertEqual(EXAMPLE['migration_type'], sot.migration_type) self.assertEqual(EXAMPLE['old_instance_type_id'], sot.old_flavor_id) self.assertEqual(EXAMPLE['new_instance_type_id'], sot.new_flavor_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_proxy.py0000664000175000017500000016645000000000000025150 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import datetime from unittest import mock import uuid import warnings from openstack.block_storage.v3 import volume from openstack.compute.v2 import _proxy from openstack.compute.v2 import aggregate from openstack.compute.v2 import availability_zone as az from openstack.compute.v2 import extension from openstack.compute.v2 import flavor from openstack.compute.v2 import hypervisor from openstack.compute.v2 import image from openstack.compute.v2 import keypair from openstack.compute.v2 import migration from openstack.compute.v2 import quota_class_set from openstack.compute.v2 import quota_set from openstack.compute.v2 import server from openstack.compute.v2 import server_action from openstack.compute.v2 import server_group from openstack.compute.v2 import server_interface from openstack.compute.v2 import server_ip from openstack.compute.v2 import server_migration from openstack.compute.v2 import server_remote_console from openstack.compute.v2 import service from openstack.compute.v2 import usage from openstack.compute.v2 import volume_attachment from openstack.identity.v3 import project from openstack import proxy as proxy_base from openstack.tests.unit import test_proxy_base from openstack import warnings as os_warnings class TestComputeProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestFlavor(TestComputeProxy): def test_flavor_create(self): self.verify_create(self.proxy.create_flavor, flavor.Flavor) def test_flavor_delete(self): self.verify_delete(self.proxy.delete_flavor, flavor.Flavor, False) def test_flavor_update(self): self.verify_update(self.proxy.update_flavor, flavor.Flavor, False) def test_flavor_delete_ignore(self): self.verify_delete(self.proxy.delete_flavor, flavor.Flavor, True) def test_flavor_find(self): self.verify_find(self.proxy.find_flavor, flavor.Flavor) def test_flavor_find_query(self): self.verify_find( self.proxy.find_flavor, flavor.Flavor, 
method_kwargs={"a": "b"}, expected_kwargs={"a": "b", "ignore_missing": True}, ) def test_flavor_find_fetch_extra(self): """fetch extra_specs is triggered""" with mock.patch( 'openstack.compute.v2.flavor.Flavor.fetch_extra_specs' ) as mocked: res = flavor.Flavor() mocked.return_value = res self._verify( 'openstack.proxy.Proxy._find', self.proxy.find_flavor, method_args=['res', True], method_kwargs={'get_extra_specs': True}, expected_result=res, expected_args=[flavor.Flavor, 'res'], expected_kwargs={'ignore_missing': True}, ) mocked.assert_called_once() def test_flavor_find_skip_fetch_extra(self): """fetch extra_specs not triggered""" with mock.patch( 'openstack.compute.v2.flavor.Flavor.fetch_extra_specs' ) as mocked: res = flavor.Flavor(extra_specs={'a': 'b'}) mocked.return_value = res self._verify( 'openstack.proxy.Proxy._find', self.proxy.find_flavor, method_args=['res', True], expected_result=res, expected_args=[flavor.Flavor, 'res'], expected_kwargs={'ignore_missing': True}, ) mocked.assert_not_called() def test_flavor_get_no_extra(self): """fetch extra_specs not triggered""" with mock.patch( 'openstack.compute.v2.flavor.Flavor.fetch_extra_specs' ) as mocked: res = flavor.Flavor() mocked.return_value = res self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_flavor, method_args=['res'], expected_result=res, expected_args=[flavor.Flavor, 'res'], ) mocked.assert_not_called() def test_flavor_get_fetch_extra(self): """fetch extra_specs is triggered""" with mock.patch( 'openstack.compute.v2.flavor.Flavor.fetch_extra_specs' ) as mocked: res = flavor.Flavor() mocked.return_value = res self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_flavor, method_args=['res', True], expected_result=res, expected_args=[flavor.Flavor, 'res'], ) mocked.assert_called_once() def test_flavor_get_skip_fetch_extra(self): """fetch extra_specs not triggered""" with mock.patch( 'openstack.compute.v2.flavor.Flavor.fetch_extra_specs' ) as mocked: res = 
flavor.Flavor(extra_specs={'a': 'b'}) mocked.return_value = res self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_flavor, method_args=['res', True], expected_result=res, expected_args=[flavor.Flavor, 'res'], ) mocked.assert_not_called() @mock.patch("openstack.proxy.Proxy._list") @mock.patch("openstack.compute.v2.flavor.Flavor.fetch_extra_specs") def test_flavors_detailed(self, fetch_mock, list_mock): res = self.proxy.flavors(details=True) for r in res: self.assertIsNotNone(r) fetch_mock.assert_not_called() list_mock.assert_called_with( flavor.Flavor, base_path="/flavors/detail" ) @mock.patch("openstack.proxy.Proxy._list") @mock.patch("openstack.compute.v2.flavor.Flavor.fetch_extra_specs") def test_flavors_not_detailed(self, fetch_mock, list_mock): res = self.proxy.flavors(details=False) for r in res: self.assertIsNotNone(r) fetch_mock.assert_not_called() list_mock.assert_called_with(flavor.Flavor, base_path="/flavors") @mock.patch("openstack.proxy.Proxy._list") @mock.patch("openstack.compute.v2.flavor.Flavor.fetch_extra_specs") def test_flavors_query(self, fetch_mock, list_mock): res = self.proxy.flavors(details=False, get_extra_specs=True, a="b") for r in res: fetch_mock.assert_called_with(self.proxy) list_mock.assert_called_with( flavor.Flavor, base_path="/flavors", a="b" ) @mock.patch("openstack.proxy.Proxy._list") @mock.patch("openstack.compute.v2.flavor.Flavor.fetch_extra_specs") def test_flavors_get_extra(self, fetch_mock, list_mock): res = self.proxy.flavors(details=False, get_extra_specs=True) for r in res: fetch_mock.assert_called_with(self.proxy) list_mock.assert_called_with(flavor.Flavor, base_path="/flavors") def test_flavor_get_access(self): self._verify( "openstack.compute.v2.flavor.Flavor.get_access", self.proxy.get_flavor_access, method_args=["value"], expected_args=[self.proxy], ) def test_flavor_add_tenant_access(self): self._verify( "openstack.compute.v2.flavor.Flavor.add_tenant_access", self.proxy.flavor_add_tenant_access, 
method_args=["value", "fake-tenant"], expected_args=[self.proxy, "fake-tenant"], ) def test_flavor_remove_tenant_access(self): self._verify( "openstack.compute.v2.flavor.Flavor.remove_tenant_access", self.proxy.flavor_remove_tenant_access, method_args=["value", "fake-tenant"], expected_args=[self.proxy, "fake-tenant"], ) def test_flavor_fetch_extra_specs(self): self._verify( "openstack.compute.v2.flavor.Flavor.fetch_extra_specs", self.proxy.fetch_flavor_extra_specs, method_args=["value"], expected_args=[self.proxy], ) def test_create_flavor_extra_specs(self): self._verify( "openstack.compute.v2.flavor.Flavor.create_extra_specs", self.proxy.create_flavor_extra_specs, method_args=["value", {'a': 'b'}], expected_args=[self.proxy], expected_kwargs={"specs": {'a': 'b'}}, ) def test_get_flavor_extra_specs_prop(self): self._verify( "openstack.compute.v2.flavor.Flavor.get_extra_specs_property", self.proxy.get_flavor_extra_specs_property, method_args=["value", "prop"], expected_args=[self.proxy, "prop"], ) def test_update_flavor_extra_specs_prop(self): self._verify( "openstack.compute.v2.flavor.Flavor.update_extra_specs_property", self.proxy.update_flavor_extra_specs_property, method_args=["value", "prop", "val"], expected_args=[self.proxy, "prop", "val"], ) def test_delete_flavor_extra_specs_prop(self): self._verify( "openstack.compute.v2.flavor.Flavor.delete_extra_specs_property", self.proxy.delete_flavor_extra_specs_property, method_args=["value", "prop"], expected_args=[self.proxy, "prop"], ) class TestKeyPair(TestComputeProxy): def test_keypair_create(self): self.verify_create(self.proxy.create_keypair, keypair.Keypair) def test_keypair_delete(self): self.verify_delete(self.proxy.delete_keypair, keypair.Keypair, False) def test_keypair_delete_ignore(self): self.verify_delete(self.proxy.delete_keypair, keypair.Keypair, True) def test_keypair_delete_user_id(self): self.verify_delete( self.proxy.delete_keypair, keypair.Keypair, True, method_kwargs={'user_id': 
'fake_user'}, expected_kwargs={'user_id': 'fake_user'}, ) def test_keypair_find(self): self.verify_find(self.proxy.find_keypair, keypair.Keypair) def test_keypair_find_user_id(self): self.verify_find( self.proxy.find_keypair, keypair.Keypair, method_kwargs={'user_id': 'fake_user'}, expected_kwargs={'user_id': 'fake_user'}, ) def test_keypair_get(self): self.verify_get(self.proxy.get_keypair, keypair.Keypair) def test_keypair_get_user_id(self): self.verify_get( self.proxy.get_keypair, keypair.Keypair, method_kwargs={'user_id': 'fake_user'}, expected_kwargs={'user_id': 'fake_user'}, ) def test_keypairs(self): self.verify_list(self.proxy.keypairs, keypair.Keypair) def test_keypairs_user_id(self): self.verify_list( self.proxy.keypairs, keypair.Keypair, method_kwargs={'user_id': 'fake_user'}, expected_kwargs={'user_id': 'fake_user'}, ) class TestAggregate(TestComputeProxy): def test_aggregate_create(self): self.verify_create(self.proxy.create_aggregate, aggregate.Aggregate) def test_aggregate_delete(self): self.verify_delete( self.proxy.delete_aggregate, aggregate.Aggregate, False ) def test_aggregate_delete_ignore(self): self.verify_delete( self.proxy.delete_aggregate, aggregate.Aggregate, True ) def test_aggregate_find(self): self.verify_find(self.proxy.find_aggregate, aggregate.Aggregate) def test_aggregates(self): self.verify_list(self.proxy.aggregates, aggregate.Aggregate) def test_aggregate_get(self): self.verify_get(self.proxy.get_aggregate, aggregate.Aggregate) def test_aggregate_update(self): self.verify_update(self.proxy.update_aggregate, aggregate.Aggregate) def test_aggregate_add_host(self): self._verify( "openstack.compute.v2.aggregate.Aggregate.add_host", self.proxy.add_host_to_aggregate, method_args=["value", "host"], expected_args=[self.proxy, "host"], ) def test_aggregate_remove_host(self): self._verify( "openstack.compute.v2.aggregate.Aggregate.remove_host", self.proxy.remove_host_from_aggregate, method_args=["value", "host"], 
expected_args=[self.proxy, "host"], ) def test_aggregate_set_metadata(self): self._verify( "openstack.compute.v2.aggregate.Aggregate.set_metadata", self.proxy.set_aggregate_metadata, method_args=["value", {'a': 'b'}], expected_args=[self.proxy, {'a': 'b'}], ) def test_aggregate_precache_image(self): self._verify( "openstack.compute.v2.aggregate.Aggregate.precache_images", self.proxy.aggregate_precache_images, method_args=["value", '1'], expected_args=[self.proxy, [{'id': '1'}]], ) def test_aggregate_precache_images(self): self._verify( "openstack.compute.v2.aggregate.Aggregate.precache_images", self.proxy.aggregate_precache_images, method_args=["value", ['1', '2']], expected_args=[self.proxy, [{'id': '1'}, {'id': '2'}]], ) class TestService(TestComputeProxy): def test_services(self): self.verify_list(self.proxy.services, service.Service) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_enable_service_252(self, mv_mock): self._verify( 'openstack.compute.v2.service.Service.enable', self.proxy.enable_service, method_args=["value", "host1", "nova-compute"], expected_args=[self.proxy, "host1", "nova-compute"], ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) def test_enable_service_253(self, mv_mock): self._verify( 'openstack.proxy.Proxy._update', self.proxy.enable_service, method_args=["value"], method_kwargs={}, expected_args=[service.Service, "value"], expected_kwargs={'status': 'enabled'}, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_disable_service_252(self, mv_mock): self._verify( 'openstack.compute.v2.service.Service.disable', self.proxy.disable_service, method_args=["value", "host1", "nova-compute"], expected_args=[self.proxy, "host1", "nova-compute", None], ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) def test_disable_service_253(self, mv_mock): self._verify( 
'openstack.proxy.Proxy._update', self.proxy.disable_service, method_args=["value"], method_kwargs={'disabled_reason': 'some_reason'}, expected_args=[service.Service, "value"], expected_kwargs={ 'status': 'disabled', 'disabled_reason': 'some_reason', }, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_force_service_down_252(self, mv_mock): self._verify( 'openstack.compute.v2.service.Service.set_forced_down', self.proxy.update_service_forced_down, method_args=["value", "host1", "nova-compute"], expected_args=[self.proxy, "host1", "nova-compute", True], ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_force_service_down_252_empty_vals(self, mv_mock): self.assertRaises( ValueError, self.proxy.update_service_forced_down, "value", None, None, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_force_service_down_252_empty_vals_svc(self, mv_mock): self._verify( 'openstack.compute.v2.service.Service.set_forced_down', self.proxy.update_service_forced_down, method_args=[{'host': 'a', 'binary': 'b'}, None, None], expected_args=[self.proxy, None, None, True], ) def test_find_service(self): self.verify_find( self.proxy.find_service, service.Service, ) def test_find_service_args(self): self.verify_find( self.proxy.find_service, service.Service, method_kwargs={'host': 'h1'}, expected_kwargs={'host': 'h1'}, ) class TestVolumeAttachment(TestComputeProxy): def test_volume_attachment_create(self): self.verify_create( self.proxy.create_volume_attachment, volume_attachment.VolumeAttachment, method_kwargs={'server': 'server_id', 'volume': 'volume_id'}, expected_kwargs={ 'server_id': 'server_id', 'volume_id': 'volume_id', }, ) def test_volume_attachment_create__legacy_parameters(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.verify_create( self.proxy.create_volume_attachment, 
volume_attachment.VolumeAttachment, method_kwargs={'server': 'server_id', 'volumeId': 'volume_id'}, expected_kwargs={ 'server_id': 'server_id', 'volume_id': 'volume_id', }, ) self.assertEqual(1, len(w)) self.assertEqual( os_warnings.OpenStackDeprecationWarning, w[-1].category, ) self.assertIn( 'This method was called with a volume_id or volumeId argument', str(w[-1]), ) def test_volume_attachment_create__missing_parameters(self): exc = self.assertRaises( TypeError, self.proxy.create_volume_attachment, 'server_id', ) self.assertIn( 'create_volume_attachment() missing 1 required positional argument: volume', # noqa: E501 str(exc), ) def test_volume_attachment_update(self): self.verify_update( self.proxy.update_volume_attachment, volume_attachment.VolumeAttachment, method_args=[], method_kwargs={'server': 'server_id', 'volume': 'volume_id'}, expected_kwargs={ 'id': 'volume_id', 'server_id': 'server_id', 'volume_id': 'volume_id', }, ) def test_volume_attachment_delete(self): # We pass objects to avoid the lookup that's done as part of the # handling of legacy option order. We test that legacy path separately. fake_server = server.Server(id=str(uuid.uuid4())) fake_volume = volume.Volume(id=str(uuid.uuid4())) self.verify_delete( self.proxy.delete_volume_attachment, volume_attachment.VolumeAttachment, ignore_missing=False, method_args=[fake_server, fake_volume], method_kwargs={}, expected_args=[], expected_kwargs={ 'id': fake_volume.id, 'server_id': fake_server.id, }, ) def test_volume_attachment_delete__ignore(self): # We pass objects to avoid the lookup that's done as part of the # handling of legacy option order. We test that legacy path separately. 
fake_server = server.Server(id=str(uuid.uuid4())) fake_volume = volume.Volume(id=str(uuid.uuid4())) self.verify_delete( self.proxy.delete_volume_attachment, volume_attachment.VolumeAttachment, ignore_missing=True, method_args=[fake_server, fake_volume], method_kwargs={}, expected_args=[], expected_kwargs={ 'id': fake_volume.id, 'server_id': fake_server.id, }, ) def test_volume_attachment_delete__legacy_parameters(self): fake_server = server.Server(id=str(uuid.uuid4())) fake_volume = volume.Volume(id=str(uuid.uuid4())) with mock.patch.object( self.proxy, 'find_server', return_value=None, ) as mock_find_server: # we are calling the method with volume and server ID arguments as # strings and in the wrong order, which results in a query as we # attempt to match the server ID to an actual server before we # switch the argument order once we realize we can't do this self.verify_delete( self.proxy.delete_volume_attachment, volume_attachment.VolumeAttachment, ignore_missing=False, method_args=[fake_volume.id, fake_server.id], method_kwargs={}, expected_args=[], expected_kwargs={ 'id': fake_volume.id, 'server_id': fake_server.id, }, ) # note that we attempted to call the server with the volume ID but # this was mocked to return None (as would happen in the real # world) mock_find_server.assert_called_once_with( fake_volume.id, ignore_missing=True, ) def test_volume_attachment_get(self): self.verify_get( self.proxy.get_volume_attachment, volume_attachment.VolumeAttachment, method_args=[], method_kwargs={'server': 'server_id', 'volume': 'volume_id'}, expected_kwargs={ 'id': 'volume_id', 'server_id': 'server_id', }, ) def test_volume_attachments(self): self.verify_list( self.proxy.volume_attachments, volume_attachment.VolumeAttachment, method_kwargs={'server': 'server_id'}, expected_kwargs={'server_id': 'server_id'}, ) class TestHypervisor(TestComputeProxy): def test_hypervisors_not_detailed(self): self.verify_list( self.proxy.hypervisors, hypervisor.Hypervisor, 
method_kwargs={"details": False}, expected_kwargs={}, ) def test_hypervisors_detailed(self): self.verify_list( self.proxy.hypervisors, hypervisor.HypervisorDetail, method_kwargs={"details": True}, expected_kwargs={}, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_hypervisors_search_before_253_no_qp(self, sm): self.verify_list( self.proxy.hypervisors, hypervisor.Hypervisor, base_path='/os-hypervisors/detail', method_kwargs={'details': True}, expected_kwargs={}, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_hypervisors_search_before_253(self, sm): self.verify_list( self.proxy.hypervisors, hypervisor.Hypervisor, base_path='/os-hypervisors/substring/search', method_kwargs={'hypervisor_hostname_pattern': 'substring'}, expected_kwargs={}, ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) def test_hypervisors_search_after_253(self, sm): self.verify_list( self.proxy.hypervisors, hypervisor.Hypervisor, method_kwargs={'hypervisor_hostname_pattern': 'substring'}, base_path=None, expected_kwargs={'hypervisor_hostname_pattern': 'substring'}, ) def test_find_hypervisor_detail(self): self.verify_find( self.proxy.find_hypervisor, hypervisor.Hypervisor, expected_kwargs={ 'list_base_path': '/os-hypervisors/detail', 'ignore_missing': True, }, ) def test_find_hypervisor_no_detail(self): self.verify_find( self.proxy.find_hypervisor, hypervisor.Hypervisor, method_kwargs={'details': False}, expected_kwargs={'list_base_path': None, 'ignore_missing': True}, ) def test_get_hypervisor(self): self.verify_get(self.proxy.get_hypervisor, hypervisor.Hypervisor) def test_get_hypervisor_uptime(self): self._verify( "openstack.compute.v2.hypervisor.Hypervisor.get_uptime", self.proxy.get_hypervisor_uptime, method_args=["value"], expected_args=[self.proxy], ) class TestCompute(TestComputeProxy): def test_extension_find(self): 
self.verify_find(self.proxy.find_extension, extension.Extension) def test_extensions(self): self.verify_list(self.proxy.extensions, extension.Extension) @contextlib.contextmanager def _check_image_proxy_deprecation_warning(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") yield self.assertEqual(1, len(w)) self.assertTrue(issubclass(w[-1].category, DeprecationWarning)) self.assertIn( "This API is a proxy to the image service ", str(w[-1].message), ) def test_image_delete(self): with self._check_image_proxy_deprecation_warning(): self.verify_delete(self.proxy.delete_image, image.Image, False) def test_image_delete_ignore(self): with self._check_image_proxy_deprecation_warning(): self.verify_delete(self.proxy.delete_image, image.Image, True) def test_image_find(self): with self._check_image_proxy_deprecation_warning(): self.verify_find(self.proxy.find_image, image.Image) def test_image_get(self): with self._check_image_proxy_deprecation_warning(): self.verify_get(self.proxy.get_image, image.Image) def test_images_detailed(self): with self._check_image_proxy_deprecation_warning(): self.verify_list( self.proxy.images, image.ImageDetail, method_kwargs={"details": True, "query": 1}, expected_kwargs={"query": 1}, ) def test_images_not_detailed(self): with self._check_image_proxy_deprecation_warning(): self.verify_list( self.proxy.images, image.Image, method_kwargs={"details": False, "query": 1}, expected_kwargs={"query": 1}, ) def test_limits_get(self): self._verify( "openstack.compute.v2.limits.Limits.fetch", self.proxy.get_limits, method_args=[], method_kwargs={"a": "b"}, expected_args=[self.proxy], expected_kwargs={"a": "b"}, ) def test_server_interface_create(self): self.verify_create( self.proxy.create_server_interface, server_interface.ServerInterface, method_kwargs={"server": "test_id"}, expected_kwargs={"server_id": "test_id"}, ) def test_server_interface_delete(self): self.proxy._get_uri_attribute = lambda *args: args[1] 
interface_id = "test_interface_id" server_id = "test_server_id" test_interface = server_interface.ServerInterface(id=interface_id) test_interface.server_id = server_id # Case1: ServerInterface instance is provided as value self._verify( "openstack.proxy.Proxy._delete", self.proxy.delete_server_interface, method_args=[test_interface], method_kwargs={"server": server_id}, expected_args=[server_interface.ServerInterface, interface_id], expected_kwargs={"server_id": server_id, "ignore_missing": True}, ) # Case2: ServerInterface ID is provided as value self._verify( "openstack.proxy.Proxy._delete", self.proxy.delete_server_interface, method_args=[interface_id], method_kwargs={"server": server_id}, expected_args=[server_interface.ServerInterface, interface_id], expected_kwargs={"server_id": server_id, "ignore_missing": True}, ) def test_server_interface_delete_ignore(self): self.proxy._get_uri_attribute = lambda *args: args[1] self.verify_delete( self.proxy.delete_server_interface, server_interface.ServerInterface, True, method_kwargs={"server": "test_id"}, expected_kwargs={"server_id": "test_id"}, ) def test_server_interface_get(self): self.proxy._get_uri_attribute = lambda *args: args[1] interface_id = "test_interface_id" server_id = "test_server_id" test_interface = server_interface.ServerInterface(id=interface_id) test_interface.server_id = server_id # Case1: ServerInterface instance is provided as value self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_server_interface, method_args=[test_interface], method_kwargs={"server": server_id}, expected_args=[server_interface.ServerInterface], expected_kwargs={"port_id": interface_id, "server_id": server_id}, ) # Case2: ServerInterface ID is provided as value self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_server_interface, method_args=[interface_id], method_kwargs={"server": server_id}, expected_args=[server_interface.ServerInterface], expected_kwargs={"port_id": interface_id, "server_id": server_id}, ) 
def test_server_interfaces(self): self.verify_list( self.proxy.server_interfaces, server_interface.ServerInterface, method_args=["test_id"], expected_args=[], expected_kwargs={"server_id": "test_id"}, ) def test_server_ips_with_network_label(self): self.verify_list( self.proxy.server_ips, server_ip.ServerIP, method_args=["test_id"], method_kwargs={"network_label": "test_label"}, expected_args=[], expected_kwargs={ "server_id": "test_id", "network_label": "test_label", }, ) def test_server_ips_without_network_label(self): self.verify_list( self.proxy.server_ips, server_ip.ServerIP, method_args=["test_id"], expected_args=[], expected_kwargs={"server_id": "test_id", "network_label": None}, ) def test_server_create_attrs(self): self.verify_create(self.proxy.create_server, server.Server) def test_server_delete(self): self.verify_delete(self.proxy.delete_server, server.Server, False) def test_server_delete_ignore(self): self.verify_delete(self.proxy.delete_server, server.Server, True) def test_server_force_delete(self): self._verify( "openstack.compute.v2.server.Server.force_delete", self.proxy.delete_server, method_args=["value", False, True], expected_args=[self.proxy], ) def test_server_find(self): self.verify_find( self.proxy.find_server, server.Server, method_kwargs={'all_projects': True}, expected_kwargs={ 'list_base_path': '/servers/detail', 'all_projects': True, }, ) def test_server_get(self): self.verify_get(self.proxy.get_server, server.Server) def test_servers_detailed(self): self.verify_list( self.proxy.servers, server.Server, method_kwargs={"details": True, "changes_since": 1, "image": 2}, expected_kwargs={ "changes_since": 1, "image": 2, "base_path": "/servers/detail", }, ) def test_servers_not_detailed(self): self.verify_list( self.proxy.servers, server.Server, method_kwargs={"details": False, "changes_since": 1, "image": 2}, expected_kwargs={"changes_since": 1, "image": 2}, ) def test_server_update(self): self.verify_update(self.proxy.update_server, 
server.Server) def test_server_change_password(self): self._verify( "openstack.compute.v2.server.Server.change_password", self.proxy.change_server_password, method_args=["value", "password"], expected_args=[self.proxy, "password"], ) def test_server_get_password(self): self._verify( "openstack.compute.v2.server.Server.get_password", self.proxy.get_server_password, method_args=["value"], expected_args=[self.proxy], ) def test_server_clear_password(self): self._verify( "openstack.compute.v2.server.Server.clear_password", self.proxy.clear_server_password, method_args=["value"], expected_args=[self.proxy], ) def test_server_wait_for(self): value = server.Server(id='1234') self.verify_wait_for_status( self.proxy.wait_for_server, method_args=[value], expected_args=[self.proxy, value, 'ACTIVE', ['ERROR'], 2, 120], expected_kwargs={'callback': None}, ) def test_server_resize(self): self._verify( "openstack.compute.v2.server.Server.resize", self.proxy.resize_server, method_args=["value", "test-flavor"], expected_args=[self.proxy, "test-flavor"], ) def test_server_confirm_resize(self): self._verify( "openstack.compute.v2.server.Server.confirm_resize", self.proxy.confirm_server_resize, method_args=["value"], expected_args=[self.proxy], ) def test_server_revert_resize(self): self._verify( "openstack.compute.v2.server.Server.revert_resize", self.proxy.revert_server_resize, method_args=["value"], expected_args=[self.proxy], ) def test_server_rebuild(self): id = 'test_image_id' image_obj = image.Image(id='test_image_id') # Case1: image object is provided # NOTE: Inside of Server.rebuild is where image_obj gets converted # to an ID instead of object. 
self._verify( 'openstack.compute.v2.server.Server.rebuild', self.proxy.rebuild_server, method_args=["value"], method_kwargs={ "name": "test_server", "admin_password": "test_pass", "metadata": {"k1": "v1"}, "image": image_obj, }, expected_args=[self.proxy], expected_kwargs={ "name": "test_server", "admin_password": "test_pass", "metadata": {"k1": "v1"}, "image": image_obj, }, ) # Case2: image name or id is provided self._verify( 'openstack.compute.v2.server.Server.rebuild', self.proxy.rebuild_server, method_args=["value"], method_kwargs={ "name": "test_server", "admin_password": "test_pass", "metadata": {"k1": "v1"}, "image": id, }, expected_args=[self.proxy], expected_kwargs={ "name": "test_server", "admin_password": "test_pass", "metadata": {"k1": "v1"}, "image": id, }, ) def test_add_fixed_ip_to_server(self): self._verify( "openstack.compute.v2.server.Server.add_fixed_ip", self.proxy.add_fixed_ip_to_server, method_args=["value", "network-id"], expected_args=[self.proxy, "network-id"], ) def test_fixed_ip_from_server(self): self._verify( "openstack.compute.v2.server.Server.remove_fixed_ip", self.proxy.remove_fixed_ip_from_server, method_args=["value", "address"], expected_args=[self.proxy, "address"], ) def test_floating_ip_to_server(self): self._verify( "openstack.compute.v2.server.Server.add_floating_ip", self.proxy.add_floating_ip_to_server, method_args=["value", "floating-ip"], expected_args=[self.proxy, "floating-ip"], expected_kwargs={'fixed_address': None}, ) def test_add_floating_ip_to_server_with_fixed_addr(self): self._verify( "openstack.compute.v2.server.Server.add_floating_ip", self.proxy.add_floating_ip_to_server, method_args=["value", "floating-ip", 'fixed-addr'], expected_args=[self.proxy, "floating-ip"], expected_kwargs={'fixed_address': 'fixed-addr'}, ) def test_remove_floating_ip_from_server(self): self._verify( "openstack.compute.v2.server.Server.remove_floating_ip", self.proxy.remove_floating_ip_from_server, method_args=["value", "address"], 
expected_args=[self.proxy, "address"], ) def test_server_backup(self): self._verify( "openstack.compute.v2.server.Server.backup", self.proxy.backup_server, method_args=["value", "name", "daily", 1], expected_args=[self.proxy, "name", "daily", 1], ) def test_server_pause(self): self._verify( "openstack.compute.v2.server.Server.pause", self.proxy.pause_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_unpause(self): self._verify( "openstack.compute.v2.server.Server.unpause", self.proxy.unpause_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_suspend(self): self._verify( "openstack.compute.v2.server.Server.suspend", self.proxy.suspend_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_resume(self): self._verify( "openstack.compute.v2.server.Server.resume", self.proxy.resume_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_lock(self): self._verify( "openstack.compute.v2.server.Server.lock", self.proxy.lock_server, method_args=["value"], expected_args=[self.proxy], expected_kwargs={"locked_reason": None}, ) def test_server_lock_with_options(self): self._verify( "openstack.compute.v2.server.Server.lock", self.proxy.lock_server, method_args=["value"], method_kwargs={"locked_reason": "Because why not"}, expected_args=[self.proxy], expected_kwargs={"locked_reason": "Because why not"}, ) def test_server_unlock(self): self._verify( "openstack.compute.v2.server.Server.unlock", self.proxy.unlock_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_rescue(self): self._verify( "openstack.compute.v2.server.Server.rescue", self.proxy.rescue_server, method_args=["value"], expected_args=[self.proxy], expected_kwargs={"admin_pass": None, "image_ref": None}, ) def test_server_rescue_with_options(self): self._verify( "openstack.compute.v2.server.Server.rescue", self.proxy.rescue_server, method_args=["value", 'PASS', 'IMG'], expected_args=[self.proxy], 
expected_kwargs={"admin_pass": 'PASS', "image_ref": 'IMG'}, ) def test_server_unrescue(self): self._verify( "openstack.compute.v2.server.Server.unrescue", self.proxy.unrescue_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_evacuate(self): self._verify( "openstack.compute.v2.server.Server.evacuate", self.proxy.evacuate_server, method_args=["value"], expected_args=[self.proxy], expected_kwargs={ "host": None, "admin_pass": None, "force": None, "on_shared_storage": None, }, ) def test_server_evacuate_with_options(self): self._verify( "openstack.compute.v2.server.Server.evacuate", self.proxy.evacuate_server, method_args=["value", 'HOST2', 'NEW_PASS', True], method_kwargs={'on_shared_storage': False}, expected_args=[self.proxy], expected_kwargs={ "host": "HOST2", "admin_pass": 'NEW_PASS', "force": True, "on_shared_storage": False, }, ) def test_server_start(self): self._verify( "openstack.compute.v2.server.Server.start", self.proxy.start_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_stop(self): self._verify( "openstack.compute.v2.server.Server.stop", self.proxy.stop_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_restore(self): self._verify( "openstack.compute.v2.server.Server.restore", self.proxy.restore_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_shelve(self): self._verify( "openstack.compute.v2.server.Server.shelve", self.proxy.shelve_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_shelve_offload(self): self._verify( "openstack.compute.v2.server.Server.shelve_offload", self.proxy.shelve_offload_server, method_args=["value"], expected_args=[self.proxy], ) def test_server_unshelve(self): self._verify( "openstack.compute.v2.server.Server.unshelve", self.proxy.unshelve_server, method_args=["value"], expected_args=[self.proxy], expected_kwargs={ "host": None, }, ) def test_server_unshelve_with_options(self): self._verify( 
"openstack.compute.v2.server.Server.unshelve", self.proxy.unshelve_server, method_args=["value"], method_kwargs={"host": "HOST2"}, expected_args=[self.proxy], expected_kwargs={ "host": "HOST2", }, ) def test_server_trigger_dump(self): self._verify( "openstack.compute.v2.server.Server.trigger_crash_dump", self.proxy.trigger_server_crash_dump, method_args=["value"], expected_args=[self.proxy], ) def test_server_add_tag(self): self._verify( "openstack.compute.v2.server.Server.add_tag", self.proxy.add_tag_to_server, method_args=["value", "tag"], expected_args=[self.proxy, "tag"], ) def test_server_remove_tag(self): self._verify( "openstack.compute.v2.server.Server.remove_tag", self.proxy.remove_tag_from_server, method_args=["value", "tag"], expected_args=[self.proxy, "tag"], ) def test_server_remove_tags(self): self._verify( "openstack.compute.v2.server.Server.remove_all_tags", self.proxy.remove_tags_from_server, method_args=["value"], expected_args=[self.proxy], ) def test_get_server_output(self): self._verify( "openstack.compute.v2.server.Server.get_console_output", self.proxy.get_server_console_output, method_args=["value"], expected_args=[self.proxy], expected_kwargs={"length": None}, ) self._verify( "openstack.compute.v2.server.Server.get_console_output", self.proxy.get_server_console_output, method_args=["value", 1], expected_args=[self.proxy], expected_kwargs={"length": 1}, ) def test_availability_zones_not_detailed(self): self.verify_list( self.proxy.availability_zones, az.AvailabilityZone, method_kwargs={"details": False}, expected_kwargs={}, ) def test_availability_zones_detailed(self): self.verify_list( self.proxy.availability_zones, az.AvailabilityZoneDetail, method_kwargs={"details": True}, expected_kwargs={}, ) def test_get_all_server_metadata(self): self._verify( "openstack.compute.v2.server.Server.fetch_metadata", self.proxy.get_server_metadata, method_args=["value"], expected_args=[self.proxy], expected_result=server.Server(id="value", metadata={}), ) 
def test_set_server_metadata(self): kwargs = {"a": "1", "b": "2"} id = "an_id" self._verify( "openstack.compute.v2.server.Server.set_metadata", self.proxy.set_server_metadata, method_args=[id], method_kwargs=kwargs, method_result=server.Server.existing(id=id, metadata=kwargs), expected_args=[self.proxy], expected_kwargs={'metadata': kwargs}, expected_result=server.Server.existing(id=id, metadata=kwargs), ) def test_delete_server_metadata(self): self._verify( "openstack.compute.v2.server.Server.delete_metadata_item", self.proxy.delete_server_metadata, expected_result=None, method_args=["value", ["key"]], expected_args=[self.proxy, "key"], ) def test_create_image(self): metadata = {'k1': 'v1'} with mock.patch( 'openstack.compute.v2.server.Server.create_image' ) as ci_mock: ci_mock.return_value = 'image_id' connection_mock = mock.Mock() connection_mock.get_image = mock.Mock(return_value='image') connection_mock.wait_for_image = mock.Mock() self.proxy._connection = connection_mock rsp = self.proxy.create_server_image( 'server', 'image_name', metadata, wait=True, timeout=1 ) ci_mock.assert_called_with(self.proxy, 'image_name', metadata) self.proxy._connection.get_image.assert_called_with('image_id') self.proxy._connection.wait_for_image.assert_called_with( 'image', timeout=1 ) self.assertEqual(connection_mock.wait_for_image(), rsp) def test_server_group_create(self): self.verify_create( self.proxy.create_server_group, server_group.ServerGroup ) def test_server_group_delete(self): self.verify_delete( self.proxy.delete_server_group, server_group.ServerGroup, False ) def test_server_group_delete_ignore(self): self.verify_delete( self.proxy.delete_server_group, server_group.ServerGroup, True ) def test_server_group_find(self): self.verify_find( self.proxy.find_server_group, server_group.ServerGroup, method_kwargs={'all_projects': True}, expected_kwargs={'all_projects': True}, ) def test_server_group_get(self): self.verify_get(self.proxy.get_server_group, 
server_group.ServerGroup) def test_server_groups(self): self.verify_list(self.proxy.server_groups, server_group.ServerGroup) def test_live_migrate_server(self): self._verify( 'openstack.compute.v2.server.Server.live_migrate', self.proxy.live_migrate_server, method_args=["value", "host1", False], expected_args=[self.proxy, "host1"], expected_kwargs={'force': False, 'block_migration': None}, ) def test_abort_server_migration(self): self._verify( 'openstack.proxy.Proxy._delete', self.proxy.abort_server_migration, method_args=['server_migration', 'server'], expected_args=[ server_migration.ServerMigration, 'server_migration', ], expected_kwargs={ 'server_id': 'server', 'ignore_missing': True, }, ) def test_force_complete_server_migration(self): self._verify( 'openstack.compute.v2.server_migration.ServerMigration.force_complete', # noqa: E501 self.proxy.force_complete_server_migration, method_args=['server_migration', 'server'], expected_args=[self.proxy], ) def test_get_server_migration(self): self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_server_migration, method_args=['server_migration', 'server'], expected_args=[ server_migration.ServerMigration, 'server_migration', ], expected_kwargs={ 'server_id': 'server', 'ignore_missing': True, }, ) def test_server_migrations(self): self._verify( 'openstack.proxy.Proxy._list', self.proxy.server_migrations, method_args=['server'], expected_args=[server_migration.ServerMigration], expected_kwargs={'server_id': 'server'}, ) def test_migrations(self): self.verify_list(self.proxy.migrations, migration.Migration) def test_migrations_kwargs(self): self.verify_list( self.proxy.migrations, migration.Migration, method_kwargs={'host': 'h1'}, expected_kwargs={'host': 'h1'}, ) def test_fetch_security_groups(self): self._verify( 'openstack.compute.v2.server.Server.fetch_security_groups', self.proxy.fetch_server_security_groups, method_args=["value"], expected_args=[self.proxy], ) def test_add_security_groups(self): self._verify( 
'openstack.compute.v2.server.Server.add_security_group', self.proxy.add_security_group_to_server, method_args=["value", {'id': 'id', 'name': 'sg'}], expected_args=[self.proxy, 'sg'], ) def test_remove_security_groups(self): self._verify( 'openstack.compute.v2.server.Server.remove_security_group', self.proxy.remove_security_group_from_server, method_args=["value", {'id': 'id', 'name': 'sg'}], expected_args=[self.proxy, 'sg'], ) def test_usages(self): self.verify_list(self.proxy.usages, usage.Usage) def test_usages__with_kwargs(self): now = datetime.datetime.utcnow() start = now - datetime.timedelta(weeks=4) end = end = now + datetime.timedelta(days=1) self.verify_list( self.proxy.usages, usage.Usage, method_kwargs={'start': start, 'end': end}, expected_kwargs={ 'start': start.isoformat(), 'end': end.isoformat(), }, ) def test_get_usage(self): self._verify( "openstack.compute.v2.usage.Usage.fetch", self.proxy.get_usage, method_args=['value'], method_kwargs={}, expected_args=[self.proxy], expected_kwargs={}, ) def test_get_usage__with_kwargs(self): now = datetime.datetime.utcnow() start = now - datetime.timedelta(weeks=4) end = end = now + datetime.timedelta(days=1) self._verify( "openstack.compute.v2.usage.Usage.fetch", self.proxy.get_usage, method_args=['value'], method_kwargs={'start': start, 'end': end}, expected_args=[self.proxy], expected_kwargs={ 'start': start.isoformat(), 'end': end.isoformat(), }, ) def test_create_server_remote_console(self): self.verify_create( self.proxy.create_server_remote_console, server_remote_console.ServerRemoteConsole, method_kwargs={"server": "test_id", "type": "fake"}, expected_kwargs={"server_id": "test_id", "type": "fake"}, ) def test_get_console_url(self): self._verify( 'openstack.compute.v2.server.Server.get_console_url', self.proxy.get_server_console_url, method_args=["value", "console_type"], expected_args=[self.proxy, "console_type"], ) @mock.patch('openstack.utils.supports_microversion', autospec=True) 
@mock.patch('openstack.compute.v2._proxy.Proxy._create', autospec=True) @mock.patch( 'openstack.compute.v2.server.Server.get_console_url', autospec=True ) def test_create_console_mv_old(self, sgc, rcc, smv): console_fake = {'url': 'a', 'type': 'b', 'protocol': 'c'} smv.return_value = False sgc.return_value = console_fake ret = self.proxy.create_console('fake_server', 'fake_type') smv.assert_called_once_with(self.proxy, '2.6') rcc.assert_not_called() sgc.assert_called_with(mock.ANY, self.proxy, 'fake_type') self.assertDictEqual(console_fake, ret) @mock.patch('openstack.utils.supports_microversion', autospec=True) @mock.patch('openstack.compute.v2._proxy.Proxy._create', autospec=True) @mock.patch( 'openstack.compute.v2.server.Server.get_console_url', autospec=True ) def test_create_console_mv_2_6(self, sgc, rcc, smv): console_fake = {'url': 'a', 'type': 'b', 'protocol': 'c'} # Test server_remote_console is triggered when mv>=2.6 smv.return_value = True rcc.return_value = server_remote_console.ServerRemoteConsole( **console_fake ) ret = self.proxy.create_console('fake_server', 'fake_type') smv.assert_called_once_with(self.proxy, '2.6') sgc.assert_not_called() rcc.assert_called_with( mock.ANY, server_remote_console.ServerRemoteConsole, server_id='fake_server', type='fake_type', protocol=None, ) self.assertEqual(console_fake['url'], ret['url']) class TestQuotaClassSet(TestComputeProxy): def test_quota_class_set_get(self): self.verify_get( self.proxy.get_quota_class_set, quota_class_set.QuotaClassSet ) def test_quota_class_set_update(self): self.verify_update( self.proxy.update_quota_class_set, quota_class_set.QuotaClassSet, False, ) class TestQuotaSet(TestComputeProxy): def test_quota_set_get(self): self._verify( 'openstack.resource.Resource.fetch', self.proxy.get_quota_set, method_args=['prj'], expected_args=[self.proxy], expected_kwargs={ 'error_message': None, 'requires_id': False, }, method_result=quota_set.QuotaSet(), expected_result=quota_set.QuotaSet(), ) def 
test_quota_set_get_query(self): self._verify( 'openstack.resource.Resource.fetch', self.proxy.get_quota_set, method_args=['prj'], method_kwargs={'usage': True, 'user_id': 'uid'}, expected_args=[self.proxy], expected_kwargs={ 'error_message': None, 'requires_id': False, 'user_id': 'uid', 'base_path': '/os-quota-sets/%(project_id)s/detail', }, ) def test_quota_set_get_defaults(self): self._verify( 'openstack.resource.Resource.fetch', self.proxy.get_quota_set_defaults, method_args=['prj'], expected_args=[self.proxy], expected_kwargs={ 'error_message': None, 'requires_id': False, 'base_path': '/os-quota-sets/%(project_id)s/defaults', }, ) def test_quota_set_reset(self): self._verify( 'openstack.resource.Resource.delete', self.proxy.revert_quota_set, method_args=['prj'], method_kwargs={'user_id': 'uid'}, expected_args=[self.proxy], expected_kwargs={'user_id': 'uid'}, ) @mock.patch.object(proxy_base.Proxy, "_get_resource") def test_quota_set_update(self, mock_get): fake_project = project.Project(id='prj') fake_quota_set = quota_set.QuotaSet(project_id='prj') mock_get.side_effect = [fake_project, fake_quota_set] self._verify( 'openstack.resource.Resource.commit', self.proxy.update_quota_set, method_args=['prj'], method_kwargs={'ram': 123}, expected_args=[self.proxy], expected_kwargs={}, ) mock_get.assert_has_calls( [ mock.call(project.Project, 'prj'), mock.call(quota_set.QuotaSet, None, project_id='prj', ram=123), ] ) class TestServerAction(TestComputeProxy): def test_server_action_get(self): self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_server_action, method_args=['request_id'], method_kwargs={'server': 'server_id'}, expected_args=[server_action.ServerAction], expected_kwargs={ 'request_id': 'request_id', 'server_id': 'server_id', }, ) def test_server_actions(self): self.verify_list( self.proxy.server_actions, server_action.ServerAction, method_kwargs={'server': 'server_a'}, expected_kwargs={'server_id': 'server_a'}, ) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_server.py0000664000175000017500000013514400000000000025271 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http from unittest import mock from openstack.compute.v2 import flavor from openstack.compute.v2 import server from openstack.image.v2 import image from openstack.tests.unit import base from openstack.tests.unit import fakes IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'OS-DCF:diskConfig': 'AUTO', 'OS-EXT-AZ:availability_zone': 'us-west', 'OS-EXT-SRV-ATTR:host': 'compute', 'OS-EXT-SRV-ATTR:hostname': 'new-server-test', 'OS-EXT-SRV-ATTR:hypervisor_hostname': 'fake-mini', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:kernel_id': '', 'OS-EXT-SRV-ATTR:launch_index': 0, 'OS-EXT-SRV-ATTR:ramdisk_id': '', 'OS-EXT-SRV-ATTR:reservation_id': 'r-ov3q80zj', 'OS-EXT-SRV-ATTR:root_device_name': '/dev/sda', 'OS-EXT-SRV-ATTR:user_data': 'IyEvYmluL2Jhc2gKL2Jpbi9IHlvdSEiCg==', 'OS-EXT-STS:power_state': 1, 'OS-EXT-STS:task_state': None, 'OS-EXT-STS:vm_state': 'active', 'OS-SRV-USG:launched_at': '2017-02-14T19:23:59.895661', 'OS-SRV-USG:terminated_at': '2015-03-09T12:15:57.233772', 'OS-SCH-HNT:scheduler_hints': {'key': '30'}, 'accessIPv4': '1.2.3.4', 'accessIPv6': '80fe::', 'adminPass': '27', 'addresses': { 'private': [ { 'OS-EXT-IPS-MAC:mac_addr': 'aa:bb:cc:dd:ee:ff', 
'OS-EXT-IPS:type': 'fixed', 'addr': '192.168.0.3', 'version': 4, } ] }, 'block_device_mapping_v2': {'key': '29'}, 'config_drive': '', 'created': '2017-02-14T19:23:58Z', 'description': 'dummy', 'flavorRef': '5', 'flavor': { 'disk': 1, 'ephemeral': 0, 'extra_specs': { 'hw:cpu_policy': 'dedicated', 'hw:mem_page_size': '2048', }, 'original_name': 'm1.tiny.specs', 'ram': 512, 'swap': 0, }, 'hostId': '2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6', 'host_status': 'UP', 'id': IDENTIFIER, 'imageRef': '8', 'image': { 'id': '70a599e0-31e7-49b7-b260-868f441e862b', 'links': [ { 'href': 'http://openstack.example.com/images/70a599e0', 'rel': 'bookmark', } ], }, 'key_name': 'dummy', 'links': [ { 'href': 'http://openstack.example.com/v2.1/servers/9168b536', 'rel': 'self', }, { 'href': 'http://openstack.example.com/servers/9168b536', 'rel': 'bookmark', }, ], 'locked': True, 'metadata': {'My Server Name': 'Apache1'}, 'name': 'new-server-test', 'networks': 'auto', 'os-extended-volumes:volumes_attached': [], 'progress': 0, 'security_groups': [{'name': 'default'}], 'server_groups': ['3caf4187-8010-491f-b6f5-a4a68a40371e'], 'status': 'ACTIVE', 'tags': [], 'tenant_id': '6f70656e737461636b20342065766572', 'trusted_image_certificates': [ '0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8', '674736e3-f25c-405c-8362-bbf991e0ce0a', ], 'updated': '2017-02-14T19:24:00Z', 'user_id': 'fake', } class TestServer(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.status_code = 200 self.sess = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) # totally arbitrary self.sess.default_microversion = '2.88' def test_basic(self): sot = server.Server() self.assertEqual('server', sot.resource_key) self.assertEqual('servers', sot.resources_key) self.assertEqual('/servers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) 
self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { "access_ipv4": "access_ip_v4", "access_ipv6": "access_ip_v6", "auto_disk_config": "auto_disk_config", "availability_zone": "availability_zone", "changes_before": "changes-before", "changes_since": "changes-since", "compute_host": "host", "has_config_drive": "config_drive", "created_at": "created_at", "description": "description", "flavor": "flavor", "hostname": "hostname", "image": "image", "ipv4_address": "ip", "ipv6_address": "ip6", "id": "uuid", "deleted_only": "deleted", "is_soft_deleted": "soft_deleted", "kernel_id": "kernel_id", "key_name": "key_name", "launch_index": "launch_index", "launched_at": "launched_at", "limit": "limit", "locked_by": "locked_by", "marker": "marker", "name": "name", "node": "node", "power_state": "power_state", "progress": "progress", "project_id": "project_id", "ramdisk_id": "ramdisk_id", "pinned_availability_zone": "pinned_availability_zone", "reservation_id": "reservation_id", "root_device_name": "root_device_name", "sort_dir": "sort_dir", "sort_key": "sort_key", "status": "status", "task_state": "task_state", "terminated_at": "terminated_at", "user_id": "user_id", "vm_state": "vm_state", "all_projects": "all_tenants", "tags": "tags", "any_tags": "tags-any", "not_tags": "not-tags", "not_any_tags": "not-tags-any", }, sot._query_mapping._mapping, ) def test_make_it(self): sot = server.Server(**EXAMPLE) self.assertEqual(EXAMPLE['accessIPv4'], sot.access_ipv4) self.assertEqual(EXAMPLE['accessIPv6'], sot.access_ipv6) self.assertEqual(EXAMPLE['addresses'], sot.addresses) self.assertEqual(EXAMPLE['created'], sot.created_at) self.assertEqual(EXAMPLE['config_drive'], sot.has_config_drive) self.assertEqual(EXAMPLE['flavorRef'], sot.flavor_id) self.assertEqual(flavor.Flavor(**EXAMPLE['flavor']), sot.flavor) self.assertEqual(EXAMPLE['hostId'], sot.host_id) self.assertEqual(EXAMPLE['host_status'], sot.host_status) self.assertEqual(EXAMPLE['id'], sot.id) 
self.assertEqual(EXAMPLE['imageRef'], sot.image_id) self.assertEqual(image.Image(**EXAMPLE['image']), sot.image) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['metadata'], sot.metadata) self.assertEqual(EXAMPLE['networks'], sot.networks) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['progress'], sot.progress) self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) self.assertEqual(EXAMPLE['server_groups'], sot.server_groups) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['updated'], sot.updated_at) self.assertEqual(EXAMPLE['user_id'], sot.user_id) self.assertEqual(EXAMPLE['key_name'], sot.key_name) self.assertEqual(EXAMPLE['OS-DCF:diskConfig'], sot.disk_config) self.assertEqual( EXAMPLE['OS-EXT-AZ:availability_zone'], sot.availability_zone ) self.assertEqual(EXAMPLE['OS-EXT-STS:power_state'], sot.power_state) self.assertEqual(EXAMPLE['OS-EXT-STS:task_state'], sot.task_state) self.assertEqual(EXAMPLE['OS-EXT-STS:vm_state'], sot.vm_state) self.assertEqual( EXAMPLE['os-extended-volumes:volumes_attached'], sot.attached_volumes, ) self.assertEqual(EXAMPLE['OS-SRV-USG:launched_at'], sot.launched_at) self.assertEqual( EXAMPLE['OS-SRV-USG:terminated_at'], sot.terminated_at ) self.assertEqual(EXAMPLE['security_groups'], sot.security_groups) self.assertEqual(EXAMPLE['adminPass'], sot.admin_password) self.assertEqual( EXAMPLE['block_device_mapping_v2'], sot.block_device_mapping ) self.assertEqual(EXAMPLE['OS-EXT-SRV-ATTR:host'], sot.compute_host) self.assertEqual(EXAMPLE['OS-EXT-SRV-ATTR:hostname'], sot.hostname) self.assertEqual( EXAMPLE['OS-EXT-SRV-ATTR:hypervisor_hostname'], sot.hypervisor_hostname, ) self.assertEqual( EXAMPLE['OS-EXT-SRV-ATTR:instance_name'], sot.instance_name ) self.assertEqual(EXAMPLE['OS-EXT-SRV-ATTR:kernel_id'], sot.kernel_id) self.assertEqual( EXAMPLE['OS-EXT-SRV-ATTR:launch_index'], sot.launch_index ) self.assertEqual(EXAMPLE['OS-EXT-SRV-ATTR:ramdisk_id'], sot.ramdisk_id) 
self.assertEqual( EXAMPLE['OS-EXT-SRV-ATTR:reservation_id'], sot.reservation_id ) self.assertEqual( EXAMPLE['OS-EXT-SRV-ATTR:root_device_name'], sot.root_device_name ) self.assertEqual( EXAMPLE['OS-SCH-HNT:scheduler_hints'], sot.scheduler_hints ) self.assertEqual(EXAMPLE['OS-EXT-SRV-ATTR:user_data'], sot.user_data) self.assertEqual(EXAMPLE['locked'], sot.is_locked) self.assertEqual( EXAMPLE['trusted_image_certificates'], sot.trusted_image_certificates, ) def test_to_dict_flavor(self): # Ensure to_dict properly resolves flavor and uses defaults for not # specified flavor proerties. sot = server.Server(**EXAMPLE) dct = sot.to_dict() self.assertEqual(0, dct['flavor']['vcpus']) def test__prepare_server(self): zone = 1 data = 2 hints = {"hint": 3} sot = server.Server( id=1, availability_zone=zone, user_data=data, scheduler_hints=hints, min_count=2, max_count=3, ) request = sot._prepare_request() self.assertNotIn( "OS-EXT-AZ:availability_zone", request.body[sot.resource_key] ) self.assertEqual( request.body[sot.resource_key]["availability_zone"], zone ) self.assertNotIn( "OS-EXT-SRV-ATTR:user_data", request.body[sot.resource_key] ) self.assertEqual(request.body[sot.resource_key]["user_data"], data) self.assertNotIn( "OS-SCH-HNT:scheduler_hints", request.body[sot.resource_key] ) self.assertEqual(request.body["OS-SCH-HNT:scheduler_hints"], hints) self.assertEqual(2, request.body[sot.resource_key]['min_count']) self.assertEqual(3, request.body[sot.resource_key]['max_count']) def test_change_password(self): sot = server.Server(**EXAMPLE) self.assertIsNone(sot.change_password(self.sess, 'a')) url = 'servers/IDENTIFIER/action' body = {"changePassword": {"adminPass": "a"}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_get_password(self): sot = server.Server(**EXAMPLE) self.sess.get.return_value = fakes.FakeResponse( data={'password': 'foo'} ) result = 
sot.get_password(self.sess) self.assertEqual('foo', result) url = 'servers/IDENTIFIER/os-server-password' self.sess.get.assert_called_with( url, microversion=self.sess.default_microversion ) def test_clear_password(self): sot = server.Server(**EXAMPLE) self.sess.delete.return_value = fakes.FakeResponse( status_code=http.HTTPStatus.NO_CONTENT, ) self.assertIsNone(sot.clear_password(self.sess)) url = 'servers/IDENTIFIER/os-server-password' self.sess.delete.assert_called_with( url, microversion=self.sess.default_microversion ) def test_reboot(self): sot = server.Server(**EXAMPLE) self.assertIsNone(sot.reboot(self.sess, 'HARD')) url = 'servers/IDENTIFIER/action' body = {"reboot": {"type": "HARD"}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_force_delete(self): sot = server.Server(**EXAMPLE) self.assertIsNone(sot.force_delete(self.sess)) url = 'servers/IDENTIFIER/action' body = {'forceDelete': None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_rebuild(self): sot = server.Server(**EXAMPLE) # Let the translate pass through, that portion is tested elsewhere sot._translate_response = lambda arg: arg result = sot.rebuild( self.sess, '123', name='noo', admin_password='seekr3t', preserve_ephemeral=False, access_ipv4="12.34.56.78", access_ipv6="fe80::100", metadata={"meta var": "meta val"}, user_data="ZWNobyAiaGVsbG8gd29ybGQi", key_name='my-ecdsa-key', description='an updated description', trusted_image_certificates=['foo'], hostname='new-hostname', ) self.assertIsInstance(result, server.Server) url = 'servers/IDENTIFIER/action' body = { "rebuild": { "name": "noo", "imageRef": "123", "adminPass": "seekr3t", "accessIPv4": "12.34.56.78", "accessIPv6": "fe80::100", "metadata": {"meta var": "meta val"}, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "preserve_ephemeral": False, 
"key_name": 'my-ecdsa-key', "description": 'an updated description', "trusted_image_certificates": ['foo'], "hostname": "new-hostname", } } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_rebuild_minimal(self): sot = server.Server(**EXAMPLE) # Let the translate pass through, that portion is tested elsewhere sot._translate_response = lambda arg: arg result = sot.rebuild( self.sess, '123', name='nootoo', admin_password='seekr3two', ) self.assertIsInstance(result, server.Server) url = 'servers/IDENTIFIER/action' body = { "rebuild": { "name": "nootoo", "imageRef": "123", "adminPass": "seekr3two", } } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_rebuild_none_values(self): sot = server.Server(**EXAMPLE) # Let the translate pass through, that portion is tested elsewhere sot._translate_response = lambda arg: arg result = sot.rebuild( self.sess, '123', admin_password=None, access_ipv4=None, access_ipv6=None, metadata=None, user_data=None, description=None, ) self.assertIsInstance(result, server.Server) url = 'servers/IDENTIFIER/action' body = { "rebuild": { "imageRef": "123", "adminPass": None, "accessIPv4": None, "accessIPv6": None, "metadata": None, "user_data": None, "description": None, } } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_resize(self): sot = server.Server(**EXAMPLE) self.assertIsNone(sot.resize(self.sess, '2')) url = 'servers/IDENTIFIER/action' body = {"resize": {"flavorRef": "2"}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_confirm_resize(self): sot = server.Server(**EXAMPLE) self.assertIsNone(sot.confirm_resize(self.sess)) url = 
'servers/IDENTIFIER/action' body = {"confirmResize": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_revert_resize(self): sot = server.Server(**EXAMPLE) self.assertIsNone(sot.revert_resize(self.sess)) url = 'servers/IDENTIFIER/action' body = {"revertResize": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_shelve_offload(self): sot = server.Server(**EXAMPLE) self.assertIsNone(sot.shelve_offload(self.sess)) url = 'servers/IDENTIFIER/action' body = {"shelveOffload": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_create_image_header(self): sot = server.Server(**EXAMPLE) name = 'noo' metadata = {'nu': 'image', 'created': 'today'} url = 'servers/IDENTIFIER/action' body = {"createImage": {'name': name, 'metadata': metadata}} headers = {'Accept': ''} rsp = mock.Mock() rsp.json.return_value = None rsp.headers = {'Location': 'dummy/dummy2'} rsp.status_code = 200 self.sess.post.return_value = rsp self.endpoint_data = mock.Mock( spec=['min_microversion', 'max_microversion'], min_microversion=None, max_microversion='2.44', ) self.sess.get_endpoint_data.return_value = self.endpoint_data image_id = sot.create_image(self.sess, name, metadata) self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) self.assertEqual('dummy2', image_id) def test_create_image_microver(self): sot = server.Server(**EXAMPLE) name = 'noo' metadata = {'nu': 'image', 'created': 'today'} url = 'servers/IDENTIFIER/action' body = {"createImage": {'name': name, 'metadata': metadata}} headers = {'Accept': ''} rsp = mock.Mock() rsp.json.return_value = {'image_id': 'dummy3'} rsp.headers = {'Location': 'dummy/dummy2'} rsp.status_code = 200 
self.sess.post.return_value = rsp self.endpoint_data = mock.Mock( spec=['min_microversion', 'max_microversion'], min_microversion='2.1', max_microversion='2.56', ) self.sess.get_endpoint_data.return_value = self.endpoint_data self.sess.default_microversion = None image_id = sot.create_image(self.sess, name, metadata) self.sess.post.assert_called_with( url, json=body, headers=headers, microversion='2.45' ) self.assertEqual('dummy3', image_id) def test_create_image_minimal(self): sot = server.Server(**EXAMPLE) name = 'noo' url = 'servers/IDENTIFIER/action' body = {"createImage": {'name': name}} headers = {'Accept': ''} rsp = mock.Mock() rsp.json.return_value = None rsp.headers = {'Location': 'dummy/dummy2'} rsp.status_code = 200 self.sess.post.return_value = rsp self.endpoint_data = mock.Mock( spec=['min_microversion', 'max_microversion'], min_microversion='2.1', max_microversion='2.56', ) self.sess.get_endpoint_data.return_value = self.endpoint_data self.sess.default_microversion = None self.assertIsNone(self.resp.body, sot.create_image(self.sess, name)) self.sess.post.assert_called_with( url, json=body, headers=headers, microversion='2.45' ) def test_add_security_group(self): sot = server.Server(**EXAMPLE) self.assertIsNone(sot.add_security_group(self.sess, "group")) url = 'servers/IDENTIFIER/action' body = {"addSecurityGroup": {"name": "group"}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_remove_security_group(self): sot = server.Server(**EXAMPLE) self.assertIsNone(sot.remove_security_group(self.sess, "group")) url = 'servers/IDENTIFIER/action' body = {"removeSecurityGroup": {"name": "group"}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_reset_state(self): sot = server.Server(**EXAMPLE) self.assertIsNone(sot.reset_state(self.sess, 'active')) url = 
'servers/IDENTIFIER/action' body = {"os-resetState": {"state": 'active'}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_add_fixed_ip(self): sot = server.Server(**EXAMPLE) res = sot.add_fixed_ip(self.sess, "NETWORK-ID") self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"addFixedIp": {"networkId": "NETWORK-ID"}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_remove_fixed_ip(self): sot = server.Server(**EXAMPLE) res = sot.remove_fixed_ip(self.sess, "ADDRESS") self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"removeFixedIp": {"address": "ADDRESS"}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_add_floating_ip(self): sot = server.Server(**EXAMPLE) res = sot.add_floating_ip(self.sess, "FLOATING-IP") self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"addFloatingIp": {"address": "FLOATING-IP"}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_add_floating_ip_with_fixed_addr(self): sot = server.Server(**EXAMPLE) res = sot.add_floating_ip(self.sess, "FLOATING-IP", "FIXED-ADDR") self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = { "addFloatingIp": { "address": "FLOATING-IP", "fixed_address": "FIXED-ADDR", } } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_remove_floating_ip(self): sot = server.Server(**EXAMPLE) res = sot.remove_floating_ip(self.sess, "I-AM-FLOATING") self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"removeFloatingIp": {"address": "I-AM-FLOATING"}} headers = {'Accept': ''} 
self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_backup(self): sot = server.Server(**EXAMPLE) res = sot.backup(self.sess, "name", "daily", 1) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = { "createBackup": { "name": "name", "backup_type": "daily", "rotation": 1, } } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_pause(self): sot = server.Server(**EXAMPLE) res = sot.pause(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"pause": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_unpause(self): sot = server.Server(**EXAMPLE) res = sot.unpause(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"unpause": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_suspend(self): sot = server.Server(**EXAMPLE) res = sot.suspend(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"suspend": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_resume(self): sot = server.Server(**EXAMPLE) res = sot.resume(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"resume": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_lock(self): sot = server.Server(**EXAMPLE) res = sot.lock(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"lock": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, 
microversion=self.sess.default_microversion, ) def test_lock_with_options(self): sot = server.Server(**EXAMPLE) res = sot.lock(self.sess, locked_reason='Because why not') self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {'lock': {'locked_reason': 'Because why not'}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_unlock(self): sot = server.Server(**EXAMPLE) res = sot.unlock(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"unlock": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_rescue(self): sot = server.Server(**EXAMPLE) res = sot.rescue(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"rescue": {}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_rescue_with_options(self): sot = server.Server(**EXAMPLE) res = sot.rescue(self.sess, admin_pass='SECRET', image_ref='IMG-ID') self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = { "rescue": {'adminPass': 'SECRET', 'rescue_image_ref': 'IMG-ID'} } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_unrescue(self): sot = server.Server(**EXAMPLE) res = sot.unrescue(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"unrescue": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_evacuate(self): sot = server.Server(**EXAMPLE) res = sot.evacuate(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"evacuate": {}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, 
headers=headers, microversion=self.sess.default_microversion, ) def test_evacuate_with_options(self): sot = server.Server(**EXAMPLE) res = sot.evacuate( self.sess, host='HOST2', admin_pass='NEW_PASS', force=True, on_shared_storage=False, ) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = { "evacuate": { 'host': 'HOST2', 'adminPass': 'NEW_PASS', 'force': True, 'onSharedStorage': False, } } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_start(self): sot = server.Server(**EXAMPLE) res = sot.start(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"os-start": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_stop(self): sot = server.Server(**EXAMPLE) res = sot.stop(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"os-stop": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_restore(self): sot = server.Server(**EXAMPLE) res = sot.restore(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {'restore': None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_shelve(self): sot = server.Server(**EXAMPLE) res = sot.shelve(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"shelve": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_unshelve(self): sot = server.Server(**EXAMPLE) res = sot.unshelve(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"unshelve": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, 
json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_unshelve_availability_zone(self): sot = server.Server(**EXAMPLE) res = sot.unshelve(self.sess, sot.availability_zone) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"unshelve": {"availability_zone": sot.availability_zone}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_unshelve_unpin_az(self): sot = server.Server(**EXAMPLE) res = sot.unshelve(self.sess, availability_zone=None) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"unshelve": {"availability_zone": None}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_unshelve_host(self): sot = server.Server(**EXAMPLE) res = sot.unshelve(self.sess, host=sot.hypervisor_hostname) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"unshelve": {"host": sot.hypervisor_hostname}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_unshelve_host_and_availability_zone(self): sot = server.Server(**EXAMPLE) res = sot.unshelve( self.sess, availability_zone=sot.availability_zone, host=sot.hypervisor_hostname, ) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = { "unshelve": { "availability_zone": sot.availability_zone, "host": sot.hypervisor_hostname, } } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_migrate(self): sot = server.Server(**EXAMPLE) res = sot.migrate(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {"migrate": None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) 
def test_trigger_crash_dump(self): sot = server.Server(**EXAMPLE) res = sot.trigger_crash_dump(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {'trigger_crash_dump': None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_get_console_output(self): sot = server.Server(**EXAMPLE) res = sot.get_console_output(self.sess) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {'os-getConsoleOutput': {}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) res = sot.get_console_output(self.sess, length=1) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {'os-getConsoleOutput': {'length': 1}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_get_console_url(self): sot = server.Server(**EXAMPLE) resp = mock.Mock() resp.body = {'console': {'a': 'b'}} resp.json = mock.Mock(return_value=resp.body) resp.status_code = 200 self.sess.post.return_value = resp res = sot.get_console_url(self.sess, 'novnc') self.sess.post.assert_called_with( 'servers/IDENTIFIER/action', json={'os-getVNCConsole': {'type': 'novnc'}}, headers={'Accept': ''}, microversion=self.sess.default_microversion, ) self.assertDictEqual(resp.body['console'], res) sot.get_console_url(self.sess, 'xvpvnc') self.sess.post.assert_called_with( 'servers/IDENTIFIER/action', json={'os-getVNCConsole': {'type': 'xvpvnc'}}, headers={'Accept': ''}, microversion=self.sess.default_microversion, ) sot.get_console_url(self.sess, 'spice-html5') self.sess.post.assert_called_with( 'servers/IDENTIFIER/action', json={'os-getSPICEConsole': {'type': 'spice-html5'}}, headers={'Accept': ''}, microversion=self.sess.default_microversion, ) sot.get_console_url(self.sess, 'rdp-html5') 
self.sess.post.assert_called_with( 'servers/IDENTIFIER/action', json={'os-getRDPConsole': {'type': 'rdp-html5'}}, headers={'Accept': ''}, microversion=self.sess.default_microversion, ) sot.get_console_url(self.sess, 'serial') self.sess.post.assert_called_with( 'servers/IDENTIFIER/action', json={'os-getSerialConsole': {'type': 'serial'}}, headers={'Accept': ''}, microversion=self.sess.default_microversion, ) self.assertRaises( ValueError, sot.get_console_url, self.sess, 'fake_type' ) def test_live_migrate_no_force(self): sot = server.Server(**EXAMPLE) class FakeEndpointData: min_microversion = None max_microversion = None self.sess.get_endpoint_data.return_value = FakeEndpointData() ex = self.assertRaises( ValueError, sot.live_migrate, self.sess, host='HOST2', force=False, block_migration=False, ) self.assertIn("Live migration on this cloud implies 'force'", str(ex)) def test_live_migrate_no_microversion_force_true(self): sot = server.Server(**EXAMPLE) class FakeEndpointData: min_microversion = None max_microversion = None self.sess.get_endpoint_data.return_value = FakeEndpointData() res = sot.live_migrate( self.sess, host='HOST2', force=True, block_migration=True, disk_over_commit=True, ) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = { 'os-migrateLive': { 'host': 'HOST2', 'disk_over_commit': True, 'block_migration': True, } } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_live_migrate_25(self): sot = server.Server(**EXAMPLE) class FakeEndpointData: min_microversion = '2.1' max_microversion = '2.25' self.sess.get_endpoint_data.return_value = FakeEndpointData() self.sess.default_microversion = None res = sot.live_migrate( self.sess, host='HOST2', force=True, block_migration=False ) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = { "os-migrateLive": { 'block_migration': False, 'host': 'HOST2', } } headers = {'Accept': ''} 
self.sess.post.assert_called_with( url, json=body, headers=headers, microversion='2.25' ) def test_live_migrate_25_default_block(self): sot = server.Server(**EXAMPLE) class FakeEndpointData: min_microversion = '2.1' max_microversion = '2.25' self.sess.get_endpoint_data.return_value = FakeEndpointData() self.sess.default_microversion = None res = sot.live_migrate( self.sess, host='HOST2', force=True, block_migration=None ) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = { "os-migrateLive": { 'block_migration': 'auto', 'host': 'HOST2', } } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion='2.25' ) def test_live_migrate_30(self): sot = server.Server(**EXAMPLE) class FakeEndpointData: min_microversion = '2.1' max_microversion = '2.30' self.sess.get_endpoint_data.return_value = FakeEndpointData() self.sess.default_microversion = None res = sot.live_migrate( self.sess, host='HOST2', force=False, block_migration=False ) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = {'os-migrateLive': {'block_migration': False, 'host': 'HOST2'}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion='2.30' ) def test_live_migrate_30_force(self): sot = server.Server(**EXAMPLE) class FakeEndpointData: min_microversion = '2.1' max_microversion = '2.30' self.sess.get_endpoint_data.return_value = FakeEndpointData() self.sess.default_microversion = None res = sot.live_migrate( self.sess, host='HOST2', force=True, block_migration=None ) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' body = { 'os-migrateLive': { 'block_migration': 'auto', 'host': 'HOST2', 'force': True, } } headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion='2.30' ) def test_get_topology(self): sot = server.Server(**EXAMPLE) class FakeEndpointData: min_microversion = '2.1' max_microversion = '2.78' 
self.sess.get_endpoint_data.return_value = FakeEndpointData() self.sess.default_microversion = None response = mock.Mock() topology = { "nodes": [ { "cpu_pinning": {"0": 0, "1": 5}, "host_node": 0, "memory_mb": 1024, "siblings": [[0, 1]], "vcpu_set": [0, 1], }, { "cpu_pinning": {"2": 1, "3": 8}, "host_node": 1, "memory_mb": 2048, "siblings": [[2, 3]], "vcpu_set": [2, 3], }, ], "pagesize_kb": 4, } response.status_code = 200 response.json.return_value = topology self.sess.get.return_value = response fetched_topology = sot.fetch_topology(self.sess) url = 'servers/IDENTIFIER/topology' self.sess.get.assert_called_with(url) self.assertEqual(fetched_topology, topology) def test_get_security_groups(self): sot = server.Server(**EXAMPLE) response = mock.Mock() sgs = [ { 'description': 'default', 'id': 1, 'name': 'default', 'rules': [ { 'direction': 'egress', 'ethertype': 'IPv6', 'id': '3c0e45ff-adaf-4124-b083-bf390e5482ff', 'port_range_max': None, 'port_range_min': None, 'protocol': None, 'remote_group_id': None, 'remote_ip_prefix': None, 'security_group_id': '1', 'project_id': 'e4f50856753b4dc6afee5fa6b9b6c550', 'revision_number': 1, 'tags': ['tag1,tag2'], 'tenant_id': 'e4f50856753b4dc6afee5fa6b9b6c550', 'created_at': '2018-03-19T19:16:56Z', 'updated_at': '2018-03-19T19:16:56Z', 'description': '', } ], 'tenant_id': 'e4f50856753b4dc6afee5fa6b9b6c550', } ] response.status_code = 200 response.json.return_value = {'security_groups': sgs} self.sess.get.return_value = response sot.fetch_security_groups(self.sess) url = 'servers/IDENTIFIER/os-security-groups' self.sess.get.assert_called_with(url) self.assertEqual(sot.security_groups, sgs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_server_actions.py0000664000175000017500000000640500000000000027006 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use 
this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.compute.v2 import server_action from openstack.tests.unit import base EXAMPLE = { 'action': 'stop', 'events': [ { 'event': 'compute_stop_instance', 'finish_time': '2018-04-25T01:26:36.790544', 'host': 'compute', 'hostId': '2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6', # noqa: E501 'result': 'Success', 'start_time': '2018-04-25T01:26:36.539271', 'traceback': None, 'details': None, } ], 'instance_uuid': '4bf3473b-d550-4b65-9409-292d44ab14a2', 'message': None, 'project_id': '6f70656e737461636b20342065766572', 'request_id': 'req-0d819d5c-1527-4669-bdf0-ffad31b5105b', 'start_time': '2018-04-25T01:26:36.341290', 'updated_at': '2018-04-25T01:26:36.790544', 'user_id': 'admin', } class TestServerAction(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.status_code = 200 self.sess = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) def test_basic(self): sot = server_action.ServerAction() self.assertEqual('instanceAction', sot.resource_key) self.assertEqual('instanceActions', sot.resources_key) self.assertEqual( '/servers/%(server_id)s/os-instance-actions', sot.base_path, ) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertDictEqual( { 'changes_before': 'changes-before', 'changes_since': 'changes-since', 'limit': 'limit', 
'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = server_action.ServerAction(**EXAMPLE) self.assertEqual(EXAMPLE['action'], sot.action) # FIXME: This isn't populated since it conflicts with the server_id URI # argument # self.assertEqual(EXAMPLE['instance_uuid'], sot.server_id) self.assertEqual(EXAMPLE['message'], sot.message) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['request_id'], sot.request_id) self.assertEqual(EXAMPLE['start_time'], sot.start_time) self.assertEqual(EXAMPLE['user_id'], sot.user_id) self.assertEqual( [server_action.ServerActionEvent(**e) for e in EXAMPLE['events']], sot.events, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_server_diagnostics.py0000664000175000017500000000551300000000000027654 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.compute.v2 import server_diagnostics from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "config_drive": True, "cpu_details": [{"id": 0, "time": 17300000000, "utilisation": 15}], "disk_details": [ { "errors_count": 1, "read_bytes": 262144, "read_requests": 112, "write_bytes": 5778432, "write_requests": 488, } ], "driver": "libvirt", "hypervisor": "kvm", "hypervisor_os": "ubuntu", "memory_details": {"maximum": 524288, "used": 0}, "nic_details": [ { "mac_address": "01:23:45:67:89:ab", "rx_drop": 200, "rx_errors": 100, "rx_octets": 2070139, "rx_packets": 26701, "rx_rate": 300, "tx_drop": 500, "tx_errors": 400, "tx_octets": 140208, "tx_packets": 662, "tx_rate": 600, } ], "num_cpus": 1, "num_disks": 1, "num_nics": 1, "state": "running", "uptime": 46664, } class TestServerInterface(base.TestCase): def test_basic(self): sot = server_diagnostics.ServerDiagnostics() self.assertEqual('diagnostics', sot.resource_key) self.assertEqual('/servers/%(server_id)s/diagnostics', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.requires_id) def test_make_it(self): sot = server_diagnostics.ServerDiagnostics(**EXAMPLE) self.assertEqual(EXAMPLE['config_drive'], sot.has_config_drive) self.assertEqual(EXAMPLE['cpu_details'], sot.cpu_details) self.assertEqual(EXAMPLE['disk_details'], sot.disk_details) self.assertEqual(EXAMPLE['driver'], sot.driver) self.assertEqual(EXAMPLE['hypervisor'], sot.hypervisor) self.assertEqual(EXAMPLE['hypervisor_os'], sot.hypervisor_os) self.assertEqual(EXAMPLE['memory_details'], sot.memory_details) self.assertEqual(EXAMPLE['nic_details'], sot.nic_details) self.assertEqual(EXAMPLE['num_cpus'], sot.num_cpus) self.assertEqual(EXAMPLE['num_disks'], sot.num_disks) self.assertEqual(EXAMPLE['num_nics'], sot.num_nics) self.assertEqual(EXAMPLE['state'], sot.state) self.assertEqual(EXAMPLE['uptime'], sot.uptime) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_server_group.py0000664000175000017500000000367300000000000026506 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.compute.v2 import server_group from openstack.tests.unit import base EXAMPLE = { 'id': 'IDENTIFIER', 'name': 'test', 'members': ['server1', 'server2'], 'metadata': {}, 'policies': ['anti-affinity'], 'rules': { 'max_server_per_host': 5, }, } class TestServerGroup(base.TestCase): def test_basic(self): sot = server_group.ServerGroup() self.assertEqual('server_group', sot.resource_key) self.assertEqual('server_groups', sot.resources_key) self.assertEqual('/os-server-groups', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { "all_projects": "all_projects", "limit": "limit", "marker": "marker", }, sot._query_mapping._mapping, ) def test_make_it(self): sot = server_group.ServerGroup(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['members'], sot.member_ids) self.assertEqual(EXAMPLE['metadata'], sot.metadata) self.assertEqual(EXAMPLE['policies'], sot.policies) self.assertEqual(EXAMPLE['rules'], sot.rules) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_server_interface.py0000664000175000017500000000370400000000000027305 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.compute.v2 import server_interface from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'fixed_ips': [ { 'ip_address': '192.168.1.1', 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef', } ], 'mac_addr': '2', 'net_id': '3', 'port_id': '4', 'port_state': '5', 'server_id': '6', 'tag': '7', } class TestServerInterface(base.TestCase): def test_basic(self): sot = server_interface.ServerInterface() self.assertEqual('interfaceAttachment', sot.resource_key) self.assertEqual('interfaceAttachments', sot.resources_key) self.assertEqual('/servers/%(server_id)s/os-interface', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = server_interface.ServerInterface(**EXAMPLE) self.assertEqual(EXAMPLE['fixed_ips'], sot.fixed_ips) self.assertEqual(EXAMPLE['mac_addr'], sot.mac_addr) self.assertEqual(EXAMPLE['net_id'], sot.net_id) self.assertEqual(EXAMPLE['port_id'], sot.port_id) self.assertEqual(EXAMPLE['port_state'], sot.port_state) self.assertEqual(EXAMPLE['server_id'], sot.server_id) self.assertEqual(EXAMPLE['tag'], sot.tag) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_server_ip.py0000664000175000017500000000742700000000000025763 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.compute.v2 import server_ip from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'addr': '1', 'network_label': '2', 'version': '4', } class TestServerIP(base.TestCase): def test_basic(self): sot = server_ip.ServerIP() self.assertEqual('addresses', sot.resources_key) self.assertEqual('/servers/%(server_id)s/ips', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = server_ip.ServerIP(**EXAMPLE) self.assertEqual(EXAMPLE['addr'], sot.address) self.assertEqual(EXAMPLE['network_label'], sot.network_label) self.assertEqual(EXAMPLE['version'], sot.version) def test_list(self): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp resp.json.return_value = { "addresses": { "label1": [ {"version": 1, "addr": "a1"}, {"version": 2, "addr": "a2"}, ], "label2": [ {"version": 3, "addr": "a3"}, {"version": 4, "addr": "a4"}, ], } } ips = list(server_ip.ServerIP.list(sess, server_id=IDENTIFIER)) self.assertEqual(4, len(ips)) ips = sorted(ips, key=lambda ip: ip.version) self.assertIsInstance(ips[0], server_ip.ServerIP) 
self.assertEqual(ips[0].network_label, "label1") self.assertEqual(ips[0].address, "a1") self.assertEqual(ips[0].version, 1) self.assertIsInstance(ips[1], server_ip.ServerIP) self.assertEqual(ips[1].network_label, "label1") self.assertEqual(ips[1].address, "a2") self.assertEqual(ips[1].version, 2) self.assertIsInstance(ips[2], server_ip.ServerIP) self.assertEqual(ips[2].network_label, "label2") self.assertEqual(ips[2].address, "a3") self.assertEqual(ips[2].version, 3) self.assertIsInstance(ips[3], server_ip.ServerIP) self.assertEqual(ips[3].network_label, "label2") self.assertEqual(ips[3].address, "a4") self.assertEqual(ips[3].version, 4) def test_list_network_label(self): label = "label1" sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp resp.json.return_value = { label: [{"version": 1, "addr": "a1"}, {"version": 2, "addr": "a2"}] } ips = list( server_ip.ServerIP.list( sess, server_id=IDENTIFIER, network_label=label ) ) self.assertEqual(2, len(ips)) ips = sorted(ips, key=lambda ip: ip.version) self.assertIsInstance(ips[0], server_ip.ServerIP) self.assertEqual(ips[0].network_label, label) self.assertEqual(ips[0].address, "a1") self.assertEqual(ips[0].version, 1) self.assertIsInstance(ips[1], server_ip.ServerIP) self.assertEqual(ips[1].network_label, label) self.assertEqual(ips[1].address, "a2") self.assertEqual(ips[1].version, 2) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_server_migration.py0000664000175000017500000001055100000000000027334 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.compute.v2 import server_migration from openstack.tests.unit import base EXAMPLE = { 'id': 4, 'server_id': '4cfba335-03d8-49b2-8c52-e69043d1e8fe', 'server_uuid': '4cfba335-03d8-49b2-8c52-e69043d1e8fe', 'user_id': '8dbaa0f0-ab95-4ffe-8cb4-9c89d2ac9d24', 'project_id': '5f705771-3aa9-4f4c-8660-0d9522ffdbea', 'created_at': '2016-01-29T13:42:02.000000', 'updated_at': '2016-01-29T13:42:02.000000', 'status': 'migrating', 'source_compute': 'compute1', 'source_node': 'node1', 'dest_host': '1.2.3.4', 'dest_compute': 'compute2', 'dest_node': 'node2', 'memory_processed_bytes': 12345, 'memory_remaining_bytes': 111111, 'memory_total_bytes': 123456, 'disk_processed_bytes': 23456, 'disk_remaining_bytes': 211111, 'disk_total_bytes': 234567, } class TestServerMigration(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.status_code = 200 self.sess = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) def test_basic(self): sot = server_migration.ServerMigration() self.assertEqual('migration', sot.resource_key) self.assertEqual('migrations', sot.resources_key) self.assertEqual('/servers/%(server_id)s/migrations', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) def test_make_it(self): sot = server_migration.ServerMigration(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) # FIXME(stephenfin): 
This conflicts since there is a server ID in the # URI *and* in the body. We need a field that handles both or we need # to use different names. # self.assertEqual(EXAMPLE['server_uuid'], sot.server_id) self.assertEqual(EXAMPLE['user_id'], sot.user_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['source_compute'], sot.source_compute) self.assertEqual(EXAMPLE['source_node'], sot.source_node) self.assertEqual(EXAMPLE['dest_host'], sot.dest_host) self.assertEqual(EXAMPLE['dest_compute'], sot.dest_compute) self.assertEqual(EXAMPLE['dest_node'], sot.dest_node) self.assertEqual( EXAMPLE['memory_processed_bytes'], sot.memory_processed_bytes, ) self.assertEqual( EXAMPLE['memory_remaining_bytes'], sot.memory_remaining_bytes, ) self.assertEqual(EXAMPLE['memory_total_bytes'], sot.memory_total_bytes) self.assertEqual( EXAMPLE['disk_processed_bytes'], sot.disk_processed_bytes, ) self.assertEqual( EXAMPLE['disk_remaining_bytes'], sot.disk_remaining_bytes, ) self.assertEqual(EXAMPLE['disk_total_bytes'], sot.disk_total_bytes) @mock.patch.object( server_migration.ServerMigration, '_get_session', lambda self, x: x, ) def test_force_complete(self): sot = server_migration.ServerMigration(**EXAMPLE) self.assertIsNone(sot.force_complete(self.sess)) url = 'servers/{}/migrations/{}/action'.format( EXAMPLE['server_id'], EXAMPLE['id'], ) body = {'force_complete': None} self.sess.post.assert_called_with( url, microversion=mock.ANY, json=body, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_server_remote_console.py0000664000175000017500000000440500000000000030361 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.compute.v2 import server_remote_console from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = {'protocol': 'rdp', 'type': 'rdp', 'url': 'fake'} class TestServerRemoteConsole(base.TestCase): def setUp(self): super().setUp() self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = '2.9' self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.status_code = 200 self.sess = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_basic(self): sot = server_remote_console.ServerRemoteConsole() self.assertEqual('remote_console', sot.resource_key) self.assertEqual( '/servers/%(server_id)s/remote-consoles', sot.base_path ) self.assertTrue(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = server_remote_console.ServerRemoteConsole(**EXAMPLE) self.assertEqual(EXAMPLE['url'], sot.url) def test_create_type_mks_old(self): sot = server_remote_console.ServerRemoteConsole( server_id='fake_server', type='webmks' ) class FakeEndpointData: min_microversion = '2' max_microversion = '2.5' self.sess.get_endpoint_data.return_value = FakeEndpointData() self.assertRaises(ValueError, sot.create, self.sess) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_service.py0000664000175000017500000002002400000000000025411 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.compute.v2 import service from openstack import exceptions from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'binary': 'nova-compute', 'host': 'host1', 'status': 'enabled', 'state': 'up', 'zone': 'nova', } class TestService(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = {'service': {}} self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.status_code = 200 self.resp.headers = {} self.sess = mock.Mock() self.sess.put = mock.Mock(return_value=self.resp) self.sess.default_microversion = '2.1' def test_basic(self): sot = service.Service() self.assertEqual('service', sot.resource_key) self.assertEqual('services', sot.resources_key) self.assertEqual('/os-services', sot.base_path) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_fetch) self.assertDictEqual( { 'binary': 'binary', 'host': 'host', 'limit': 'limit', 'marker': 'marker', 'name': 'binary', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = service.Service(**EXAMPLE) self.assertEqual(EXAMPLE['host'], sot.host) self.assertEqual(EXAMPLE['binary'], sot.binary) self.assertEqual(EXAMPLE['binary'], sot.name) 
self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['state'], sot.state) self.assertEqual(EXAMPLE['zone'], sot.availability_zone) self.assertEqual(EXAMPLE['id'], sot.id) def test_find_single_match(self): data = [ service.Service(name='bin1', host='host', id=1), service.Service(name='bin2', host='host', id=2), ] with mock.patch.object(service.Service, 'list') as list_mock: list_mock.return_value = data sot = service.Service.find( self.sess, 'bin1', ignore_missing=True, host='host' ) self.assertEqual(data[0], sot) def test_find_with_id_single_match(self): data = [ service.Service(name='bin1', host='host', id=1), service.Service(name='bin2', host='host', id='2'), ] with mock.patch.object(service.Service, 'list') as list_mock: list_mock.return_value = data sot = service.Service.find( self.sess, '2', ignore_missing=True, binary='bin1', host='host' ) self.assertEqual(data[1], sot) # Verify find when ID is int sot = service.Service.find( self.sess, 1, ignore_missing=True, binary='bin1', host='host' ) self.assertEqual(data[0], sot) def test_find_no_match(self): data = [ service.Service(name='bin1', host='host', id=1), service.Service(name='bin2', host='host', id=2), ] with mock.patch.object(service.Service, 'list') as list_mock: list_mock.return_value = data self.assertIsNone( service.Service.find( self.sess, 'fake', ignore_missing=True, host='host' ) ) def test_find_no_match_exception(self): data = [ service.Service(name='bin1', host='host', id=1), service.Service(name='bin2', host='host', id=2), ] with mock.patch.object(service.Service, 'list') as list_mock: list_mock.return_value = data self.assertRaises( exceptions.NotFoundException, service.Service.find, self.sess, 'fake', ignore_missing=False, host='host', ) def test_find_multiple_match(self): data = [ service.Service(name='bin1', host='host', id=1), service.Service(name='bin1', host='host', id=2), ] with mock.patch.object(service.Service, 'list') as list_mock: list_mock.return_value = data 
self.assertRaises( exceptions.DuplicateResource, service.Service.find, self.sess, 'bin1', ignore_missing=False, host='host', ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=False, ) def test_set_forced_down_before_211(self, mv_mock): sot = service.Service(**EXAMPLE) res = sot.set_forced_down(self.sess, 'host1', 'nova-compute', True) self.assertIsNotNone(res) url = 'os-services/force-down' body = { 'binary': 'nova-compute', 'host': 'host1', } self.sess.put.assert_called_with( url, json=body, microversion=self.sess.default_microversion ) @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) def test_set_forced_down_after_211(self, mv_mock): sot = service.Service(**EXAMPLE) res = sot.set_forced_down(self.sess, 'host1', 'nova-compute', True) self.assertIsNotNone(res) url = 'os-services/force-down' body = { 'binary': 'nova-compute', 'host': 'host1', 'forced_down': True, } self.sess.put.assert_called_with(url, json=body, microversion='2.11') @mock.patch( 'openstack.utils.supports_microversion', autospec=True, return_value=True, ) def test_set_forced_down_after_253(self, mv_mock): sot = service.Service(**EXAMPLE) res = sot.set_forced_down(self.sess, None, None, True) self.assertIsNotNone(res) url = 'os-services/force-down' body = { 'binary': sot.binary, 'host': sot.host, 'forced_down': True, } self.sess.put.assert_called_with(url, json=body, microversion='2.11') def test_enable(self): sot = service.Service(**EXAMPLE) res = sot.enable(self.sess, 'host1', 'nova-compute') self.assertIsNotNone(res) url = 'os-services/enable' body = { 'binary': 'nova-compute', 'host': 'host1', } self.sess.put.assert_called_with( url, json=body, microversion=self.sess.default_microversion ) def test_disable(self): sot = service.Service(**EXAMPLE) res = sot.disable(self.sess, 'host1', 'nova-compute') self.assertIsNotNone(res) url = 'os-services/disable' body = { 'binary': 'nova-compute', 'host': 'host1', } 
self.sess.put.assert_called_with( url, json=body, microversion=self.sess.default_microversion ) def test_disable_with_reason(self): sot = service.Service(**EXAMPLE) reason = 'fencing' res = sot.disable(self.sess, 'host1', 'nova-compute', reason=reason) self.assertIsNotNone(res) url = 'os-services/disable-log-reason' body = { 'binary': 'nova-compute', 'host': 'host1', 'disabled_reason': reason, } self.sess.put.assert_called_with( url, json=body, microversion=self.sess.default_microversion ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_usage.py0000664000175000017500000000727000000000000025065 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.compute.v2 import usage from openstack.tests.unit import base EXAMPLE = { "tenant_id": "781c9299e68d4b7c80ef52712889647f", "server_usages": [ { "hours": 79.51840531333333, "flavor": "m1.tiny", "instance_id": "76638c30-d199-4c2e-8154-7dea963bfe2f", "name": "test-server", "tenant_id": "781c9299e68d4b7c80ef52712889647f", "memory_mb": 512, "local_gb": 1, "vcpus": 1, "started_at": "2022-05-16T10:35:31.000000", "ended_at": None, "state": "active", "uptime": 286266, } ], "total_local_gb_usage": 79.51840531333333, "total_vcpus_usage": 79.51840531333333, "total_memory_mb_usage": 40713.423520426666, "total_hours": 79.51840531333333, "start": "2022-04-21T18:06:47.064959", "stop": "2022-05-19T18:06:37.259128", } class TestUsage(base.TestCase): def test_basic(self): sot = usage.Usage() self.assertEqual('tenant_usage', sot.resource_key) self.assertEqual('tenant_usages', sot.resources_key) self.assertEqual('/os-simple-tenant-usage', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) def test_make_it(self): sot = usage.Usage(**EXAMPLE) self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) self.assertEqual( EXAMPLE['total_local_gb_usage'], sot.total_local_gb_usage, ) self.assertEqual(EXAMPLE['total_vcpus_usage'], sot.total_vcpus_usage) self.assertEqual( EXAMPLE['total_memory_mb_usage'], sot.total_memory_mb_usage, ) self.assertEqual(EXAMPLE['total_hours'], sot.total_hours) self.assertEqual(EXAMPLE['start'], sot.start) self.assertEqual(EXAMPLE['stop'], sot.stop) # now do the embedded objects self.assertIsInstance(sot.server_usages, list) self.assertEqual(1, len(sot.server_usages)) ssot = sot.server_usages[0] self.assertIsInstance(ssot, usage.ServerUsage) self.assertEqual(EXAMPLE['server_usages'][0]['hours'], ssot.hours) self.assertEqual(EXAMPLE['server_usages'][0]['flavor'], ssot.flavor) self.assertEqual( EXAMPLE['server_usages'][0]['instance_id'], ssot.instance_id ) self.assertEqual(EXAMPLE['server_usages'][0]['name'], ssot.name) 
self.assertEqual( EXAMPLE['server_usages'][0]['tenant_id'], ssot.project_id ) self.assertEqual( EXAMPLE['server_usages'][0]['memory_mb'], ssot.memory_mb ) self.assertEqual( EXAMPLE['server_usages'][0]['local_gb'], ssot.local_gb ) self.assertEqual(EXAMPLE['server_usages'][0]['vcpus'], ssot.vcpus) self.assertEqual( EXAMPLE['server_usages'][0]['started_at'], ssot.started_at ) self.assertEqual( EXAMPLE['server_usages'][0]['ended_at'], ssot.ended_at ) self.assertEqual(EXAMPLE['server_usages'][0]['state'], ssot.state) self.assertEqual(EXAMPLE['server_usages'][0]['uptime'], ssot.uptime) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/compute/v2/test_volume_attachment.py0000664000175000017500000000471000000000000027474 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.compute.v2 import volume_attachment from openstack.tests.unit import base EXAMPLE = { 'attachment_id': '979ce4f8-033a-409d-85e6-6b5c0f6a6302', 'delete_on_termination': False, 'device': '/dev/sdc', 'serverId': '7696780b-3f53-4688-ab25-019bfcbbd806', 'tag': 'foo', 'volumeId': 'a07f71dc-8151-4e7d-a0cc-cd24a3f11113', 'bdm_uuid': 'c088db45-92b8-49e8-81e2-a1b77a144b3b', } class TestServerInterface(base.TestCase): def test_basic(self): sot = volume_attachment.VolumeAttachment() self.assertEqual('volumeAttachment', sot.resource_key) self.assertEqual('volumeAttachments', sot.resources_key) self.assertEqual( '/servers/%(server_id)s/os-volume_attachments', sot.base_path, ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( {"limit": "limit", "offset": "offset", "marker": "marker"}, sot._query_mapping._mapping, ) def test_make_it(self): sot = volume_attachment.VolumeAttachment(**EXAMPLE) self.assertEqual(EXAMPLE['volumeId'], sot.id) self.assertEqual(EXAMPLE['attachment_id'], sot.attachment_id) self.assertEqual( EXAMPLE['delete_on_termination'], sot.delete_on_termination, ) self.assertEqual(EXAMPLE['device'], sot.device) # FIXME(stephenfin): This conflicts since there is a server ID in the # URI *and* in the body. We need a field that handles both or we need # to use different names. 
# self.assertEqual(EXAMPLE['serverId'], sot.server_id) self.assertEqual(EXAMPLE['tag'], sot.tag) self.assertEqual(EXAMPLE['volumeId'], sot.volume_id) self.assertEqual(EXAMPLE['bdm_uuid'], sot.bdm_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4374058 openstacksdk-4.0.0/openstack/tests/unit/config/0000775000175000017500000000000000000000000021604 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/config/__init__.py0000664000175000017500000000000000000000000023703 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/config/base.py0000664000175000017500000002154500000000000023077 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import os import tempfile import fixtures import yaml from openstack.config import cloud_region from openstack.tests.unit import base VENDOR_CONF = { 'public-clouds': { '_test_cloud_in_our_cloud': { 'auth': { 'auth_url': 'http://example.com/v2', 'username': 'testotheruser', 'project_name': 'testproject', }, }, } } USER_CONF = { 'cache': { 'max_age': '1', 'expiration': { 'server': 5, 'image': '7', }, }, 'client': { 'force_ipv4': True, }, 'metrics': { 'statsd': {'host': '127.0.0.1', 'port': '1234'}, 'influxdb': { 'host': '127.0.0.1', 'port': '1234', 'use_udp': True, 'username': 'username', 'password': 'password', 'database': 'database', 'measurement': 'measurement.name', 'timeout': 10, }, }, 'clouds': { '_test-cloud_': { 'profile': '_test_cloud_in_our_cloud', 'auth': { 'auth_url': 'http://example.com/v2', 'username': 'testuser', 'password': 'testpass', }, 'region_name': 'test-region', }, '_test_cloud_no_vendor': { 'profile': '_test_non_existant_cloud', 'auth': { 'auth_url': 'http://example.com/v2', 'username': 'testuser', 'project_name': 'testproject', }, 'region-name': 'test-region', }, '_test-cloud-int-project_': { 'auth': { 'username': 'testuser', 'password': 'testpass', 'domain_id': 'awesome-domain', 'project_id': 12345, 'auth_url': 'http://example.com/v2', }, 'region_name': 'test-region', }, '_test-cloud-domain-id_': { 'auth': { 'username': 'testuser', 'password': 'testpass', 'project_id': 12345, 'auth_url': 'http://example.com/v2', 'domain_id': '6789', 'project_domain_id': '123456789', }, 'region_name': 'test-region', }, '_test-cloud-networks_': { 'auth': { 'username': 'testuser', 'password': 'testpass', 'project_id': 12345, 'auth_url': 'http://example.com/v2', 'domain_id': '6789', 'project_domain_id': '123456789', }, 'networks': [ { 'name': 'a-public', 'routes_externally': True, 'nat_source': True, }, { 'name': 'another-public', 'routes_externally': True, 'default_interface': True, }, { 'name': 'a-private', 'routes_externally': False, }, { 'name': 
'another-private', 'routes_externally': False, 'nat_destination': True, }, { 'name': 'split-default', 'routes_externally': True, 'routes_ipv4_externally': False, }, { 'name': 'split-no-default', 'routes_ipv6_externally': False, 'routes_ipv4_externally': True, }, ], 'region_name': 'test-region', }, '_test_cloud_regions': { 'auth': { 'username': 'testuser', 'password': 'testpass', 'project-id': 'testproject', 'auth_url': 'http://example.com/v2', }, 'regions': [ { 'name': 'region1', 'values': { 'external_network': 'region1-network', }, }, { 'name': 'region2', 'values': { 'external_network': 'my-network', }, }, { 'name': 'region-no-value', }, ], }, '_test_cloud_hyphenated': { 'auth': { 'username': 'testuser', 'password': 'testpass', 'project-id': '12345', 'auth_url': 'http://example.com/v2', }, 'region_name': 'test-region', }, '_test-cloud_no_region': { 'profile': '_test_cloud_in_our_cloud', 'auth': { 'auth_url': 'http://example.com/v2', 'username': 'testuser', 'password': 'testpass', }, }, '_test-cloud-domain-scoped_': { 'auth': { 'auth_url': 'http://example.com/v2', 'username': 'testuser', 'password': 'testpass', 'domain-id': '12345', }, }, '_test-cloud-override-metrics': { 'auth': { 'auth_url': 'http://example.com/v2', 'username': 'testuser', 'password': 'testpass', }, 'metrics': { 'statsd': { 'host': '127.0.0.1', 'port': 4321, 'prefix': 'statsd.override.prefix', }, 'influxdb': { 'username': 'override-username', 'password': 'override-password', 'database': 'override-database', }, }, }, }, 'ansible': { 'expand-hostvars': False, 'use_hostnames': True, }, } SECURE_CONF = { 'clouds': { '_test_cloud_no_vendor': { 'auth': { 'password': 'testpass', }, } } } NO_CONF = { 'cache': {'max_age': 1}, } def _write_yaml(obj): # Assume NestedTempfile so we don't have to cleanup with tempfile.NamedTemporaryFile(delete=False, suffix='.yaml') as obj_yaml: obj_yaml.write(yaml.safe_dump(obj).encode('utf-8')) return obj_yaml.name class TestCase(base.TestCase): """Test case base class for 
all unit tests.""" def setUp(self): super().setUp() conf = copy.deepcopy(USER_CONF) tdir = self.useFixture(fixtures.TempDir()) conf['cache']['path'] = tdir.path self.cloud_yaml = _write_yaml(conf) self.secure_yaml = _write_yaml(SECURE_CONF) self.vendor_yaml = _write_yaml(VENDOR_CONF) self.no_yaml = _write_yaml(NO_CONF) # Isolate the test runs from the environment # Do this as two loops because you can't modify the dict in a loop # over the dict in 3.4 keys_to_isolate = [] for env in os.environ.keys(): if env.startswith('OS_'): keys_to_isolate.append(env) for env in keys_to_isolate: self.useFixture(fixtures.EnvironmentVariable(env)) def _assert_cloud_details(self, cc): self.assertIsInstance(cc, cloud_region.CloudRegion) self.assertTrue(hasattr(cc, 'auth')) self.assertIsInstance(cc.auth, dict) self.assertIsNone(cc.cloud) self.assertIn('username', cc.auth) self.assertEqual('testuser', cc.auth['username']) self.assertEqual('testpass', cc.auth['password']) self.assertFalse(cc.config['image_api_use_tasks']) self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth) if 'project_name' in cc.auth: self.assertEqual('testproject', cc.auth['project_name']) elif 'project_id' in cc.auth: self.assertEqual('testproject', cc.auth['project_id']) self.assertEqual(cc.get_cache_expiration_time(), 1) self.assertEqual(cc.get_cache_resource_expiration('server'), 5.0) self.assertEqual(cc.get_cache_resource_expiration('image'), 7.0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/config/test_cloud_config.py0000664000175000017500000004304600000000000025657 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from keystoneauth1 import exceptions as ksa_exceptions from keystoneauth1 import session as ksa_session from openstack.config import cloud_region from openstack.config import defaults from openstack import exceptions from openstack.tests.unit.config import base from openstack import version as openstack_version fake_config_dict = {'a': 1, 'os_b': 2, 'c': 3, 'os_c': 4} fake_services_dict = { 'compute_api_version': '2', 'compute_endpoint_override': 'http://compute.example.com', 'telemetry_endpoint': 'http://telemetry.example.com', 'interface': 'public', 'image_service_type': 'mage', 'identity_interface': 'admin', 'identity_service_name': 'locks', 'volume_api_version': '1', 'auth': {'password': 'hunter2', 'username': 'AzureDiamond'}, 'connect_retries': 1, 'baremetal_status_code_retries': 5, 'baremetal_connect_retries': 3, } class TestCloudRegion(base.TestCase): def test_arbitrary_attributes(self): cc = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) self.assertEqual("test1", cc.name) self.assertEqual("region-al", cc.region_name) # Look up straight value self.assertEqual("1", cc.a) # Look up prefixed attribute, fail - returns None self.assertIsNone(cc.os_b) # Look up straight value, then prefixed value self.assertEqual("3", cc.c) self.assertEqual("3", cc.os_c) # Lookup mystery attribute self.assertIsNone(cc.x) # Test default ipv6 self.assertFalse(cc.force_ipv4) def test_iteration(self): cc = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) self.assertIn('a', cc) self.assertNotIn('x', cc) def 
test_equality(self): cc1 = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) cc2 = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) self.assertEqual(cc1, cc2) def test_inequality(self): cc1 = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) cc2 = cloud_region.CloudRegion("test2", "region-al", fake_config_dict) self.assertNotEqual(cc1, cc2) cc2 = cloud_region.CloudRegion("test1", "region-xx", fake_config_dict) self.assertNotEqual(cc1, cc2) cc2 = cloud_region.CloudRegion("test1", "region-al", {}) self.assertNotEqual(cc1, cc2) def test_get_config(self): cc = cloud_region.CloudRegion("test1", "region-al", fake_services_dict) self.assertIsNone(cc._get_config('nothing', None)) # This is what is happening behind the scenes in get_default_interface. self.assertEqual( fake_services_dict['interface'], cc._get_config('interface', None) ) # The same call as above, but from one step up the stack self.assertEqual(fake_services_dict['interface'], cc.get_interface()) # Which finally is what is called to populate the below self.assertEqual('public', self.cloud.default_interface) def test_verify(self): config_dict = copy.deepcopy(fake_config_dict) config_dict['cacert'] = None config_dict['verify'] = False cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) (verify, cert) = cc.get_requests_verify_args() self.assertFalse(verify) config_dict['verify'] = True cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) (verify, cert) = cc.get_requests_verify_args() self.assertTrue(verify) config_dict['insecure'] = True cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) (verify, cert) = cc.get_requests_verify_args() self.assertFalse(verify) def test_verify_cacert(self): config_dict = copy.deepcopy(fake_config_dict) config_dict['cacert'] = "certfile" config_dict['verify'] = False cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) (verify, cert) = cc.get_requests_verify_args() 
self.assertFalse(verify) config_dict['verify'] = True cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) (verify, cert) = cc.get_requests_verify_args() self.assertEqual("certfile", verify) config_dict['insecure'] = True cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) (verify, cert) = cc.get_requests_verify_args() self.assertEqual(False, verify) def test_cert_with_key(self): config_dict = copy.deepcopy(fake_config_dict) config_dict['cacert'] = None config_dict['verify'] = False config_dict['cert'] = 'cert' config_dict['key'] = 'key' cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) (verify, cert) = cc.get_requests_verify_args() self.assertEqual(("cert", "key"), cert) def test_ipv6(self): cc = cloud_region.CloudRegion( "test1", "region-al", fake_config_dict, force_ipv4=True ) self.assertTrue(cc.force_ipv4) def test_getters(self): cc = cloud_region.CloudRegion("test1", "region-al", fake_services_dict) self.assertEqual( ['compute', 'identity', 'image', 'volume'], sorted(cc.get_services()), ) self.assertEqual( {'password': 'hunter2', 'username': 'AzureDiamond'}, cc.get_auth_args(), ) self.assertEqual('public', cc.get_interface()) self.assertEqual('public', cc.get_interface('compute')) self.assertEqual('admin', cc.get_interface('identity')) self.assertEqual('region-al', cc.region_name) self.assertIsNone(cc.get_api_version('image')) self.assertEqual('2', cc.get_api_version('compute')) self.assertEqual('mage', cc.get_service_type('image')) self.assertEqual('compute', cc.get_service_type('compute')) self.assertEqual('1', cc.get_api_version('volume')) self.assertEqual('block-storage', cc.get_service_type('volume')) self.assertEqual( 'http://compute.example.com', cc.get_endpoint('compute') ) self.assertIsNone(cc.get_endpoint('image')) self.assertIsNone(cc.get_service_name('compute')) self.assertEqual('locks', cc.get_service_name('identity')) self.assertIsNone(cc.get_status_code_retries('compute')) self.assertEqual(5, 
cc.get_status_code_retries('baremetal')) self.assertEqual(1, cc.get_connect_retries('compute')) self.assertEqual(3, cc.get_connect_retries('baremetal')) def test_rackspace_workaround(self): # We're skipping loader here, so we have to expand relevant # parts from the rackspace profile. The thing we're testing # is that the project_id logic works. cc = cloud_region.CloudRegion( "test1", "DFW", { 'profile': 'rackspace', 'region_name': 'DFW', 'auth': {'project_id': '123456'}, 'block_storage_endpoint_override': 'https://example.com/v2/', }, ) self.assertEqual( 'https://example.com/v2/123456', cc.get_endpoint('block-storage') ) def test_rackspace_workaround_only_rax(self): cc = cloud_region.CloudRegion( "test1", "DFW", { 'region_name': 'DFW', 'auth': {'project_id': '123456'}, 'block_storage_endpoint_override': 'https://example.com/v2/', }, ) self.assertEqual( 'https://example.com/v2/', cc.get_endpoint('block-storage') ) def test_get_region_name(self): def assert_region_name(default, compute): self.assertEqual(default, cc.region_name) self.assertEqual(default, cc.get_region_name()) self.assertEqual(default, cc.get_region_name(service_type=None)) self.assertEqual( compute, cc.get_region_name(service_type='compute') ) self.assertEqual( default, cc.get_region_name(service_type='placement') ) # No region_name kwarg, no regions specified in services dict # (including the default). 
cc = cloud_region.CloudRegion(config=fake_services_dict) assert_region_name(None, None) # Only region_name kwarg; it's returned for everything cc = cloud_region.CloudRegion( region_name='foo', config=fake_services_dict ) assert_region_name('foo', 'foo') # No region_name kwarg; values (including default) show through from # config dict services_dict = dict( fake_services_dict, region_name='the-default', compute_region_name='compute-region', ) cc = cloud_region.CloudRegion(config=services_dict) assert_region_name('the-default', 'compute-region') # region_name kwarg overrides config dict default (for backward # compatibility), but service-specific region_name takes precedence. services_dict = dict( fake_services_dict, region_name='dict', compute_region_name='compute-region', ) cc = cloud_region.CloudRegion( region_name='kwarg', config=services_dict ) assert_region_name('kwarg', 'compute-region') def test_aliases(self): services_dict = fake_services_dict.copy() services_dict['volume_api_version'] = 12 services_dict['alarming_service_name'] = 'aodh' cc = cloud_region.CloudRegion("test1", "region-al", services_dict) self.assertEqual('12', cc.get_api_version('volume')) self.assertEqual('12', cc.get_api_version('block-storage')) self.assertEqual('aodh', cc.get_service_name('alarm')) self.assertEqual('aodh', cc.get_service_name('alarming')) def test_no_override(self): """Test no override happens when defaults are not configured""" cc = cloud_region.CloudRegion("test1", "region-al", fake_services_dict) self.assertEqual('block-storage', cc.get_service_type('volume')) self.assertEqual('workflow', cc.get_service_type('workflow')) self.assertEqual('not-exist', cc.get_service_type('not-exist')) def test_get_session_no_auth(self): config_dict = defaults.get_defaults() config_dict.update(fake_services_dict) cc = cloud_region.CloudRegion("test1", "region-al", config_dict) self.assertRaises(exceptions.ConfigException, cc.get_session) @mock.patch.object(ksa_session, 'Session') def 
test_get_session(self, mock_session): config_dict = defaults.get_defaults() config_dict.update(fake_services_dict) fake_session = mock.Mock() fake_session.additional_user_agent = [] mock_session.return_value = fake_session cc = cloud_region.CloudRegion( "test1", "region-al", config_dict, auth_plugin=mock.Mock() ) cc.get_session() mock_session.assert_called_with( auth=mock.ANY, verify=True, cert=None, timeout=None, collect_timing=None, discovery_cache=None, ) self.assertEqual( fake_session.additional_user_agent, [('openstacksdk', openstack_version.__version__)], ) @mock.patch.object(ksa_session, 'Session') def test_get_session_with_app_name(self, mock_session): config_dict = defaults.get_defaults() config_dict.update(fake_services_dict) fake_session = mock.Mock() fake_session.additional_user_agent = [] fake_session.app_name = None fake_session.app_version = None mock_session.return_value = fake_session cc = cloud_region.CloudRegion( "test1", "region-al", config_dict, auth_plugin=mock.Mock(), app_name="test_app", app_version="test_version", ) cc.get_session() mock_session.assert_called_with( auth=mock.ANY, verify=True, cert=None, timeout=None, collect_timing=None, discovery_cache=None, ) self.assertEqual(fake_session.app_name, "test_app") self.assertEqual(fake_session.app_version, "test_version") self.assertEqual( fake_session.additional_user_agent, [('openstacksdk', openstack_version.__version__)], ) @mock.patch.object(ksa_session, 'Session') def test_get_session_with_timeout(self, mock_session): fake_session = mock.Mock() fake_session.additional_user_agent = [] mock_session.return_value = fake_session config_dict = defaults.get_defaults() config_dict.update(fake_services_dict) config_dict['api_timeout'] = 9 cc = cloud_region.CloudRegion( "test1", "region-al", config_dict, auth_plugin=mock.Mock() ) cc.get_session() mock_session.assert_called_with( auth=mock.ANY, verify=True, cert=None, timeout=9, collect_timing=None, discovery_cache=None, ) self.assertEqual( 
fake_session.additional_user_agent, [('openstacksdk', openstack_version.__version__)], ) @mock.patch.object(ksa_session, 'Session') def test_get_session_with_timing(self, mock_session): fake_session = mock.Mock() fake_session.additional_user_agent = [] mock_session.return_value = fake_session config_dict = defaults.get_defaults() config_dict.update(fake_services_dict) config_dict['timing'] = True cc = cloud_region.CloudRegion( "test1", "region-al", config_dict, auth_plugin=mock.Mock() ) cc.get_session() mock_session.assert_called_with( auth=mock.ANY, verify=True, cert=None, timeout=None, collect_timing=True, discovery_cache=None, ) self.assertEqual( fake_session.additional_user_agent, [('openstacksdk', openstack_version.__version__)], ) @mock.patch.object(ksa_session, 'Session') def test_override_session_endpoint_override(self, mock_session): config_dict = defaults.get_defaults() config_dict.update(fake_services_dict) cc = cloud_region.CloudRegion( "test1", "region-al", config_dict, auth_plugin=mock.Mock() ) self.assertEqual( cc.get_session_endpoint('compute'), fake_services_dict['compute_endpoint_override'], ) @mock.patch.object(ksa_session, 'Session') def test_override_session_endpoint(self, mock_session): config_dict = defaults.get_defaults() config_dict.update(fake_services_dict) cc = cloud_region.CloudRegion( "test1", "region-al", config_dict, auth_plugin=mock.Mock() ) self.assertEqual( cc.get_session_endpoint('telemetry'), fake_services_dict['telemetry_endpoint'], ) @mock.patch.object(cloud_region.CloudRegion, 'get_session') def test_session_endpoint(self, mock_get_session): mock_session = mock.Mock() mock_get_session.return_value = mock_session config_dict = defaults.get_defaults() config_dict.update(fake_services_dict) cc = cloud_region.CloudRegion( "test1", "region-al", config_dict, auth_plugin=mock.Mock() ) cc.get_session_endpoint('orchestration') mock_session.get_endpoint.assert_called_with( interface='public', service_name=None, region_name='region-al', 
service_type='orchestration', ) @mock.patch.object(cloud_region.CloudRegion, 'get_session') def test_session_endpoint_not_found(self, mock_get_session): exc_to_raise = ksa_exceptions.catalog.EndpointNotFound mock_get_session.return_value.get_endpoint.side_effect = exc_to_raise cc = cloud_region.CloudRegion( "test1", "region-al", {}, auth_plugin=mock.Mock() ) self.assertIsNone(cc.get_session_endpoint('notfound')) def test_get_endpoint_from_catalog(self): dns_override = 'https://override.dns.example.com' self.cloud.config.config['dns_endpoint_override'] = dns_override self.assertEqual( 'https://compute.example.com/v2.1/', self.cloud.config.get_endpoint_from_catalog('compute'), ) self.assertEqual( 'https://internal.compute.example.com/v2.1/', self.cloud.config.get_endpoint_from_catalog( 'compute', interface='internal' ), ) self.assertIsNone( self.cloud.config.get_endpoint_from_catalog( 'compute', region_name='unknown-region' ) ) self.assertEqual( 'https://dns.example.com', self.cloud.config.get_endpoint_from_catalog('dns'), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/config/test_config.py0000664000175000017500000015753500000000000024502 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import argparse import copy import os from unittest import mock import fixtures import testtools import yaml from openstack import config from openstack.config import cloud_region from openstack.config import defaults from openstack import exceptions from openstack.tests.unit.config import base def prompt_for_password(prompt=None): """Fake prompt function that just returns a constant string""" return 'promptpass' class TestConfig(base.TestCase): def test_get_all(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.no_yaml], ) clouds = c.get_all() # We add two by hand because the regions cloud is going to exist # thrice since it has three regions in it user_clouds = [cloud for cloud in base.USER_CONF['clouds'].keys()] + [ '_test_cloud_regions', '_test_cloud_regions', ] configured_clouds = [cloud.name for cloud in clouds] self.assertCountEqual(user_clouds, configured_clouds) def test_get_all_clouds(self): # Ensure the alias is in place c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.no_yaml], ) clouds = c.get_all_clouds() # We add two by hand because the regions cloud is going to exist # thrice since it has three regions in it user_clouds = [cloud for cloud in base.USER_CONF['clouds'].keys()] + [ '_test_cloud_regions', '_test_cloud_regions', ] configured_clouds = [cloud.name for cloud in clouds] self.assertCountEqual(user_clouds, configured_clouds) def test_get_one(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cloud = c.get_one(validate=False) self.assertIsInstance(cloud, cloud_region.CloudRegion) self.assertEqual(cloud.name, '') def test_get_one_cloud(self): # Ensure the alias is in place c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cloud = c.get_one_cloud(validate=False) self.assertIsInstance(cloud, cloud_region.CloudRegion) 
self.assertEqual(cloud.name, '') def test_get_one_default_cloud_from_file(self): single_conf = base._write_yaml( { 'clouds': { 'single': { 'auth': { 'auth_url': 'http://example.com/v2', 'username': 'testuser', 'password': 'testpass', 'project_name': 'testproject', }, 'region_name': 'test-region', } } } ) c = config.OpenStackConfig( config_files=[single_conf], secure_files=[], vendor_files=[self.vendor_yaml], ) cc = c.get_one() self.assertEqual(cc.name, 'single') def test_remote_profile(self): single_conf = base._write_yaml( { 'clouds': { 'remote': { 'profile': 'https://example.com', 'auth': { 'username': 'testuser', 'password': 'testpass', 'project_name': 'testproject', }, 'region_name': 'test-region', } } } ) self.register_uris( [ dict( method='GET', uri='https://example.com/.well-known/openstack/api', json={ "name": "example", "profile": { "auth": { "auth_url": "https://auth.example.com/v3", } }, }, ), ] ) c = config.OpenStackConfig(config_files=[single_conf]) cc = c.get_one(cloud='remote') self.assertEqual(cc.name, 'remote') self.assertEqual(cc.auth['auth_url'], 'https://auth.example.com/v3') self.assertEqual(cc.auth['username'], 'testuser') def test_get_one_auth_defaults(self): c = config.OpenStackConfig(config_files=[self.cloud_yaml]) cc = c.get_one(cloud='_test-cloud_', auth={'username': 'user'}) self.assertEqual('user', cc.auth['username']) self.assertEqual( defaults._defaults['auth_type'], cc.auth_type, ) def test_get_one_with_config_files(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.secure_yaml], ) self.assertIsInstance(c.cloud_config, dict) self.assertIn('cache', c.cloud_config) self.assertIsInstance(c.cloud_config['cache'], dict) self.assertIn('max_age', c.cloud_config['cache']) self.assertIn('path', c.cloud_config['cache']) cc = c.get_one('_test-cloud_') self._assert_cloud_details(cc) cc = c.get_one('_test_cloud_no_vendor') self._assert_cloud_details(cc) def 
test_get_one_with_int_project_id(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one('_test-cloud-int-project_') self.assertEqual('12345', cc.auth['project_id']) def test_get_one_with_domain_id(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one('_test-cloud-domain-id_') self.assertEqual('6789', cc.auth['user_domain_id']) self.assertEqual('123456789', cc.auth['project_domain_id']) self.assertNotIn('domain_id', cc.auth) self.assertNotIn('domain-id', cc.auth) self.assertNotIn('domain_id', cc) def test_get_one_unscoped_identity(self): single_conf = base._write_yaml( { 'clouds': { 'unscoped': { 'auth': { 'auth_url': 'http://example.com/v2', 'username': 'testuser', 'password': 'testpass', }, } } } ) c = config.OpenStackConfig( config_files=[single_conf], secure_files=[], vendor_files=[self.vendor_yaml], ) cc = c.get_one() self.assertEqual('http://example.com/v2', cc.get_endpoint('identity')) def test_get_one_domain_scoped(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one('_test-cloud-domain-scoped_') self.assertEqual('12345', cc.auth['domain_id']) self.assertNotIn('user_domain_id', cc.auth) self.assertNotIn('project_domain_id', cc.auth) self.assertIsNone(cc.get_endpoint('identity')) def test_get_one_infer_user_domain(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one('_test-cloud-int-project_') self.assertEqual('awesome-domain', cc.auth['user_domain_id']) self.assertEqual('awesome-domain', cc.auth['project_domain_id']) self.assertNotIn('domain_id', cc.auth) self.assertNotIn('domain_id', cc) def test_get_one_infer_passcode(self): single_conf = base._write_yaml( { 'clouds': { 'mfa': { 'auth_type': 'v3multifactor', 'auth_methods': ['v3password', 'v3totp'], 'auth': { 'auth_url': 'fake_url', 'username': 'testuser', 
'password': 'testpass', 'project_name': 'testproject', 'project_domain_name': 'projectdomain', 'user_domain_name': 'udn', }, 'region_name': 'test-region', } } } ) c = config.OpenStackConfig(config_files=[single_conf]) cc = c.get_one(cloud='mfa', passcode='123') self.assertEqual('123', cc.auth['passcode']) def test_get_one_with_hyphenated_project_id(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one('_test_cloud_hyphenated') self.assertEqual('12345', cc.auth['project_id']) def test_get_one_with_hyphenated_kwargs(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) args = { 'auth': { 'username': 'testuser', 'password': 'testpass', 'project-id': '12345', 'auth-url': 'http://example.com/v2', }, 'region_name': 'test-region', } cc = c.get_one(**args) self.assertEqual('http://example.com/v2', cc.auth['auth_url']) def test_no_environ(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) self.assertRaises(exceptions.ConfigException, c.get_one, 'envvars') def test_fallthrough(self): c = config.OpenStackConfig( config_files=[self.no_yaml], vendor_files=[self.no_yaml], secure_files=[self.no_yaml], ) for k in os.environ.keys(): if k.startswith('OS_'): self.useFixture(fixtures.EnvironmentVariable(k)) c.get_one(cloud='defaults', validate=False) def test_prefer_ipv6_true(self): c = config.OpenStackConfig( config_files=[self.no_yaml], vendor_files=[self.no_yaml], secure_files=[self.no_yaml], ) cc = c.get_one(cloud='defaults', validate=False) self.assertTrue(cc.prefer_ipv6) def test_prefer_ipv6_false(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one(cloud='_test-cloud_') self.assertFalse(cc.prefer_ipv6) def test_force_ipv4_true(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = 
c.get_one(cloud='_test-cloud_') self.assertTrue(cc.force_ipv4) def test_force_ipv4_false(self): c = config.OpenStackConfig( config_files=[self.no_yaml], vendor_files=[self.no_yaml], secure_files=[self.no_yaml], ) cc = c.get_one(cloud='defaults', validate=False) self.assertFalse(cc.force_ipv4) def test_get_one_auth_merge(self): c = config.OpenStackConfig(config_files=[self.cloud_yaml]) cc = c.get_one(cloud='_test-cloud_', auth={'username': 'user'}) self.assertEqual('user', cc.auth['username']) self.assertEqual('testpass', cc.auth['password']) def test_get_one_networks(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one('_test-cloud-networks_') self.assertEqual( ['a-public', 'another-public', 'split-default'], cc.get_external_networks(), ) self.assertEqual( ['a-private', 'another-private', 'split-no-default'], cc.get_internal_networks(), ) self.assertEqual('a-public', cc.get_nat_source()) self.assertEqual('another-private', cc.get_nat_destination()) self.assertEqual('another-public', cc.get_default_network()) self.assertEqual( ['a-public', 'another-public', 'split-no-default'], cc.get_external_ipv4_networks(), ) self.assertEqual( ['a-public', 'another-public', 'split-default'], cc.get_external_ipv6_networks(), ) def test_get_one_no_networks(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one('_test-cloud-domain-scoped_') self.assertEqual([], cc.get_external_networks()) self.assertEqual([], cc.get_internal_networks()) self.assertIsNone(cc.get_nat_source()) self.assertIsNone(cc.get_nat_destination()) self.assertIsNone(cc.get_default_network()) def test_only_secure_yaml(self): c = config.OpenStackConfig( config_files=['nonexistent'], vendor_files=['nonexistent'], secure_files=[self.secure_yaml], ) cc = c.get_one(cloud='_test_cloud_no_vendor', validate=False) self.assertEqual('testpass', cc.auth['password']) def test_get_cloud_names(self): c = 
config.OpenStackConfig( config_files=[self.cloud_yaml], secure_files=[self.no_yaml] ) self.assertCountEqual( [ '_test-cloud-domain-id_', '_test-cloud-domain-scoped_', '_test-cloud-int-project_', '_test-cloud-networks_', '_test-cloud_', '_test-cloud_no_region', '_test_cloud_hyphenated', '_test_cloud_no_vendor', '_test_cloud_regions', '_test-cloud-override-metrics', ], c.get_cloud_names(), ) c = config.OpenStackConfig( config_files=[self.no_yaml], vendor_files=[self.no_yaml], secure_files=[self.no_yaml], ) for k in os.environ.keys(): if k.startswith('OS_'): self.useFixture(fixtures.EnvironmentVariable(k)) c.get_one(cloud='defaults', validate=False) self.assertEqual(['defaults'], sorted(c.get_cloud_names())) def test_set_one_cloud_creates_file(self): config_dir = fixtures.TempDir() self.useFixture(config_dir) config_path = os.path.join(config_dir.path, 'clouds.yaml') config.OpenStackConfig.set_one_cloud(config_path, '_test_cloud_') self.assertTrue(os.path.isfile(config_path)) with open(config_path) as fh: self.assertEqual( {'clouds': {'_test_cloud_': {}}}, yaml.safe_load(fh) ) def test_set_one_cloud_updates_cloud(self): new_config = {'cloud': 'new_cloud', 'auth': {'password': 'newpass'}} resulting_cloud_config = { 'auth': { 'password': 'newpass', 'username': 'testuser', 'auth_url': 'http://example.com/v2', }, 'cloud': 'new_cloud', 'profile': '_test_cloud_in_our_cloud', 'region_name': 'test-region', } resulting_config = copy.deepcopy(base.USER_CONF) resulting_config['clouds']['_test-cloud_'] = resulting_cloud_config config.OpenStackConfig.set_one_cloud( self.cloud_yaml, '_test-cloud_', new_config ) with open(self.cloud_yaml) as fh: written_config = yaml.safe_load(fh) # We write a cache config for testing written_config['cache'].pop('path', None) self.assertEqual(written_config, resulting_config) def test_get_region_no_region_default(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.no_yaml], ) region 
= c._get_region(cloud='_test-cloud_no_region') self.assertEqual(region, {'name': '', 'values': {}}) def test_get_region_no_region(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.no_yaml], ) region = c._get_region( cloud='_test-cloud_no_region', region_name='override-region' ) self.assertEqual(region, {'name': 'override-region', 'values': {}}) def test_get_region_region_is_none(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.no_yaml], ) region = c._get_region(cloud='_test-cloud_no_region', region_name=None) self.assertEqual(region, {'name': '', 'values': {}}) def test_get_region_region_set(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.no_yaml], ) region = c._get_region(cloud='_test-cloud_', region_name='test-region') self.assertEqual(region, {'name': 'test-region', 'values': {}}) def test_get_region_many_regions_default(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.no_yaml], ) region = c._get_region(cloud='_test_cloud_regions', region_name='') self.assertEqual( region, { 'name': 'region1', 'values': {'external_network': 'region1-network'}, }, ) def test_get_region_many_regions(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.no_yaml], ) region = c._get_region( cloud='_test_cloud_regions', region_name='region2' ) self.assertEqual( region, {'name': 'region2', 'values': {'external_network': 'my-network'}}, ) def test_get_region_by_name_no_value(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) region = c._get_region( cloud='_test_cloud_regions', region_name='region-no-value' ) self.assertEqual(region, {'name': 'region-no-value', 'values': {}}) def 
test_get_region_invalid_region(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.no_yaml], ) self.assertRaises( exceptions.ConfigException, c._get_region, cloud='_test_cloud_regions', region_name='invalid-region', ) def test_get_region_no_cloud(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.no_yaml], ) region = c._get_region(region_name='no-cloud-region') self.assertEqual(region, {'name': 'no-cloud-region', 'values': {}}) def test_get_region_invalid_keys(self): invalid_conf = base._write_yaml( { 'clouds': { '_test_cloud': { 'profile': '_test_cloud_in_our_cloud', 'auth': { 'auth_url': 'http://example.com/v2', 'username': 'testuser', 'password': 'testpass', }, 'regions': [ {'name': 'region1', 'foo': 'bar'}, ], } } } ) c = config.OpenStackConfig( config_files=[invalid_conf], vendor_files=[self.vendor_yaml] ) self.assertRaises( exceptions.ConfigException, c._get_region, cloud='_test_cloud', region_name='region1', ) @mock.patch('openstack.config.cloud_region.keyring') @mock.patch( 'keystoneauth1.identity.base.BaseIdentityPlugin.set_auth_state' ) def test_load_auth_cache_not_found(self, ks_mock, kr_mock): c = config.OpenStackConfig( config_files=[self.cloud_yaml], secure_files=[] ) c._cache_auth = True kr_mock.get_password = mock.Mock(side_effect=[RuntimeError]) region = c.get_one('_test-cloud_') kr_mock.get_password.assert_called_with( 'openstacksdk', region._auth.get_cache_id() ) ks_mock.assert_not_called() @mock.patch('openstack.config.cloud_region.keyring') @mock.patch( 'keystoneauth1.identity.base.BaseIdentityPlugin.set_auth_state' ) def test_load_auth_cache_found(self, ks_mock, kr_mock): c = config.OpenStackConfig( config_files=[self.cloud_yaml], secure_files=[] ) c._cache_auth = True fake_auth = {'a': 'b'} kr_mock.get_password = mock.Mock(return_value=fake_auth) region = c.get_one('_test-cloud_') 
kr_mock.get_password.assert_called_with( 'openstacksdk', region._auth.get_cache_id() ) ks_mock.assert_called_with(fake_auth) @mock.patch('openstack.config.cloud_region.keyring') def test_set_auth_cache_empty_auth(self, kr_mock): c = config.OpenStackConfig( config_files=[self.cloud_yaml], secure_files=[] ) c._cache_auth = True kr_mock.get_password = mock.Mock(side_effect=[RuntimeError]) kr_mock.set_password = mock.Mock() region = c.get_one('_test-cloud_') region.set_auth_cache() kr_mock.set_password.assert_not_called() @mock.patch('openstack.config.cloud_region.keyring') def test_set_auth_cache(self, kr_mock): c = config.OpenStackConfig( config_files=[self.cloud_yaml], secure_files=[] ) c._cache_auth = True kr_mock.get_password = mock.Mock(side_effect=[RuntimeError]) kr_mock.set_password = mock.Mock() region = c.get_one('_test-cloud_') region._auth.set_auth_state( '{"auth_token":"foo", "body":{"token":"bar"}}' ) region.set_auth_cache() kr_mock.set_password.assert_called_with( 'openstacksdk', region._auth.get_cache_id(), region._auth.get_auth_state(), ) def test_metrics_global(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.secure_yaml], ) self.assertIsInstance(c.cloud_config, dict) cc = c.get_one('_test-cloud_') statsd = { 'host': '127.0.0.1', 'port': '1234', } # NOTE(ianw) we don't test/call get__client() because we # don't want to instantiate the client, which tries to # connect / do hostname lookups. 
self.assertEqual(statsd['host'], cc._statsd_host) self.assertEqual(statsd['port'], cc._statsd_port) self.assertEqual('openstack.api', cc.get_statsd_prefix()) influxdb = { 'use_udp': True, 'host': '127.0.0.1', 'port': '1234', 'username': 'username', 'password': 'password', 'database': 'database', 'measurement': 'measurement.name', 'timeout': 10, } self.assertEqual(influxdb, cc._influxdb_config) def test_metrics_override(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.secure_yaml], ) self.assertIsInstance(c.cloud_config, dict) cc = c.get_one('_test-cloud-override-metrics') statsd = { 'host': '127.0.0.1', 'port': '4321', 'prefix': 'statsd.override.prefix', } self.assertEqual(statsd['host'], cc._statsd_host) self.assertEqual(statsd['port'], cc._statsd_port) self.assertEqual(statsd['prefix'], cc.get_statsd_prefix()) influxdb = { 'use_udp': True, 'host': '127.0.0.1', 'port': '1234', 'username': 'override-username', 'password': 'override-password', 'database': 'override-database', 'measurement': 'measurement.name', 'timeout': 10, } self.assertEqual(influxdb, cc._influxdb_config) class TestExcludedFormattedConfigValue(base.TestCase): # verify https://storyboard.openstack.org/#!/story/1635696 # # get_one_cloud() and get_one_cloud_osc() iterate over config # values and try to expand any variables in those values by # calling value.format(), however some config values # (e.g. password) should never have format() applied to them, not # only might that change the password but it will also cause the # format() function to raise an exception if it can not parse the # format string. Examples would be single brace (e.g. 'foo{') # which raises an ValueError because it's looking for a matching # end brace or a brace pair with a key value that cannot be found # (e.g. 'foo{bar}') which raises a KeyError. 
def setUp(self): super().setUp() self.args = dict( auth_url='http://example.com/v2', username='user', project_name='project', region_name='region2', snack_type='cookie', os_auth_token='no-good-things', ) self.options = argparse.Namespace(**self.args) def test_get_one_cloud_password_brace(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) password = 'foo{' # Would raise ValueError, single brace self.options.password = password cc = c.get_one_cloud( cloud='_test_cloud_regions', argparse=self.options, validate=False ) self.assertEqual(cc.password, password) password = 'foo{bar}' # Would raise KeyError, 'bar' not found self.options.password = password cc = c.get_one_cloud( cloud='_test_cloud_regions', argparse=self.options, validate=False ) self.assertEqual(cc.password, password) def test_get_one_cloud_osc_password_brace(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) password = 'foo{' # Would raise ValueError, single brace self.options.password = password cc = c.get_one_cloud_osc( cloud='_test_cloud_regions', argparse=self.options, validate=False ) self.assertEqual(cc.password, password) password = 'foo{bar}' # Would raise KeyError, 'bar' not found self.options.password = password cc = c.get_one_cloud_osc( cloud='_test_cloud_regions', argparse=self.options, validate=False ) self.assertEqual(cc.password, password) class TestConfigArgparse(base.TestCase): def setUp(self): super().setUp() self.args = dict( auth_url='http://example.com/v2', username='user', password='password', project_name='project', region_name='region2', snack_type='cookie', os_auth_token='no-good-things', ) self.options = argparse.Namespace(**self.args) def test_get_one_bad_region_argparse(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) self.assertRaises( exceptions.ConfigException, c.get_one, cloud='_test-cloud_', argparse=self.options, ) def 
test_get_one_argparse(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one( cloud='_test_cloud_regions', argparse=self.options, validate=False ) self.assertEqual(cc.region_name, 'region2') self.assertEqual(cc.snack_type, 'cookie') def test_get_one_precedence(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) kwargs = { 'auth': { 'username': 'testuser', 'password': 'authpass', 'project-id': 'testproject', 'auth_url': 'http://example.com/v2', }, 'region_name': 'kwarg_region', 'password': 'ansible_password', 'arbitrary': 'value', } args = dict( auth_url='http://example.com/v2', username='user', password='argpass', project_name='project', region_name='region2', snack_type='cookie', ) options = argparse.Namespace(**args) cc = c.get_one(argparse=options, **kwargs) self.assertEqual(cc.region_name, 'region2') self.assertEqual(cc.auth['password'], 'authpass') self.assertEqual(cc.snack_type, 'cookie') def test_get_one_cloud_precedence_osc(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], ) kwargs = { 'auth': { 'username': 'testuser', 'password': 'authpass', 'project-id': 'testproject', 'auth_url': 'http://example.com/v2', }, 'region_name': 'kwarg_region', 'password': 'ansible_password', 'arbitrary': 'value', } args = dict( auth_url='http://example.com/v2', username='user', password='argpass', project_name='project', region_name='region2', snack_type='cookie', ) options = argparse.Namespace(**args) cc = c.get_one_cloud_osc(argparse=options, **kwargs) self.assertEqual(cc.region_name, 'region2') self.assertEqual(cc.auth['password'], 'argpass') self.assertEqual(cc.snack_type, 'cookie') def test_get_one_precedence_no_argparse(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) kwargs = { 'auth': { 'username': 'testuser', 'password': 'authpass', 'project-id': 
'testproject', 'auth_url': 'http://example.com/v2', }, 'region_name': 'kwarg_region', 'password': 'ansible_password', 'arbitrary': 'value', } cc = c.get_one(**kwargs) self.assertEqual(cc.region_name, 'kwarg_region') self.assertEqual(cc.auth['password'], 'authpass') self.assertIsNone(cc.password) def test_get_one_just_argparse(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one(argparse=self.options, validate=False) self.assertIsNone(cc.cloud) self.assertEqual(cc.region_name, 'region2') self.assertEqual(cc.snack_type, 'cookie') def test_get_one_just_kwargs(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one(validate=False, **self.args) self.assertIsNone(cc.cloud) self.assertEqual(cc.region_name, 'region2') self.assertEqual(cc.snack_type, 'cookie') def test_get_one_dash_kwargs(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) args = { 'auth-url': 'http://example.com/v2', 'username': 'user', 'password': 'password', 'project_name': 'project', 'region_name': 'other-test-region', 'snack_type': 'cookie', } cc = c.get_one(**args) self.assertIsNone(cc.cloud) self.assertEqual(cc.region_name, 'other-test-region') self.assertEqual(cc.snack_type, 'cookie') def test_get_one_no_argparse(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one(cloud='_test-cloud_', argparse=None) self._assert_cloud_details(cc) self.assertEqual(cc.region_name, 'test-region') self.assertIsNone(cc.snack_type) def test_get_one_no_argparse_regions(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one(cloud='_test_cloud_regions', argparse=None) self._assert_cloud_details(cc) self.assertEqual(cc.region_name, 'region1') self.assertIsNone(cc.snack_type) def test_get_one_bad_region(self): c = 
config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) self.assertRaises( exceptions.ConfigException, c.get_one, cloud='_test_cloud_regions', region_name='bad', ) def test_get_one_bad_region_no_regions(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) self.assertRaises( exceptions.ConfigException, c.get_one, cloud='_test-cloud_', region_name='bad_region', ) def test_get_one_no_argparse_region2(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one( cloud='_test_cloud_regions', region_name='region2', argparse=None ) self._assert_cloud_details(cc) self.assertEqual(cc.region_name, 'region2') self.assertIsNone(cc.snack_type) def test_get_one_network(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one( cloud='_test_cloud_regions', region_name='region1', argparse=None ) self._assert_cloud_details(cc) self.assertEqual(cc.region_name, 'region1') self.assertEqual('region1-network', cc.config['external_network']) def test_get_one_per_region_network(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one( cloud='_test_cloud_regions', region_name='region2', argparse=None ) self._assert_cloud_details(cc) self.assertEqual(cc.region_name, 'region2') self.assertEqual('my-network', cc.config['external_network']) def test_get_one_no_yaml_no_cloud(self): c = config.OpenStackConfig(load_yaml_config=False) self.assertRaises( exceptions.ConfigException, c.get_one, cloud='_test_cloud_regions', region_name='region2', argparse=None, ) def test_get_one_no_yaml(self): c = config.OpenStackConfig(load_yaml_config=False) cc = c.get_one( region_name='region2', argparse=None, **base.USER_CONF['clouds']['_test_cloud_regions'] ) # Not using assert_cloud_details because of cache settings which # are not present without the file 
self.assertIsInstance(cc, cloud_region.CloudRegion) self.assertTrue(hasattr(cc, 'auth')) self.assertIsInstance(cc.auth, dict) self.assertIsNone(cc.cloud) self.assertIn('username', cc.auth) self.assertEqual('testuser', cc.auth['username']) self.assertEqual('testpass', cc.auth['password']) self.assertFalse(cc.config['image_api_use_tasks']) self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth) if 'project_name' in cc.auth: self.assertEqual('testproject', cc.auth['project_name']) elif 'project_id' in cc.auth: self.assertEqual('testproject', cc.auth['project_id']) self.assertEqual(cc.region_name, 'region2') def test_fix_env_args(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) env_args = {'os-compute-api-version': 1} fixed_args = c._fix_args(env_args) self.assertDictEqual({'compute_api_version': 1}, fixed_args) def test_extra_config(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) defaults = {'use_hostnames': False, 'other-value': 'something'} ansible_options = c.get_extra_config('ansible', defaults) # This should show that the default for use_hostnames above is # overridden by the value in the config file defined in base.py # It should also show that other-value key is normalized and passed # through even though there is no corresponding value in the config # file, and that expand-hostvars key is normalized and the value # from the config comes through even though there is no default. 
self.assertDictEqual( { 'expand_hostvars': False, 'use_hostnames': True, 'other_value': 'something', }, ansible_options, ) def test_get_client_config(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one(cloud='_test_cloud_regions') defaults = { 'use_hostnames': False, 'other-value': 'something', 'force_ipv4': False, } ansible_options = cc.get_client_config('ansible', defaults) # This should show that the default for use_hostnames and force_ipv4 # above is overridden by the value in the config file defined in # base.py # It should also show that other-value key is normalized and passed # through even though there is no corresponding value in the config # file, and that expand-hostvars key is normalized and the value # from the config comes through even though there is no default. self.assertDictEqual( { 'expand_hostvars': False, 'use_hostnames': True, 'other_value': 'something', 'force_ipv4': True, }, ansible_options, ) def test_register_argparse_cloud(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) parser = argparse.ArgumentParser() c.register_argparse_arguments(parser, []) opts, _remain = parser.parse_known_args(['--os-cloud', 'foo']) self.assertEqual(opts.os_cloud, 'foo') def test_env_argparse_precedence(self): self.useFixture( fixtures.EnvironmentVariable('OS_TENANT_NAME', 'tenants-are-bad') ) c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one(cloud='envvars', argparse=self.options, validate=False) self.assertEqual(cc.auth['project_name'], 'project') def test_argparse_default_no_token(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) parser = argparse.ArgumentParser() c.register_argparse_arguments(parser, []) # novaclient will add this parser.add_argument('--os-auth-token') opts, _remain = parser.parse_known_args() cc = 
c.get_one(cloud='_test_cloud_regions', argparse=opts) self.assertEqual(cc.config['auth_type'], 'password') self.assertNotIn('token', cc.config['auth']) def test_argparse_token(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) parser = argparse.ArgumentParser() c.register_argparse_arguments(parser, []) # novaclient will add this parser.add_argument('--os-auth-token') opts, _remain = parser.parse_known_args( ['--os-auth-token', 'very-bad-things', '--os-auth-type', 'token'] ) cc = c.get_one(argparse=opts, validate=False) self.assertEqual(cc.config['auth_type'], 'token') self.assertEqual(cc.config['auth']['token'], 'very-bad-things') def test_argparse_underscores(self): c = config.OpenStackConfig( config_files=[self.no_yaml], vendor_files=[self.no_yaml], secure_files=[self.no_yaml], ) parser = argparse.ArgumentParser() parser.add_argument('--os_username') argv = [ '--os_username', 'user', '--os_password', 'pass', '--os-auth-url', 'auth-url', '--os-project-name', 'project', ] c.register_argparse_arguments(parser, argv=argv) opts, _remain = parser.parse_known_args(argv) cc = c.get_one(argparse=opts) self.assertEqual(cc.config['auth']['username'], 'user') self.assertEqual(cc.config['auth']['password'], 'pass') self.assertEqual(cc.config['auth']['auth_url'], 'auth-url') def test_argparse_action_append_no_underscore(self): c = config.OpenStackConfig( config_files=[self.no_yaml], vendor_files=[self.no_yaml], secure_files=[self.no_yaml], ) parser = argparse.ArgumentParser() parser.add_argument('--foo', action='append') argv = ['--foo', '1', '--foo', '2'] c.register_argparse_arguments(parser, argv=argv) opts, _remain = parser.parse_known_args(argv) self.assertEqual(opts.foo, ['1', '2']) def test_argparse_underscores_duplicate(self): c = config.OpenStackConfig( config_files=[self.no_yaml], vendor_files=[self.no_yaml], secure_files=[self.no_yaml], ) parser = argparse.ArgumentParser() parser.add_argument('--os_username') argv = [ 
'--os_username', 'user', '--os_password', 'pass', '--os-username', 'user1', '--os-password', 'pass1', '--os-auth-url', 'auth-url', '--os-project-name', 'project', ] self.assertRaises( exceptions.ConfigException, c.register_argparse_arguments, parser=parser, argv=argv, ) def test_register_argparse_bad_plugin(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) parser = argparse.ArgumentParser() self.assertRaises( exceptions.ConfigException, c.register_argparse_arguments, parser, ['--os-auth-type', 'foo'], ) def test_register_argparse_not_password(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) parser = argparse.ArgumentParser() args = [ '--os-auth-type', 'v3token', '--os-token', 'some-secret', ] c.register_argparse_arguments(parser, args) opts, _remain = parser.parse_known_args(args) self.assertEqual(opts.os_token, 'some-secret') def test_register_argparse_password(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) parser = argparse.ArgumentParser() args = [ '--os-password', 'some-secret', ] c.register_argparse_arguments(parser, args) opts, _remain = parser.parse_known_args(args) self.assertEqual(opts.os_password, 'some-secret') with testtools.ExpectedException(AttributeError): opts.os_token def test_register_argparse_service_type(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) parser = argparse.ArgumentParser() args = [ '--os-service-type', 'network', '--os-endpoint-type', 'admin', '--http-timeout', '20', ] c.register_argparse_arguments(parser, args) opts, _remain = parser.parse_known_args(args) self.assertEqual(opts.os_service_type, 'network') self.assertEqual(opts.os_endpoint_type, 'admin') self.assertEqual(opts.http_timeout, '20') with testtools.ExpectedException(AttributeError): opts.os_network_service_type cloud = c.get_one(argparse=opts, validate=False) 
self.assertEqual(cloud.config['service_type'], 'network') self.assertEqual(cloud.config['interface'], 'admin') self.assertEqual(cloud.config['api_timeout'], '20') self.assertNotIn('http_timeout', cloud.config) def test_register_argparse_network_service_type(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) parser = argparse.ArgumentParser() args = [ '--os-endpoint-type', 'admin', '--network-api-version', '4', ] c.register_argparse_arguments(parser, args, ['network']) opts, _remain = parser.parse_known_args(args) self.assertEqual(opts.os_service_type, 'network') self.assertEqual(opts.os_endpoint_type, 'admin') self.assertIsNone(opts.os_network_service_type) self.assertIsNone(opts.os_network_api_version) self.assertEqual(opts.network_api_version, '4') cloud = c.get_one(argparse=opts, validate=False) self.assertEqual(cloud.config['service_type'], 'network') self.assertEqual(cloud.config['interface'], 'admin') self.assertEqual(cloud.config['network_api_version'], '4') self.assertNotIn('http_timeout', cloud.config) def test_register_argparse_network_service_types(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) parser = argparse.ArgumentParser() args = [ '--os-compute-service-name', 'cloudServers', '--os-network-service-type', 'badtype', '--os-endpoint-type', 'admin', '--network-api-version', '4', ] c.register_argparse_arguments( parser, args, ['compute', 'network', 'volume'] ) opts, _remain = parser.parse_known_args(args) self.assertEqual(opts.os_network_service_type, 'badtype') self.assertIsNone(opts.os_compute_service_type) self.assertIsNone(opts.os_volume_service_type) self.assertEqual(opts.os_service_type, 'compute') self.assertEqual(opts.os_compute_service_name, 'cloudServers') self.assertEqual(opts.os_endpoint_type, 'admin') self.assertIsNone(opts.os_network_api_version) self.assertEqual(opts.network_api_version, '4') cloud = c.get_one(argparse=opts, 
validate=False) self.assertEqual(cloud.config['service_type'], 'compute') self.assertEqual(cloud.config['network_service_type'], 'badtype') self.assertEqual(cloud.config['interface'], 'admin') self.assertEqual(cloud.config['network_api_version'], '4') self.assertNotIn('volume_service_type', cloud.config) self.assertNotIn('http_timeout', cloud.config) class TestConfigPrompt(base.TestCase): def setUp(self): super().setUp() self.args = dict( auth_url='http://example.com/v2', username='user', project_name='project', # region_name='region2', auth_type='password', ) self.options = argparse.Namespace(**self.args) def test_get_one_prompt(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], pw_func=prompt_for_password, ) # This needs a cloud definition without a password. # If this starts failing unexpectedly check that the cloud_yaml # and/or vendor_yaml do not have a password in the selected cloud. cc = c.get_one( cloud='_test_cloud_no_vendor', argparse=self.options, ) self.assertEqual('promptpass', cc.auth['password']) class TestConfigDefault(base.TestCase): def setUp(self): super().setUp() # Reset defaults after each test so that other tests are # not affected by any changes. 
self.addCleanup(self._reset_defaults) def _reset_defaults(self): defaults._defaults = None def test_set_no_default(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one(cloud='_test-cloud_', argparse=None) self._assert_cloud_details(cc) self.assertEqual('password', cc.auth_type) class TestBackwardsCompatibility(base.TestCase): def test_set_no_default(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cloud = { 'identity_endpoint_type': 'admin', 'compute_endpoint_type': 'private', 'endpoint_type': 'public', 'auth_type': 'v3password', } result = c._fix_backwards_interface(cloud) expected = { 'identity_interface': 'admin', 'compute_interface': 'private', 'interface': 'public', 'auth_type': 'v3password', } self.assertDictEqual(expected, result) def test_project_v2password(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cloud = { 'auth_type': 'v2password', 'auth': { 'project-name': 'my_project_name', 'project-id': 'my_project_id', }, } result = c._fix_backwards_project(cloud) expected = { 'auth_type': 'v2password', 'auth': { 'tenant_name': 'my_project_name', 'tenant_id': 'my_project_id', }, } self.assertEqual(expected, result) def test_project_password(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cloud = { 'auth_type': 'password', 'auth': { 'project-name': 'my_project_name', 'project-id': 'my_project_id', }, } result = c._fix_backwards_project(cloud) expected = { 'auth_type': 'password', 'auth': { 'project_name': 'my_project_name', 'project_id': 'my_project_id', }, } self.assertEqual(expected, result) def test_project_conflict_priority(self): """The order of priority should be 1: env or cli settings 2: setting from 'auth' section of clouds.yaml The ordering of #1 is important so that operators can use domain-wide inherited credentials in clouds.yaml. 
""" c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cloud = { 'auth_type': 'password', 'auth': { 'project_id': 'my_project_id', }, } result = c._fix_backwards_project(cloud) expected = { 'auth_type': 'password', 'auth': { 'project_id': 'my_project_id', }, } self.assertEqual(expected, result) cloud = { 'auth_type': 'password', 'auth': { 'project_id': 'my_project_id', }, 'project_id': 'different_project_id', } result = c._fix_backwards_project(cloud) expected = { 'auth_type': 'password', 'auth': { 'project_id': 'different_project_id', }, } self.assertEqual(expected, result) def test_backwards_network_fail(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cloud = { 'external_network': 'public', 'networks': [ {'name': 'private', 'routes_externally': False}, ], } self.assertRaises( exceptions.ConfigException, c._fix_backwards_networks, cloud ) def test_backwards_network(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cloud = { 'external_network': 'public', 'internal_network': 'private', } result = c._fix_backwards_networks(cloud) expected = { 'external_network': 'public', 'internal_network': 'private', 'networks': [ { 'name': 'public', 'routes_externally': True, 'nat_destination': False, 'default_interface': True, }, { 'name': 'private', 'routes_externally': False, 'nat_destination': True, 'default_interface': False, }, ], } self.assertEqual(expected, result) def test_normalize_network(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cloud = {'networks': [{'name': 'private'}]} result = c._fix_backwards_networks(cloud) expected = { 'networks': [ { 'name': 'private', 'routes_externally': False, 'nat_destination': False, 'default_interface': False, 'nat_source': False, 'routes_ipv4_externally': False, 'routes_ipv6_externally': False, }, ] } self.assertEqual(expected, result) 
def test_single_default_interface(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cloud = { 'networks': [ {'name': 'blue', 'default_interface': True}, {'name': 'purple', 'default_interface': True}, ] } self.assertRaises( exceptions.ConfigException, c._fix_backwards_networks, cloud ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/config/test_environ.py0000664000175000017500000001717700000000000024712 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures from openstack import config from openstack.config import cloud_region from openstack import exceptions from openstack.tests.unit.config import base class TestEnviron(base.TestCase): def setUp(self): super().setUp() self.useFixture( fixtures.EnvironmentVariable('OS_AUTH_URL', 'https://example.com') ) self.useFixture( fixtures.EnvironmentVariable('OS_USERNAME', 'testuser') ) self.useFixture( fixtures.EnvironmentVariable('OS_PASSWORD', 'testpass') ) self.useFixture( fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'testproject') ) self.useFixture( fixtures.EnvironmentVariable('NOVA_PROJECT_ID', 'testnova') ) def test_get_one(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) self.assertIsInstance(c.get_one(), cloud_region.CloudRegion) def test_no_fallthrough(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) self.assertRaises(exceptions.ConfigException, c.get_one, 'openstack') def test_envvar_name_override(self): self.useFixture( fixtures.EnvironmentVariable('OS_CLOUD_NAME', 'override') ) c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one('override') self._assert_cloud_details(cc) def test_envvar_prefer_ipv6_override(self): self.useFixture( fixtures.EnvironmentVariable('OS_PREFER_IPV6', 'false') ) c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.secure_yaml], ) cc = c.get_one('_test-cloud_') self.assertFalse(cc.prefer_ipv6) def test_environ_exists(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.secure_yaml], ) cc = c.get_one('envvars') self._assert_cloud_details(cc) self.assertNotIn('auth_url', cc.config) self.assertIn('auth_url', cc.config['auth']) self.assertNotIn('project_id', cc.config['auth']) self.assertNotIn('auth_url', cc.config) cc = 
c.get_one('_test-cloud_') self._assert_cloud_details(cc) cc = c.get_one('_test_cloud_no_vendor') self._assert_cloud_details(cc) def test_environ_prefix(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], envvar_prefix='NOVA_', secure_files=[self.secure_yaml], ) cc = c.get_one('envvars') self._assert_cloud_details(cc) self.assertNotIn('auth_url', cc.config) self.assertIn('auth_url', cc.config['auth']) self.assertIn('project_id', cc.config['auth']) self.assertNotIn('auth_url', cc.config) cc = c.get_one('_test-cloud_') self._assert_cloud_details(cc) cc = c.get_one('_test_cloud_no_vendor') self._assert_cloud_details(cc) def test_get_one_with_config_files(self): c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], secure_files=[self.secure_yaml], ) self.assertIsInstance(c.cloud_config, dict) self.assertIn('cache', c.cloud_config) self.assertIsInstance(c.cloud_config['cache'], dict) self.assertIn('max_age', c.cloud_config['cache']) self.assertIn('path', c.cloud_config['cache']) cc = c.get_one('_test-cloud_') self._assert_cloud_details(cc) cc = c.get_one('_test_cloud_no_vendor') self._assert_cloud_details(cc) def test_config_file_override(self): self.useFixture( fixtures.EnvironmentVariable( 'OS_CLIENT_CONFIG_FILE', self.cloud_yaml ) ) c = config.OpenStackConfig( config_files=[], vendor_files=[self.vendor_yaml] ) cc = c.get_one('_test-cloud_') self._assert_cloud_details(cc) class TestEnvvars(base.TestCase): def test_no_envvars(self): self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova')) c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) self.assertRaises(exceptions.ConfigException, c.get_one, 'envvars') def test_test_envvars(self): self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova')) self.useFixture( fixtures.EnvironmentVariable('OS_STDERR_CAPTURE', 'True') ) c = config.OpenStackConfig( 
config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) self.assertRaises(exceptions.ConfigException, c.get_one, 'envvars') def test_incomplete_envvars(self): self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova')) self.useFixture(fixtures.EnvironmentVariable('OS_USERNAME', 'user')) config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) # This is broken due to an issue that's fixed in a subsequent patch # commenting it out in this patch to keep the patch size reasonable # self.assertRaises( # keystoneauth1.exceptions.auth_plugins.MissingRequiredOptions, # c.get_one, 'envvars') def test_have_envvars(self): self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova')) self.useFixture( fixtures.EnvironmentVariable('OS_AUTH_URL', 'http://example.com') ) self.useFixture(fixtures.EnvironmentVariable('OS_USERNAME', 'user')) self.useFixture( fixtures.EnvironmentVariable('OS_PASSWORD', 'password') ) self.useFixture( fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'project') ) c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] ) cc = c.get_one('envvars') self.assertEqual(cc.config['auth']['username'], 'user') def test_old_envvars(self): self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova')) self.useFixture( fixtures.EnvironmentVariable('NOVA_AUTH_URL', 'http://example.com') ) self.useFixture( fixtures.EnvironmentVariable('NOVA_PASSWORD', 'password') ) self.useFixture( fixtures.EnvironmentVariable('NOVA_PROJECT_NAME', 'project') ) c = config.OpenStackConfig( config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml], envvar_prefix='NOVA_', ) cc = c.get_one('envvars') self.assertEqual(cc.config['auth']['username'], 'nova') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/config/test_from_conf.py0000664000175000017500000003130400000000000025166 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from keystoneauth1 import exceptions as ks_exc import requests.exceptions from openstack.config import cloud_region from openstack import connection from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestFromConf(base.TestCase): def _get_conn(self, **from_conf_kwargs): oslocfg = self._load_ks_cfg_opts() # Throw name in here to prove **kwargs is working config = cloud_region.from_conf( oslocfg, session=self.cloud.session, name='from_conf.example.com', **from_conf_kwargs ) self.assertEqual('from_conf.example.com', config.name) return connection.Connection(config=config, strict_proxies=True) def test_adapter_opts_set(self): """Adapter opts specified in the conf.""" conn = self._get_conn() discovery = { "versions": { "values": [ { "status": "stable", "updated": "2019-06-01T00:00:00Z", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.heat-v2+json", # noqa: E501 } ], "id": "v2.0", "links": [ { "href": "https://example.org:8888/heat/v2", "rel": "self", } ], } ] } } self.register_uris( [ dict( method='GET', uri='https://example.org:8888/heat/v2', json=discovery, ), dict( method='GET', uri='https://example.org:8888/heat/v2/foo', json={'foo': {}}, ), ] ) adap = conn.orchestration self.assertEqual('SpecialRegion', adap.region_name) self.assertEqual('orchestration', adap.service_type) self.assertEqual('internal', 
adap.interface) self.assertEqual( 'https://example.org:8888/heat/v2', adap.endpoint_override ) adap.get('/foo') self.assert_calls() def test_default_adapter_opts(self): """Adapter opts are registered, but all defaulting in conf.""" conn = self._get_conn() server_id = str(uuid.uuid4()) server_name = self.getUniqueString('name') fake_server = fakes.make_fake_server(server_id, server_name) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [fake_server]}, ), ] ) # Nova has empty adapter config, so these default adap = conn.compute self.assertIsNone(adap.region_name) self.assertEqual('compute', adap.service_type) self.assertEqual('public', adap.interface) self.assertIsNone(adap.endpoint_override) s = next(adap.servers()) self.assertEqual(s.id, server_id) self.assertEqual(s.name, server_name) self.assert_calls() def test_service_not_ready_catalog(self): """Adapter opts are registered, but all defaulting in conf.""" conn = self._get_conn() server_id = str(uuid.uuid4()) server_name = self.getUniqueString('name') fake_server = fakes.make_fake_server(server_id, server_name) self.register_uris( [ dict( method='GET', uri='https://compute.example.com/v2.1/', exc=requests.exceptions.ConnectionError, ), self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [fake_server]}, ), ] ) self.assertRaises( exceptions.ServiceDiscoveryException, getattr, conn, 'compute' ) # Nova has empty adapter config, so these default adap = conn.compute self.assertIsNone(adap.region_name) self.assertEqual('compute', adap.service_type) self.assertEqual('public', adap.interface) self.assertIsNone(adap.endpoint_override) s = next(adap.servers()) self.assertEqual(s.id, server_id) self.assertEqual(s.name, server_name) self.assert_calls() def test_name_with_dashes(self): conn = self._get_conn() 
discovery = { "versions": { "values": [ { "status": "stable", "id": "v1", "links": [ { "href": "https://example.org:5050/v1", "rel": "self", } ], } ] } } status = {'finished': True, 'error': None} self.register_uris( [ dict( method='GET', uri='https://example.org:5050', json=discovery, ), # strict-proxies means we're going to fetch the discovery # doc from the versioned endpoint to verify it works. dict( method='GET', uri='https://example.org:5050/v1', json=discovery, ), dict( method='GET', uri='https://example.org:5050/v1/introspection/abcd', json=status, ), ] ) adap = conn.baremetal_introspection self.assertEqual('baremetal-introspection', adap.service_type) self.assertEqual('public', adap.interface) self.assertEqual('https://example.org:5050/v1', adap.endpoint_override) self.assertTrue(adap.get_introspection('abcd').is_finished) def test_service_not_ready_endpoint_override(self): conn = self._get_conn() discovery = { "versions": { "values": [ { "status": "stable", "id": "v1", "links": [ { "href": "https://example.org:5050/v1", "rel": "self", } ], } ] } } status = {'finished': True, 'error': None} self.register_uris( [ dict( method='GET', uri='https://example.org:5050', exc=requests.exceptions.ConnectTimeout, ), dict( method='GET', uri='https://example.org:5050', json=discovery, ), # strict-proxies means we're going to fetch the discovery # doc from the versioned endpoint to verify it works. 
dict( method='GET', uri='https://example.org:5050/v1', json=discovery, ), dict( method='GET', uri='https://example.org:5050/v1/introspection/abcd', json=status, ), ] ) self.assertRaises( exceptions.ServiceDiscoveryException, getattr, conn, 'baremetal_introspection', ) adap = conn.baremetal_introspection self.assertEqual('baremetal-introspection', adap.service_type) self.assertEqual('public', adap.interface) self.assertEqual('https://example.org:5050/v1', adap.endpoint_override) self.assertTrue(adap.get_introspection('abcd').is_finished) def assert_service_disabled( self, service_type, expected_reason, **from_conf_kwargs ): conn = self._get_conn(**from_conf_kwargs) # The _ServiceDisabledProxyShim loads up okay... adap = getattr(conn, service_type) # ...but freaks out if you try to use it. ex = self.assertRaises( exceptions.ServiceDisabledException, getattr, adap, 'get' ) self.assertIn( "Service '%s' is disabled because its configuration " "could not be loaded." % service_type, ex.message, ) self.assertIn(expected_reason, ex.message) def test_no_such_conf_section(self): """No conf section (therefore no adapter opts) for service type.""" del self.oslo_config_dict['heat'] self.assert_service_disabled( 'orchestration', "No section for project 'heat' (service type 'orchestration') was " "present in the config.", ) def test_no_such_conf_section_ignore_service_type(self): """Ignore absent conf section if service type not requested.""" del self.oslo_config_dict['heat'] self.assert_service_disabled( 'orchestration', "Not in the list of requested service_types.", # 'orchestration' absent from this list service_types=['compute'], ) def test_no_adapter_opts(self): """Conf section present, but opts for service type not registered.""" self.oslo_config_dict['heat'] = None self.assert_service_disabled( 'orchestration', "Encountered an exception attempting to process config for " "project 'heat' (service type 'orchestration'): no such option", ) def 
test_no_adapter_opts_ignore_service_type(self): """Ignore unregistered conf section if service type not requested.""" self.oslo_config_dict['heat'] = None self.assert_service_disabled( 'orchestration', "Not in the list of requested service_types.", # 'orchestration' absent from this list service_types=['compute'], ) def test_invalid_adapter_opts(self): """Adapter opts are bogus, in exception-raising ways.""" self.oslo_config_dict['heat'] = { 'interface': 'public', 'valid_interfaces': 'private', } self.assert_service_disabled( 'orchestration', "Encountered an exception attempting to process config for " "project 'heat' (service type 'orchestration'): interface and " "valid_interfaces are mutually exclusive.", ) def test_no_session(self): # TODO(efried): Currently calling without a Session is not implemented. self.assertRaises( exceptions.ConfigException, cloud_region.from_conf, self._load_ks_cfg_opts(), ) def test_no_endpoint(self): """Conf contains adapter opts, but service type not in catalog.""" self.os_fixture.v3_token.remove_service('monitoring') conn = self._get_conn() # Monasca is not in the service catalog self.assertRaises( ks_exc.catalog.EndpointNotFound, getattr, conn, 'monitoring' ) def test_no_endpoint_ignore_service_type(self): """Bogus service type disabled if not in requested service_types.""" self.assert_service_disabled( 'monitoring', "Not in the list of requested service_types.", # 'monitoring' absent from this list service_types={'compute', 'orchestration', 'bogus'}, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/config/test_from_session.py0000664000175000017500000000407700000000000025733 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from testscenarios import load_tests_apply_scenarios as load_tests # noqa from openstack.config import cloud_region from openstack import connection from openstack.tests import fakes from openstack.tests.unit import base class TestFromSession(base.TestCase): scenarios = [ ('no_region', dict(test_region=None)), ('with_region', dict(test_region='RegionOne')), ] def test_from_session(self): config = cloud_region.from_session( self.cloud.session, region_name=self.test_region ) self.assertEqual(config.name, 'identity.example.com') if not self.test_region: self.assertIsNone(config.region_name) else: self.assertEqual(config.region_name, self.test_region) server_id = str(uuid.uuid4()) server_name = self.getUniqueString('name') fake_server = fakes.make_fake_server(server_id, server_name) self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), json={'servers': [fake_server]}, ), ] ) conn = connection.Connection(config=config) s = next(conn.compute.servers()) self.assertEqual(s.id, server_id) self.assertEqual(s.name, server_name) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/config/test_init.py0000664000175000017500000000233100000000000024157 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import argparse import openstack.config from openstack.tests.unit.config import base class TestInit(base.TestCase): def test_get_cloud_region_without_arg_parser(self): cloud_region = openstack.config.get_cloud_region( options=None, validate=False ) self.assertIsInstance( cloud_region, openstack.config.cloud_region.CloudRegion ) def test_get_cloud_region_with_arg_parser(self): cloud_region = openstack.config.get_cloud_region( options=argparse.ArgumentParser(), validate=False ) self.assertIsInstance( cloud_region, openstack.config.cloud_region.CloudRegion ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/config/test_json.py0000664000175000017500000000463300000000000024174 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glob import json import os import jsonschema from testtools import content from openstack.config import defaults from openstack.tests.unit.config import base class TestConfig(base.TestCase): def json_diagnostics(self, exc_info): self.addDetail('filename', content.text_content(self.filename)) for error in sorted(self.validator.iter_errors(self.json_data)): self.addDetail('jsonschema', content.text_content(str(error))) def test_defaults_valid_json(self): _schema_path = os.path.join( os.path.dirname(os.path.realpath(defaults.__file__)), 'schema.json' ) with open(_schema_path) as f: schema = json.load(f) self.validator = jsonschema.Draft4Validator(schema) self.addOnException(self.json_diagnostics) self.filename = os.path.join( os.path.dirname(os.path.realpath(defaults.__file__)), 'defaults.json', ) with open(self.filename) as f: self.json_data = json.load(f) self.assertTrue(self.validator.is_valid(self.json_data)) def test_vendors_valid_json(self): _schema_path = os.path.join( os.path.dirname(os.path.realpath(defaults.__file__)), 'vendor-schema.json', ) with open(_schema_path) as f: schema = json.load(f) self.validator = jsonschema.Draft4Validator(schema) self.addOnException(self.json_diagnostics) _vendors_path = os.path.join( os.path.dirname(os.path.realpath(defaults.__file__)), 'vendors' ) for self.filename in glob.glob(os.path.join(_vendors_path, '*.json')): with open(self.filename) as f: self.json_data = json.load(f) self.assertTrue(self.validator.is_valid(self.json_data)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/config/test_loader.py0000664000175000017500000001210100000000000024456 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile import textwrap from openstack.config import loader from openstack import exceptions from openstack.tests.unit.config import base FILES = { 'yaml': textwrap.dedent( ''' foo: bar baz: - 1 - 2 - 3 ''' ), 'json': textwrap.dedent( ''' { "foo": "bar", "baz": [ 1, 2, 3 ] } ''' ), 'txt': textwrap.dedent( ''' foo bar baz test one two ''' ), } class TestLoader(base.TestCase): def test_base_load_yaml_json_file(self): with tempfile.TemporaryDirectory() as tmpdir: tested_files = [] for key, value in FILES.items(): fn = os.path.join(tmpdir, f'file.{key}') with open(fn, 'w+') as fp: fp.write(value) tested_files.append(fn) path, result = loader.OpenStackConfig()._load_yaml_json_file( tested_files ) # NOTE(hberaud): Prefer to test path rather than file because # our FILES var is a dict so results are appened # without keeping the initial order (python 3.5) self.assertEqual(tmpdir, os.path.dirname(path)) def test__load_yaml_json_file_without_json(self): with tempfile.TemporaryDirectory() as tmpdir: tested_files = [] for key, value in FILES.items(): if key == 'json': continue fn = os.path.join(tmpdir, f'file.{key}') with open(fn, 'w+') as fp: fp.write(value) tested_files.append(fn) path, result = loader.OpenStackConfig()._load_yaml_json_file( tested_files ) # NOTE(hberaud): Prefer to test path rather than file because # our FILES var is a dict so results are appened # without keeping the initial order (python 3.5) self.assertEqual(tmpdir, os.path.dirname(path)) def test__load_yaml_json_file_without_json_yaml(self): with tempfile.TemporaryDirectory() as 
tmpdir: tested_files = [] fn = os.path.join(tmpdir, 'file.txt') with open(fn, 'w+') as fp: fp.write(FILES['txt']) tested_files.append(fn) path, result = loader.OpenStackConfig()._load_yaml_json_file( tested_files ) self.assertEqual(fn, path) def test__load_yaml_json_file_without_perm(self): with tempfile.TemporaryDirectory() as tmpdir: tested_files = [] fn = os.path.join(tmpdir, 'file.txt') with open(fn, 'w+') as fp: fp.write(FILES['txt']) os.chmod(fn, 222) tested_files.append(fn) path, result = loader.OpenStackConfig()._load_yaml_json_file( tested_files ) self.assertEqual(None, path) def test__load_yaml_json_file_nonexisting(self): tested_files = [] fn = os.path.join('/fake', 'file.txt') tested_files.append(fn) path, result = loader.OpenStackConfig()._load_yaml_json_file( tested_files ) self.assertEqual(None, path) class TestFixArgv(base.TestCase): def test_no_changes(self): argv = [ '-a', '-b', '--long-arg', '--multi-value', 'key1=value1', '--multi-value', 'key2=value2', ] expected = argv[:] loader._fix_argv(argv) self.assertEqual(expected, argv) def test_replace(self): argv = [ '-a', '-b', '--long-arg', '--multi_value', 'key1=value1', '--multi_value', 'key2=value2', ] expected = [ '-a', '-b', '--long-arg', '--multi-value', 'key1=value1', '--multi-value', 'key2=value2', ] loader._fix_argv(argv) self.assertEqual(expected, argv) def test_mix(self): argv = [ '-a', '-b', '--long-arg', '--multi_value', 'key1=value1', '--multi-value', 'key2=value2', ] self.assertRaises(exceptions.ConfigException, loader._fix_argv, argv) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.441408 openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/0000775000175000017500000000000000000000000027655 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/__init__.py0000664000175000017500000000000000000000000031754 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.441408 openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/v1/0000775000175000017500000000000000000000000030203 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/v1/__init__.py0000664000175000017500000000000000000000000032302 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/v1/test_cluster.py0000664000175000017500000000417200000000000033301 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.container_infrastructure_management.v1 import cluster from openstack.tests.unit import base EXAMPLE = { "cluster_template_id": "0562d357-8641-4759-8fed-8173f02c9633", "create_timeout": 60, "discovery_url": None, "flavor_id": None, "keypair": "my_keypair", "labels": {}, "master_count": 2, "master_flavor_id": None, "name": "k8s", "node_count": 2, } class TestCluster(base.TestCase): def test_basic(self): sot = cluster.Cluster() self.assertIsNone(sot.resource_key) self.assertEqual('clusters', sot.resources_key) self.assertEqual('/clusters', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = cluster.Cluster(**EXAMPLE) self.assertEqual( EXAMPLE['cluster_template_id'], sot.cluster_template_id, ) self.assertEqual(EXAMPLE['create_timeout'], sot.create_timeout) self.assertEqual(EXAMPLE['discovery_url'], sot.discovery_url) self.assertEqual(EXAMPLE['flavor_id'], sot.flavor_id) self.assertEqual(EXAMPLE['keypair'], sot.keypair) self.assertEqual(EXAMPLE['labels'], sot.labels) self.assertEqual(EXAMPLE['master_count'], sot.master_count) self.assertEqual(EXAMPLE['master_flavor_id'], sot.master_flavor_id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['node_count'], sot.node_count) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/v1/test_cluster_certificate.py 22 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/v1/test_cluster_certific0000664000175000017500000000327600000000000034526 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.container_infrastructure_management.v1 import ( cluster_certificate, ) from openstack.tests.unit import base EXAMPLE = { "cluster_uuid": "0b4b766f-1500-44b3-9804-5a6e12fe6df4", "pem": "-----BEGIN CERTIFICATE-----\nMIICzDCCAbSgAwIBAgIQOOkVcEN7TNa9E80G", "bay_uuid": "0b4b766f-1500-44b3-9804-5a6e12fe6df4", "csr": "-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UE", } class TestClusterCertificate(base.TestCase): def test_basic(self): sot = cluster_certificate.ClusterCertificate() self.assertIsNone(sot.resource_key) self.assertEqual('/certificates', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = cluster_certificate.ClusterCertificate(**EXAMPLE) self.assertEqual(EXAMPLE['cluster_uuid'], sot.cluster_uuid) self.assertEqual(EXAMPLE['bay_uuid'], sot.bay_uuid) self.assertEqual(EXAMPLE['csr'], sot.csr) self.assertEqual(EXAMPLE['pem'], sot.pem) ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/v1/test_cluster_template.py 22 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/v1/test_cluster_template0000664000175000017500000001042400000000000034542 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.container_infrastructure_management.v1 import cluster_template from openstack.tests.unit import base EXAMPLE = { "insecure_registry": None, "http_proxy": "http://10.164.177.169:8080", "updated_at": None, "floating_ip_enabled": True, "fixed_subnet": None, "master_flavor_id": None, "uuid": "085e1c4d-4f68-4bfd-8462-74b9e14e4f39", "no_proxy": "10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", "https_proxy": "http://10.164.177.169:8080", "tls_disabled": False, "keypair_id": "kp", "public": False, "labels": {}, "docker_volume_size": 3, "server_type": "vm", "external_network_id": "public", "cluster_distro": "fedora-atomic", "image_id": "fedora-atomic-latest", "volume_driver": "cinder", "registry_enabled": False, "docker_storage_driver": "devicemapper", "apiserver_port": None, "name": "k8s-bm2", "created_at": "2016-08-29T02:08:08+00:00", "network_driver": "flannel", "fixed_network": None, "coe": "kubernetes", "flavor_id": "m1.small", "master_lb_enabled": True, "dns_nameserver": "8.8.8.8", "hidden": True, } class TestClusterTemplate(base.TestCase): def test_basic(self): sot = cluster_template.ClusterTemplate() self.assertIsNone(sot.resource_key) self.assertEqual('clustertemplates', sot.resources_key) self.assertEqual('/clustertemplates', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = cluster_template.ClusterTemplate(**EXAMPLE) self.assertEqual(EXAMPLE['apiserver_port'], sot.apiserver_port) 
self.assertEqual(EXAMPLE['cluster_distro'], sot.cluster_distro) self.assertEqual(EXAMPLE['coe'], sot.coe) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual( EXAMPLE['docker_storage_driver'], sot.docker_storage_driver ) self.assertEqual(EXAMPLE['docker_volume_size'], sot.docker_volume_size) self.assertEqual(EXAMPLE['dns_nameserver'], sot.dns_nameserver) self.assertEqual( EXAMPLE['external_network_id'], sot.external_network_id ) self.assertEqual(EXAMPLE['fixed_network'], sot.fixed_network) self.assertEqual(EXAMPLE['fixed_subnet'], sot.fixed_subnet) self.assertEqual(EXAMPLE['flavor_id'], sot.flavor_id) self.assertEqual(EXAMPLE['http_proxy'], sot.http_proxy) self.assertEqual(EXAMPLE['https_proxy'], sot.https_proxy) self.assertEqual(EXAMPLE['image_id'], sot.image_id) self.assertEqual(EXAMPLE['insecure_registry'], sot.insecure_registry) self.assertEqual( EXAMPLE['floating_ip_enabled'], sot.is_floating_ip_enabled ) self.assertEqual(EXAMPLE['hidden'], sot.is_hidden) self.assertEqual( EXAMPLE['master_lb_enabled'], sot.is_master_lb_enabled ) self.assertEqual(EXAMPLE['tls_disabled'], sot.is_tls_disabled) self.assertEqual(EXAMPLE['public'], sot.is_public) self.assertEqual(EXAMPLE['registry_enabled'], sot.is_registry_enabled) self.assertEqual(EXAMPLE['keypair_id'], sot.keypair_id) self.assertEqual(EXAMPLE['master_flavor_id'], sot.master_flavor_id) self.assertEqual(EXAMPLE['network_driver'], sot.network_driver) self.assertEqual(EXAMPLE['no_proxy'], sot.no_proxy) self.assertEqual(EXAMPLE['server_type'], sot.server_type) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE['uuid'], sot.uuid) self.assertEqual(EXAMPLE['volume_driver'], sot.volume_driver) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/v1/test_proxy.py0000664000175000017500000000751500000000000033005 0ustar00zuulzuul00000000000000# 
Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.container_infrastructure_management.v1 import ( cluster_certificate, ) from openstack.container_infrastructure_management.v1 import _proxy from openstack.container_infrastructure_management.v1 import cluster from openstack.container_infrastructure_management.v1 import cluster_template from openstack.container_infrastructure_management.v1 import service from openstack.tests.unit import test_proxy_base class TestMagnumProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestCluster(TestMagnumProxy): def test_cluster_get(self): self.verify_get(self.proxy.get_cluster, cluster.Cluster) def test_cluster_find(self): self.verify_find( self.proxy.find_cluster, cluster.Cluster, method_kwargs={}, expected_kwargs={}, ) def test_clusters(self): self.verify_list( self.proxy.clusters, cluster.Cluster, method_kwargs={"query": 1}, expected_kwargs={"query": 1}, ) def test_cluster_create_attrs(self): self.verify_create(self.proxy.create_cluster, cluster.Cluster) def test_cluster_delete(self): self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, False) def test_cluster_delete_ignore(self): self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, True) class TestClusterCertificate(TestMagnumProxy): def test_cluster_certificate_get(self): self.verify_get( self.proxy.get_cluster_certificate, cluster_certificate.ClusterCertificate, ) def 
test_cluster_certificate_create_attrs(self): self.verify_create( self.proxy.create_cluster_certificate, cluster_certificate.ClusterCertificate, ) class TestClusterTemplate(TestMagnumProxy): def test_cluster_template_get(self): self.verify_get( self.proxy.get_cluster_template, cluster_template.ClusterTemplate ) def test_cluster_template_find(self): self.verify_find( self.proxy.find_cluster_template, cluster_template.ClusterTemplate, method_kwargs={}, expected_kwargs={}, ) def test_cluster_templates(self): self.verify_list( self.proxy.cluster_templates, cluster_template.ClusterTemplate, method_kwargs={"query": 1}, expected_kwargs={"query": 1}, ) def test_cluster_template_create_attrs(self): self.verify_create( self.proxy.create_cluster_template, cluster_template.ClusterTemplate, ) def test_cluster_template_delete(self): self.verify_delete( self.proxy.delete_cluster_template, cluster_template.ClusterTemplate, False, ) def test_cluster_template_delete_ignore(self): self.verify_delete( self.proxy.delete_cluster_template, cluster_template.ClusterTemplate, True, ) class TestService(TestMagnumProxy): def test_services(self): self.verify_list( self.proxy.services, service.Service, method_kwargs={}, expected_kwargs={}, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/container_infrastructure_management/v1/test_service.py0000664000175000017500000000352400000000000033260 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.container_infrastructure_management.v1 import service from openstack.tests.unit import base EXAMPLE = { "binary": "magnum-conductor", "created_at": "2016-08-23T10:52:13+00:00", "state": "up", "report_count": 2179, "updated_at": "2016-08-25T01:13:16+00:00", "host": "magnum-manager", "disabled_reason": None, "id": 1, } class TestService(base.TestCase): def test_basic(self): sot = service.Service() self.assertIsNone(sot.resource_key) self.assertEqual('mservices', sot.resources_key) self.assertEqual('/mservices', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = service.Service(**EXAMPLE) self.assertEqual(EXAMPLE['binary'], sot.binary) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['disabled_reason'], sot.disabled_reason) self.assertEqual(EXAMPLE['host'], sot.host) self.assertEqual(EXAMPLE['report_count'], sot.report_count) self.assertEqual(EXAMPLE['state'], sot.state) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.441408 openstacksdk-4.0.0/openstack/tests/unit/database/0000775000175000017500000000000000000000000022103 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/database/__init__.py0000664000175000017500000000000000000000000024202 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.441408 openstacksdk-4.0.0/openstack/tests/unit/database/v1/0000775000175000017500000000000000000000000022431 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/database/v1/__init__.py0000664000175000017500000000000000000000000024530 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/database/v1/test_database.py0000664000175000017500000000326200000000000025611 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.database.v1 import database from openstack.tests.unit import base IDENTIFIER = 'NAME' INSTANCE_ID = 'INSTANCE_ID' EXAMPLE = { 'character_set': '1', 'collate': '2', 'instance_id': INSTANCE_ID, 'name': IDENTIFIER, } class TestDatabase(base.TestCase): def test_basic(self): sot = database.Database() self.assertEqual('database', sot.resource_key) self.assertEqual('databases', sot.resources_key) path = '/instances/%(instance_id)s/databases' self.assertEqual(path, sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) def test_make_it(self): sot = database.Database(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['character_set'], sot.character_set) self.assertEqual(EXAMPLE['collate'], sot.collate) self.assertEqual(EXAMPLE['instance_id'], sot.instance_id) self.assertEqual(IDENTIFIER, sot.name) self.assertEqual(IDENTIFIER, sot.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/database/v1/test_flavor.py0000664000175000017500000000267600000000000025346 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.database.v1 import flavor from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '1', 'name': '2', 'ram': '3', } class TestFlavor(base.TestCase): def test_basic(self): sot = flavor.Flavor() self.assertEqual('flavor', sot.resource_key) self.assertEqual('flavors', sot.resources_key) self.assertEqual('/flavors', sot.base_path) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) def test_make_it(self): sot = flavor.Flavor(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['ram'], sot.ram) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/database/v1/test_instance.py0000664000175000017500000001052700000000000025653 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.database.v1 import instance from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'flavor': '1', 'id': IDENTIFIER, 'links': '3', 'name': '4', 'status': '5', 'volume': '6', 'datastore': {'7': 'seven'}, 'region': '8', 'hostname': '9', 'created': '10', 'updated': '11', } class TestInstance(base.TestCase): def test_basic(self): sot = instance.Instance() self.assertEqual('instance', sot.resource_key) self.assertEqual('instances', sot.resources_key) self.assertEqual('/instances', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = instance.Instance(**EXAMPLE) self.assertEqual(EXAMPLE['flavor'], sot.flavor) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['volume'], sot.volume) self.assertEqual(EXAMPLE['datastore'], sot.datastore) self.assertEqual(EXAMPLE['region'], sot.region) self.assertEqual(EXAMPLE['hostname'], sot.hostname) self.assertEqual(EXAMPLE['created'], sot.created_at) self.assertEqual(EXAMPLE['updated'], sot.updated_at) def test_enable_root_user(self): sot = instance.Instance(**EXAMPLE) response = mock.Mock() response.body = {'user': {'name': 'root', 'password': 'foo'}} response.json = mock.Mock(return_value=response.body) sess = mock.Mock() sess.post = mock.Mock(return_value=response) self.assertEqual(response.body['user'], sot.enable_root_user(sess)) url = "instances/%s/root" % IDENTIFIER sess.post.assert_called_with( url, ) def test_is_root_enabled(self): sot = instance.Instance(**EXAMPLE) response = mock.Mock() response.body = {'rootEnabled': True} response.json = mock.Mock(return_value=response.body) sess = mock.Mock() sess.get = mock.Mock(return_value=response) 
self.assertTrue(sot.is_root_enabled(sess)) url = "instances/%s/root" % IDENTIFIER sess.get.assert_called_with( url, ) def test_action_restart(self): sot = instance.Instance(**EXAMPLE) response = mock.Mock() response.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=response) self.assertIsNone(sot.restart(sess)) url = "instances/%s/action" % IDENTIFIER body = {'restart': None} sess.post.assert_called_with(url, json=body) def test_action_resize(self): sot = instance.Instance(**EXAMPLE) response = mock.Mock() response.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=response) flavor = 'http://flavor/flav' self.assertIsNone(sot.resize(sess, flavor)) url = "instances/%s/action" % IDENTIFIER body = {'resize': {'flavorRef': flavor}} sess.post.assert_called_with(url, json=body) def test_action_resize_volume(self): sot = instance.Instance(**EXAMPLE) response = mock.Mock() response.json = mock.Mock(return_value='') sess = mock.Mock() sess.post = mock.Mock(return_value=response) size = 4 self.assertIsNone(sot.resize_volume(sess, size)) url = "instances/%s/action" % IDENTIFIER body = {'resize': {'volume': size}} sess.post.assert_called_with(url, json=body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/database/v1/test_proxy.py0000664000175000017500000001161700000000000025231 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.database.v1 import _proxy from openstack.database.v1 import database from openstack.database.v1 import flavor from openstack.database.v1 import instance from openstack.database.v1 import user from openstack.tests.unit import test_proxy_base class TestDatabaseProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_database_create_attrs(self): self.verify_create( self.proxy.create_database, database.Database, method_kwargs={"instance": "id"}, expected_kwargs={"instance_id": "id"}, ) def test_database_delete(self): self.verify_delete( self.proxy.delete_database, database.Database, ignore_missing=False, method_kwargs={"instance": "test_id"}, expected_kwargs={"instance_id": "test_id"}, ) def test_database_delete_ignore(self): self.verify_delete( self.proxy.delete_database, database.Database, ignore_missing=True, method_kwargs={"instance": "test_id"}, expected_kwargs={"instance_id": "test_id"}, ) def test_database_find(self): self._verify( 'openstack.proxy.Proxy._find', self.proxy.find_database, method_args=["db", "instance"], expected_args=[database.Database, "db"], expected_kwargs={ "instance_id": "instance", "ignore_missing": True, }, ) def test_databases(self): self.verify_list( self.proxy.databases, database.Database, method_args=["id"], expected_args=[], expected_kwargs={"instance_id": "id"}, ) def test_database_get(self): self.verify_get(self.proxy.get_database, database.Database) def test_flavor_find(self): self.verify_find(self.proxy.find_flavor, flavor.Flavor) def test_flavor_get(self): self.verify_get(self.proxy.get_flavor, flavor.Flavor) def test_flavors(self): self.verify_list(self.proxy.flavors, flavor.Flavor) def test_instance_create_attrs(self): self.verify_create(self.proxy.create_instance, instance.Instance) def test_instance_delete(self): self.verify_delete( 
self.proxy.delete_instance, instance.Instance, False ) def test_instance_delete_ignore(self): self.verify_delete(self.proxy.delete_instance, instance.Instance, True) def test_instance_find(self): self.verify_find(self.proxy.find_instance, instance.Instance) def test_instance_get(self): self.verify_get(self.proxy.get_instance, instance.Instance) def test_instances(self): self.verify_list(self.proxy.instances, instance.Instance) def test_instance_update(self): self.verify_update(self.proxy.update_instance, instance.Instance) def test_user_create_attrs(self): self.verify_create( self.proxy.create_user, user.User, method_kwargs={"instance": "id"}, expected_kwargs={"instance_id": "id"}, ) def test_user_delete(self): self.verify_delete( self.proxy.delete_user, user.User, False, method_kwargs={"instance": "id"}, expected_kwargs={"instance_id": "id"}, ) def test_user_delete_ignore(self): self.verify_delete( self.proxy.delete_user, user.User, True, method_kwargs={"instance": "id"}, expected_kwargs={"instance_id": "id"}, ) def test_user_find(self): self._verify( 'openstack.proxy.Proxy._find', self.proxy.find_user, method_args=["user", "instance"], expected_args=[user.User, "user"], expected_kwargs={ "instance_id": "instance", "ignore_missing": True, }, ) def test_users(self): self.verify_list( self.proxy.users, user.User, method_args=["test_instance"], expected_args=[], expected_kwargs={"instance_id": "test_instance"}, ) def test_user_get(self): self.verify_get(self.proxy.get_user, user.User) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/database/v1/test_user.py0000664000175000017500000000331600000000000025023 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.database.v1 import user from openstack.tests.unit import base INSTANCE_ID = 'INSTANCE_ID' CREATING = { 'databases': '1', 'name': '2', 'password': '3', } class TestUser(base.TestCase): def test_basic(self): sot = user.User() self.assertEqual('user', sot.resource_key) self.assertEqual('users', sot.resources_key) self.assertEqual('/instances/%(instance_id)s/users', sot.base_path) self.assertTrue(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make(self): sot = user.User(**CREATING) self.assertEqual(CREATING['name'], sot.id) self.assertEqual(CREATING['databases'], sot.databases) self.assertEqual(CREATING['name'], sot.name) self.assertEqual(CREATING['name'], sot.id) self.assertEqual(CREATING['password'], sot.password) def test_create(self): sot = user.User(instance_id=INSTANCE_ID, **CREATING) result = sot._prepare_request() self.assertEqual(result.body, {sot.resources_key: CREATING}) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.441408 openstacksdk-4.0.0/openstack/tests/unit/dns/0000775000175000017500000000000000000000000021123 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/dns/__init__.py0000664000175000017500000000000000000000000023222 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/dns/test_version.py0000664000175000017500000000257700000000000024234 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns import version from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'status': '3', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4454098 openstacksdk-4.0.0/openstack/tests/unit/dns/v2/0000775000175000017500000000000000000000000021452 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/dns/v2/__init__.py0000664000175000017500000000000000000000000023551 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/dns/v2/test_floating_ip.py0000664000175000017500000000363400000000000025364 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns.v2 import floating_ip as fip from openstack.tests.unit import base IDENTIFIER = 'RegionOne:id' EXAMPLE = { 'status': 'PENDING', 'ptrdname': 'smtp.example.com.', 'description': 'This is a floating ip for 127.0.0.1', 'links': {'self': 'dummylink/reverse/floatingips/RegionOne:id'}, 'ttl': 600, 'address': '172.24.4.10', 'action': 'CREATE', 'id': IDENTIFIER, } class TestFloatingIP(base.TestCase): def test_basic(self): sot = fip.FloatingIP() self.assertEqual(None, sot.resource_key) self.assertEqual('floatingips', sot.resources_key) self.assertEqual('/reverse/floatingips', sot.base_path) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertEqual('PATCH', sot.commit_method) def test_make_it(self): sot = fip.FloatingIP(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['ptrdname'], sot.ptrdname) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['ttl'], sot.ttl) self.assertEqual(EXAMPLE['address'], sot.address) self.assertEqual(EXAMPLE['action'], sot.action) self.assertEqual(EXAMPLE['status'], sot.status) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/dns/v2/test_proxy.py0000664000175000017500000002320300000000000024244 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns.v2 import _proxy from openstack.dns.v2 import floating_ip from openstack.dns.v2 import recordset from openstack.dns.v2 import zone from openstack.dns.v2 import zone_export from openstack.dns.v2 import zone_import from openstack.dns.v2 import zone_share from openstack.dns.v2 import zone_transfer from openstack.tests.unit import test_proxy_base class TestDnsProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestDnsZone(TestDnsProxy): def test_zone_create(self): self.verify_create( self.proxy.create_zone, zone.Zone, method_kwargs={'name': 'id'}, expected_kwargs={'name': 'id', 'prepend_key': False}, ) def test_zone_delete(self): self.verify_delete( self.proxy.delete_zone, zone.Zone, True, expected_kwargs={'ignore_missing': True, 'delete_shares': False}, ) def test_zone_find(self): self.verify_find(self.proxy.find_zone, zone.Zone) def test_zone_get(self): self.verify_get(self.proxy.get_zone, zone.Zone) def test_zones(self): self.verify_list(self.proxy.zones, zone.Zone) def test_zone_update(self): self.verify_update(self.proxy.update_zone, zone.Zone) def test_zone_abandon(self): self._verify( 
"openstack.dns.v2.zone.Zone.abandon", self.proxy.abandon_zone, method_args=[{'zone': 'id'}], expected_args=[self.proxy], ) def test_zone_xfr(self): self._verify( "openstack.dns.v2.zone.Zone.xfr", self.proxy.xfr_zone, method_args=[{'zone': 'id'}], expected_args=[self.proxy], ) class TestDnsRecordset(TestDnsProxy): def test_recordset_create(self): self.verify_create( self.proxy.create_recordset, recordset.Recordset, method_kwargs={'zone': 'id'}, expected_kwargs={'zone_id': 'id', 'prepend_key': False}, ) def test_recordset_delete(self): self.verify_delete( self.proxy.delete_recordset, recordset.Recordset, True ) def test_recordset_update(self): self.verify_update(self.proxy.update_recordset, recordset.Recordset) def test_recordset_get(self): self.verify_get( self.proxy.get_recordset, recordset.Recordset, method_kwargs={'zone': 'zid'}, expected_kwargs={'zone_id': 'zid'}, ) def test_recordsets(self): self.verify_list( self.proxy.recordsets, recordset.Recordset, expected_kwargs={'base_path': '/recordsets'}, ) def test_recordsets_zone(self): self.verify_list( self.proxy.recordsets, recordset.Recordset, method_kwargs={'zone': 'zid'}, expected_kwargs={'zone_id': 'zid'}, ) def test_recordset_find(self): self._verify( "openstack.proxy.Proxy._find", self.proxy.find_recordset, method_args=['zone', 'rs'], method_kwargs={}, expected_args=[recordset.Recordset, 'rs'], expected_kwargs={'ignore_missing': True, 'zone_id': 'zone'}, ) class TestDnsFloatIP(TestDnsProxy): def test_floating_ips(self): self.verify_list(self.proxy.floating_ips, floating_ip.FloatingIP) def test_floating_ip_get(self): self.verify_get(self.proxy.get_floating_ip, floating_ip.FloatingIP) def test_floating_ip_update(self): self.verify_update( self.proxy.update_floating_ip, floating_ip.FloatingIP ) def test_floating_ip_unset(self): self._verify( 'openstack.proxy.Proxy._update', self.proxy.unset_floating_ip, method_args=['value'], method_kwargs={}, expected_args=[floating_ip.FloatingIP, 'value'], 
expected_kwargs={'ptrdname': None}, ) class TestDnsZoneImport(TestDnsProxy): def test_zone_import_delete(self): self.verify_delete( self.proxy.delete_zone_import, zone_import.ZoneImport, True ) def test_zone_import_get(self): self.verify_get(self.proxy.get_zone_import, zone_import.ZoneImport) def test_zone_imports(self): self.verify_list(self.proxy.zone_imports, zone_import.ZoneImport) def test_zone_import_create(self): self.verify_create( self.proxy.create_zone_import, zone_import.ZoneImport, method_kwargs={'name': 'id'}, expected_kwargs={'name': 'id', 'prepend_key': False}, ) class TestDnsZoneExport(TestDnsProxy): def test_zone_export_delete(self): self.verify_delete( self.proxy.delete_zone_export, zone_export.ZoneExport, True ) def test_zone_export_get(self): self.verify_get(self.proxy.get_zone_export, zone_export.ZoneExport) def test_zone_export_get_text(self): self.verify_get( self.proxy.get_zone_export_text, zone_export.ZoneExport, method_args=[{'id': 'zone_export_id_value'}], expected_kwargs={'base_path': '/zones/tasks/export/%(id)s/export'}, ) def test_zone_exports(self): self.verify_list(self.proxy.zone_exports, zone_export.ZoneExport) def test_zone_export_create(self): self.verify_create( self.proxy.create_zone_export, zone_export.ZoneExport, method_args=[{'id': 'zone_id_value'}], method_kwargs={'name': 'id'}, expected_args=[], expected_kwargs={ 'name': 'id', 'zone_id': 'zone_id_value', 'prepend_key': False, }, ) class TestDnsZoneTransferRequest(TestDnsProxy): def test_zone_transfer_request_delete(self): self.verify_delete( self.proxy.delete_zone_transfer_request, zone_transfer.ZoneTransferRequest, True, ) def test_zone_transfer_request_get(self): self.verify_get( self.proxy.get_zone_transfer_request, zone_transfer.ZoneTransferRequest, ) def test_zone_transfer_requests(self): self.verify_list( self.proxy.zone_transfer_requests, zone_transfer.ZoneTransferRequest, ) def test_zone_transfer_request_create(self): self.verify_create( 
self.proxy.create_zone_transfer_request, zone_transfer.ZoneTransferRequest, method_args=[{'id': 'zone_id_value'}], method_kwargs={'name': 'id'}, expected_args=[], expected_kwargs={ 'name': 'id', 'zone_id': 'zone_id_value', 'prepend_key': False, }, ) def test_zone_transfer_request_update(self): self.verify_update( self.proxy.update_zone_transfer_request, zone_transfer.ZoneTransferRequest, ) class TestDnsZoneTransferAccept(TestDnsProxy): def test_zone_transfer_accept_get(self): self.verify_get( self.proxy.get_zone_transfer_accept, zone_transfer.ZoneTransferAccept, ) def test_zone_transfer_accepts(self): self.verify_list( self.proxy.zone_transfer_accepts, zone_transfer.ZoneTransferAccept ) def test_zone_transfer_accept_create(self): self.verify_create( self.proxy.create_zone_transfer_accept, zone_transfer.ZoneTransferAccept, ) class TestDnsZoneShare(TestDnsProxy): def test_zone_share_create(self): self.verify_create( self.proxy.create_zone_share, zone_share.ZoneShare, method_kwargs={'zone': 'bogus_id'}, expected_kwargs={'zone_id': 'bogus_id'}, ) def test_zone_share_delete(self): self.verify_delete( self.proxy.delete_zone_share, zone_share.ZoneShare, ignore_missing=True, method_args={'zone': 'bogus_id', 'zone_share': 'bogus_id'}, expected_args=['zone_share'], expected_kwargs={'zone_id': 'zone', 'ignore_missing': True}, ) def test_zone_share_find(self): self.verify_find( self.proxy.find_zone_share, zone_share.ZoneShare, method_args=['zone'], expected_args=['zone'], expected_kwargs={ 'zone_id': 'resource_name', 'ignore_missing': True, }, ) def test_zone_share_get(self): self.verify_get( self.proxy.get_zone_share, zone_share.ZoneShare, method_args=['zone', 'zone_share'], expected_args=['zone_share'], expected_kwargs={'zone_id': 'zone'}, ) def test_zone_shares(self): self.verify_list( self.proxy.zone_shares, zone_share.ZoneShare, method_args=['zone'], expected_args=[], expected_kwargs={'zone_id': 'zone'}, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/dns/v2/test_recordset.py0000664000175000017500000000450500000000000025061 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns.v2 import recordset from openstack.tests.unit import base IDENTIFIER = 'NAME' EXAMPLE = { 'description': 'This is an example record set.', 'updated_at': None, 'records': ['10.1.0.2'], 'ttl': 3600, 'id': IDENTIFIER, 'name': 'example.org.', 'project_id': '4335d1f0-f793-11e2-b778-0800200c9a66', 'zone_id': '2150b1bf-dee2-4221-9d85-11f7886fb15f', 'zone_name': 'example.com.', 'created_at': '2014-10-24T19:59:44.000000', 'version': 1, 'type': 'A', 'status': 'ACTIVE', 'action': 'NONE', } class TestRecordset(base.TestCase): def test_basic(self): sot = recordset.Recordset() self.assertIsNone(sot.resource_key) self.assertEqual('recordsets', sot.resources_key) self.assertEqual('/zones/%(zone_id)s/recordsets', sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertDictEqual( { 'data': 'data', 'description': 'description', 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'status': 'status', 'ttl': 'ttl', 'type': 'type', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = recordset.Recordset(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['description'], sot.description) 
self.assertEqual(EXAMPLE['ttl'], sot.ttl) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['status'], sot.status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/dns/v2/test_zone.py0000664000175000017500000000560000000000000024037 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.dns.v2 import zone from openstack.tests.unit import base IDENTIFIER = 'NAME' EXAMPLE = { 'attributes': {'tier': 'gold', 'ha': 'true'}, 'id': IDENTIFIER, 'name': 'test.org', 'email': 'joe@example.org', 'type': 'PRIMARY', 'ttl': 7200, 'description': 'This is an example zone.', 'status': 'ACTIVE', 'shared': False, } class TestZone(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.status_code = 200 self.sess = mock.Mock(spec=adapter.Adapter) self.sess.post = mock.Mock(return_value=self.resp) self.sess.default_microversion = None def test_basic(self): sot = zone.Zone() self.assertEqual(None, sot.resource_key) self.assertEqual('zones', sot.resources_key) self.assertEqual('/zones', sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) 
self.assertTrue(sot.allow_delete) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'description': 'description', 'email': 'email', 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'status': 'status', 'ttl': 'ttl', 'type': 'type', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = zone.Zone(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['email'], sot.email) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['ttl'], sot.ttl) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['shared'], sot.is_shared) def test_abandon(self): sot = zone.Zone(**EXAMPLE) self.assertIsNone(sot.abandon(self.sess)) self.sess.post.assert_called_with( 'zones/NAME/tasks/abandon', json=None ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/dns/v2/test_zone_export.py0000664000175000017500000000612000000000000025436 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from keystoneauth1 import adapter from openstack.dns.v2 import zone_export from openstack.tests.unit import base IDENTIFIER = '074e805e-fe87-4cbb-b10b-21a06e215d41' EXAMPLE = { 'status': 'COMPLETE', 'zone_id': '6625198b-d67d-47dc-8d29-f90bd60f3ac4', 'links': { 'self': 'http://127.0.0.1:9001/v2/zones/tasks/exports/074e805e-f', 'href': 'http://127.0.0.1:9001/v2/zones/6625198b-d67d-', }, 'created_at': '2015-05-08T15:43:42.000000', 'updated_at': '2015-05-08T15:43:43.000000', 'version': 2, 'location': 'designate://v2/zones/tasks/exports/8ec17fe1/export', 'message': 'example.com. exported', 'project_id': 'noauth-project', 'id': IDENTIFIER, } @mock.patch.object(zone_export.ZoneExport, '_translate_response', mock.Mock()) class TestZoneExport(base.TestCase): def test_basic(self): sot = zone_export.ZoneExport() self.assertEqual('', sot.resource_key) self.assertEqual('exports', sot.resources_key) self.assertEqual('/zones/tasks/export', sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'message': 'message', 'status': 'status', 'zone_id': 'zone_id', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = zone_export.ZoneExport(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE['version'], sot.version) self.assertEqual(EXAMPLE['message'], sot.message) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['zone_id'], sot.zone_id) def test_create(self): sot = zone_export.ZoneExport() response = mock.Mock() response.json = mock.Mock(return_value='') self.session = mock.Mock(spec=adapter.Adapter) self.session.default_microversion = '1.1' 
sot.create(self.session) self.session.post.assert_called_once_with( mock.ANY, json=None, headers=None, microversion=self.session.default_microversion, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/dns/v2/test_zone_import.py0000664000175000017500000000604100000000000025431 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.dns.v2 import zone_import from openstack.tests.unit import base IDENTIFIER = '074e805e-fe87-4cbb-b10b-21a06e215d41' EXAMPLE = { 'status': 'COMPLETE', 'zone_id': '6625198b-d67d-47dc-8d29-f90bd60f3ac4', 'links': { 'self': 'http://127.0.0.1:9001/v2/zones/tasks/imports/074e805e-f', 'href': 'http://127.0.0.1:9001/v2/zones/6625198b-d67d-', }, 'created_at': '2015-05-08T15:43:42.000000', 'updated_at': '2015-05-08T15:43:43.000000', 'version': 2, 'message': 'example.com. 
imported', 'project_id': 'noauth-project', 'id': IDENTIFIER, } @mock.patch.object(zone_import.ZoneImport, '_translate_response', mock.Mock()) class TestZoneImport(base.TestCase): def test_basic(self): sot = zone_import.ZoneImport() self.assertEqual('', sot.resource_key) self.assertEqual('imports', sot.resources_key) self.assertEqual('/zones/tasks/import', sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'message': 'message', 'status': 'status', 'zone_id': 'zone_id', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = zone_import.ZoneImport(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE['version'], sot.version) self.assertEqual(EXAMPLE['message'], sot.message) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['zone_id'], sot.zone_id) def test_create(self): sot = zone_import.ZoneImport() response = mock.Mock() response.json = mock.Mock(return_value='') self.session = mock.Mock(spec=adapter.Adapter) self.session.default_microversion = '1.1' sot.create(self.session) self.session.post.assert_called_once_with( mock.ANY, json=None, headers={'content-type': 'text/dns'}, microversion=self.session.default_microversion, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/dns/v2/test_zone_share.py0000664000175000017500000000450300000000000025222 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.dns.v2 import zone_share from openstack.tests.unit import base class TestZoneShare(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.resp.status_code = 200 self.sess = mock.Mock(spec=adapter.Adapter) self.sess.post = mock.Mock(return_value=self.resp) self.sess.default_microversion = None def test_basic(self): sot = zone_share.ZoneShare() self.assertEqual(None, sot.resource_key) self.assertEqual('shared_zones', sot.resources_key) self.assertEqual('/zones/%(zone_id)s/shares', sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertFalse(sot.allow_commit) self.assertDictEqual( { 'target_project_id': 'target_project_id', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): share_id = 'bogus_id' zone_id = 'bogus_zone_id' project_id = 'bogus_project_id' target_id = 'bogus_target_id' expected = { 'id': share_id, 'zone_id': zone_id, 'project_id': project_id, 'target_project_id': target_id, } sot = zone_share.ZoneShare(**expected) self.assertEqual(share_id, sot.id) self.assertEqual(zone_id, sot.zone_id) self.assertEqual(project_id, sot.project_id) self.assertEqual(target_id, sot.target_project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/dns/v2/test_zone_transfer.py0000664000175000017500000001013400000000000025741 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.dns.v2 import zone_transfer from openstack.tests.unit import base IDENTIFIER = '074e805e-fe87-4cbb-b10b-21a06e215d41' EXAMPLE_REQUEST = { 'created_at': '2014-07-17T20:34:40.882579', 'description': 'some description', 'id': IDENTIFIER, 'key': '9Z2R50Y0', 'project_id': '1', 'status': 'ACTIVE', 'target_project_id': '123456', 'updated_at': None, 'zone_id': '6b78734a-aef1-45cd-9708-8eb3c2d26ff8', 'zone_name': 'qa.dev.example.com.', } EXAMPLE_ACCEPT = { 'status': 'COMPLETE', 'zone_id': 'b4542f5a-f1ea-4ec1-b850-52db9dc3f465', 'created_at': '2016-06-22 06:13:55', 'updated_at': 'null', 'key': 'FUGXMZ5N', 'project_id': '2e43de7ce3504a8fb90a45382532c37e', 'id': IDENTIFIER, 'zone_transfer_request_id': '794fdf58-6e1d-41da-8b2d-16b6d10c8827', } class TestZoneTransferRequest(base.TestCase): def test_basic(self): sot = zone_transfer.ZoneTransferRequest() # self.assertEqual('', sot.resource_key) self.assertEqual('transfer_requests', sot.resources_key) self.assertEqual('/zones/tasks/transfer_requests', sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertDictEqual( {'limit': 'limit', 'marker': 'marker', 'status': 'status'}, sot._query_mapping._mapping, ) 
def test_make_it(self): sot = zone_transfer.ZoneTransferRequest(**EXAMPLE_REQUEST) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE_REQUEST['created_at'], sot.created_at) self.assertEqual(EXAMPLE_REQUEST['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE_REQUEST['description'], sot.description) self.assertEqual(EXAMPLE_REQUEST['key'], sot.key) self.assertEqual(EXAMPLE_REQUEST['project_id'], sot.project_id) self.assertEqual(EXAMPLE_REQUEST['status'], sot.status) self.assertEqual( EXAMPLE_REQUEST['target_project_id'], sot.target_project_id ) self.assertEqual(EXAMPLE_REQUEST['zone_id'], sot.zone_id) self.assertEqual(EXAMPLE_REQUEST['zone_name'], sot.zone_name) class TestZoneTransferAccept(base.TestCase): def test_basic(self): sot = zone_transfer.ZoneTransferAccept() # self.assertEqual('', sot.resource_key) self.assertEqual('transfer_accepts', sot.resources_key) self.assertEqual('/zones/tasks/transfer_accepts', sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertDictEqual( {'limit': 'limit', 'marker': 'marker', 'status': 'status'}, sot._query_mapping._mapping, ) def test_make_it(self): sot = zone_transfer.ZoneTransferAccept(**EXAMPLE_ACCEPT) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE_ACCEPT['created_at'], sot.created_at) self.assertEqual(EXAMPLE_ACCEPT['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE_ACCEPT['key'], sot.key) self.assertEqual(EXAMPLE_ACCEPT['project_id'], sot.project_id) self.assertEqual(EXAMPLE_ACCEPT['status'], sot.status) self.assertEqual(EXAMPLE_ACCEPT['zone_id'], sot.zone_id) self.assertEqual( EXAMPLE_ACCEPT['zone_transfer_request_id'], sot.zone_transfer_request_id, ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4454098 
openstacksdk-4.0.0/openstack/tests/unit/fake/0000775000175000017500000000000000000000000021245 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fake/__init__.py0000664000175000017500000000000000000000000023344 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fake/fake_service.py0000664000175000017500000000157100000000000024251 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import service_description from openstack.tests.unit.fake.v1 import _proxy as _proxy_1 from openstack.tests.unit.fake.v2 import _proxy as _proxy_2 class FakeService(service_description.ServiceDescription): """The fake service.""" supported_versions = { '1': _proxy_1.Proxy, '2': _proxy_2.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4454098 openstacksdk-4.0.0/openstack/tests/unit/fake/v1/0000775000175000017500000000000000000000000021573 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fake/v1/__init__.py0000664000175000017500000000000000000000000023672 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fake/v1/_proxy.py0000664000175000017500000000123500000000000023466 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import proxy class Proxy(proxy.Proxy): skip_discovery = True def dummy(self): return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fake/v1/fake.py0000664000175000017500000000240000000000000023047 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Fake(resource.Resource): resource_key = "resource" resources_key = "resources" base_path = "/fake" allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_head = True #: The transaction date and time. timestamp = resource.Header("x-timestamp") #: The name of this resource. name = resource.Body("name", alternate_id=True) #: The value of the resource. Also available in headers. value = resource.Body("value", alias="x-resource-value") #: Is this resource cool? If so, set it to True. #: This is a multi-line comment about cool stuff. 
cool = resource.Body("cool", type=bool) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4454098 openstacksdk-4.0.0/openstack/tests/unit/fake/v2/0000775000175000017500000000000000000000000021574 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fake/v2/__init__.py0000664000175000017500000000000000000000000023673 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fake/v2/_proxy.py0000664000175000017500000000123600000000000023470 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import proxy class Proxy(proxy.Proxy): skip_discovery = True def dummy(self): return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fake/v2/fake.py0000664000175000017500000000240000000000000023050 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Fake(resource.Resource): resource_key = "resource" resources_key = "resources" base_path = "/fake" allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_head = True #: The transaction date and time. timestamp = resource.Header("x-timestamp") #: The name of this resource. name = resource.Body("name", alternate_id=True) #: The value of the resource. Also available in headers. value = resource.Body("value", alias="x-resource-value") #: Is this resource cool? If so, set it to True. #: This is a multi-line comment about cool stuff. cool = resource.Body("cool", type=bool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fakes.py0000664000175000017500000000321300000000000022001 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json from unittest import mock import requests class FakeTransport(mock.Mock): RESPONSE = mock.Mock('200 OK') def __init__(self): super().__init__() self.request = mock.Mock() self.request.return_value = self.RESPONSE class FakeAuthenticator(mock.Mock): TOKEN = 'fake_token' ENDPOINT = 'http://www.example.com/endpoint' def __init__(self): super().__init__() self.get_token = mock.Mock() self.get_token.return_value = self.TOKEN self.get_endpoint = mock.Mock() self.get_endpoint.return_value = self.ENDPOINT class FakeResponse(requests.Response): def __init__( self, headers=None, status_code=200, data=None, encoding=None ): super().__init__() headers = headers or {} self.status_code = status_code self.headers.update(headers) self._content = json.dumps(data) if not isinstance(self._content, bytes): self._content = self._content.encode() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4534137 openstacksdk-4.0.0/openstack/tests/unit/fixtures/0000775000175000017500000000000000000000000022210 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/accelerator.json0000664000175000017500000000132200000000000025365 0ustar00zuulzuul00000000000000{ "versions": [ { "id": "2.0", "links": [ { "href": "/v2/", "rel": "self" }, { "href": "https://accelerator.example.com/api-ref/accelerator", "rel": "help" } ], "max_version": "2.0", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.accelerator-v1+json" } ], "min_version": "2.0", "status": "CURRENT", "updated": "2019-09-01T00:00:00Z" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/bad-glance-version.json0000664000175000017500000000036200000000000026544 0ustar00zuulzuul00000000000000{ "versions": [ { 
"status": "CURRENT", "updated": "2013-07-23T11:33:21Z", "links": [ { "href": "https://example.com/image/v7/", "rel": "self" } ], "id": "v7" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/bad-placement.json0000664000175000017500000000030200000000000025572 0ustar00zuulzuul00000000000000{ "versions": [ { "id": "v1.0", "links": [{"href": "", "rel": "self"}], "max_version": "1.17", "min_version": "1.0" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/baremetal.json0000664000175000017500000000115100000000000025035 0ustar00zuulzuul00000000000000{ "default_version": { "id": "v1", "links": [ { "href": "https://baremetal.example.com/v1/", "rel": "self" } ], "min_version": "1.1", "status": "CURRENT", "version": "1.33" }, "description": "Ironic is an OpenStack project which aims to provision baremetal machines.", "name": "OpenStack Ironic API", "versions": [ { "id": "v1", "links": [ { "href": "https://baremetal.example.com/v1/", "rel": "self" } ], "min_version": "1.1", "status": "CURRENT", "version": "1.33" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/block-storage-version.json0000664000175000017500000000111300000000000027316 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "CURRENT", "updated": "2017-02-25T12:00:00Z", "links": [ { "href": "https://docs.openstack.org/", "type": "text/html", "rel": "describedby" }, { "href": "https://volume.example.com/v3/", "rel": "self" } ], "min_version": "3.0", "version": "3.0", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=3" } ], "id": "v3.0" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 
xustar000000000000000028 mtime=1725296385.4534137 openstacksdk-4.0.0/openstack/tests/unit/fixtures/clouds/0000775000175000017500000000000000000000000023501 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/clouds/clouds.yaml0000664000175000017500000000123700000000000025661 0ustar00zuulzuul00000000000000clouds: _test_cloud_: auth: auth_url: https://identity.example.com password: password project_name: admin username: admin user_domain_name: default project_domain_name: default region_name: RegionOne _test_cloud_v2_: auth: auth_url: https://identity.example.com password: password project_name: admin username: admin identity_api_version: '2.0' region_name: RegionOne _bogus_test_: auth_type: bogus auth: auth_url: https://identity.example.com/v2.0 username: _test_user_ password: _test_pass_ project_name: _test_project_ region_name: _test_region_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/clouds/clouds_cache.yaml0000664000175000017500000000137100000000000027003 0ustar00zuulzuul00000000000000cache: max_age: 90 class: dogpile.cache.memory expiration: server: 1 port: 1 clouds: _test_cloud_: auth: auth_url: https://identity.example.com password: password project_name: admin username: admin user_domain_name: default project_domain_name: default region_name: RegionOne _test_cloud_v2_: auth: auth_url: https://identity.example.com password: password project_name: admin username: admin identity_api_version: '2.0' region_name: RegionOne _bogus_test_: auth_type: bogus auth: auth_url: http://identity.example.com/v2.0 username: _test_user_ password: _test_pass_ project_name: _test_project_ region_name: _test_region_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/fixtures/clustering.json0000664000175000017500000000131700000000000025264 0ustar00zuulzuul00000000000000{ "versions": [ { "id": "1.0", "links": [ { "href": "/v1/", "rel": "self" }, { "href": "https://clustering.example.com/api-ref/clustering", "rel": "help" } ], "max_version": "1.7", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.clustering-v1+json" } ], "min_version": "1.0", "status": "CURRENT", "updated": "2016-01-18T00:00:00Z" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/compute-version.json0000664000175000017500000000107500000000000026245 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "SUPPORTED", "updated": "2011-01-21T11:33:21Z", "links": [ { "href": "https://compute.example.com/v2/", "rel": "self" } ], "min_version": "", "version": "", "id": "v2.0" }, { "status": "CURRENT", "updated": "2013-07-23T11:33:21Z", "links": [ { "href": "https://compute.example.com/v2.1/", "rel": "self" } ], "min_version": "2.10", "version": "2.53", "id": "v2.1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/discovery.json0000664000175000017500000000176200000000000025120 0ustar00zuulzuul00000000000000{ "versions": { "values": [ { "status": "stable", "updated": "2016-04-04T00:00:00Z", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v3+json" } ], "id": "v3.6", "links": [ { "href": "https://identity.example.com/v3/", "rel": "self" } ] }, { "status": "stable", "updated": "2014-04-17T00:00:00Z", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0+json" } ], "id": "v2.0", "links": [ { "href": "https://identity.example.com/v2.0/", "rel": "self" }, { "href": "http://docs.openstack.org/", 
"type": "text/html", "rel": "describedby" } ] } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/dns.json0000664000175000017500000000101100000000000023660 0ustar00zuulzuul00000000000000{ "versions": { "values": [{ "id": "v1", "links": [ { "href": "https://dns.example.com/v1", "rel": "self" } ], "status": "DEPRECATED" }, { "id": "v2", "links": [ { "href": "https://dns.example.com/v2", "rel": "self" } ], "status": "CURRENT" }] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/image-version-broken.json0000664000175000017500000000204300000000000027125 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "CURRENT", "id": "v2.3", "links": [ { "href": "http://localhost/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.2", "links": [ { "href": "http://localhost/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.1", "links": [ { "href": "http://localhost/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.0", "links": [ { "href": "http://localhost/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v1.1", "links": [ { "href": "http://localhost/v1/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v1.0", "links": [ { "href": "http://localhost/v1/", "rel": "self" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/image-version-suburl.json0000664000175000017500000000212300000000000027160 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "CURRENT", "id": "v2.3", "links": [ { "href": "http://example.com/image/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.2", "links": [ { "href": "http://example.com/image/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.1", 
"links": [ { "href": "http://example.com/image/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.0", "links": [ { "href": "http://example.com/image/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v1.1", "links": [ { "href": "http://example.com/image/v1/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v1.0", "links": [ { "href": "http://example.com/image/v1/", "rel": "self" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/image-version-v1.json0000664000175000017500000000057700000000000026205 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "CURRENT", "id": "v1.1", "links": [ { "href": "http://image.example.com/v1/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v1.0", "links": [ { "href": "http://image.example.com/v1/", "rel": "self" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/image-version-v2.json0000664000175000017500000000135100000000000026175 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "CURRENT", "id": "v2.3", "links": [ { "href": "http://image.example.com/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.2", "links": [ { "href": "http://image.example.com/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.1", "links": [ { "href": "http://image.example.com/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.0", "links": [ { "href": "http://image.example.com/v2/", "rel": "self" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/image-version.json0000664000175000017500000000212300000000000025646 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "CURRENT", "id": "v2.3", "links": [ { "href": 
"http://image.example.com/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.2", "links": [ { "href": "http://image.example.com/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.1", "links": [ { "href": "http://image.example.com/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v2.0", "links": [ { "href": "http://image.example.com/v2/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v1.1", "links": [ { "href": "http://image.example.com/v1/", "rel": "self" } ] }, { "status": "SUPPORTED", "id": "v1.0", "links": [ { "href": "http://image.example.com/v1/", "rel": "self" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/old-compute-version.json0000664000175000017500000000107500000000000027021 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "SUPPORTED", "updated": "2011-01-21T11:33:21Z", "links": [ { "href": "https://compute.example.com/v2/", "rel": "self" } ], "min_version": "", "version": "", "id": "v2.0" }, { "status": "CURRENT", "updated": "2013-07-23T11:33:21Z", "links": [ { "href": "https://compute.example.com/v2.1/", "rel": "self" } ], "min_version": "2.10", "version": "2.50", "id": "v2.1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/placement.json0000664000175000017500000000034300000000000025053 0ustar00zuulzuul00000000000000{ "versions": [ { "id": "v1.0", "links": [{"href": "", "rel": "self"}], "max_version": "1.17", "min_version": "1.0", "status": "CURRENT" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/fixtures/shared-file-system.json0000664000175000017500000000112500000000000026607 0ustar00zuulzuul00000000000000{ "versions": [ { "id": "v2.0", "status": "CURRENT", "version": 
"2.58", "min_version": "2.0", "updated": "2015-08-27T11:33:21Z", "links": [ { "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/" }, { "rel": "self", "href": "https://shared-file-system.example.com/v2/" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.share+json;version=1" } ] } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4534137 openstacksdk-4.0.0/openstack/tests/unit/identity/0000775000175000017500000000000000000000000022170 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/__init__.py0000664000175000017500000000000000000000000024267 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/test_version.py0000664000175000017500000000424700000000000025275 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.identity import version from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'media-types': '2', 'status': '3', 'updated': '4', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['media-types'], sot.media_types) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['updated'], sot.updated) def test_list(self): resp = mock.Mock() resp.body = { "versions": { "values": [ {"status": "stable", "updated": "a", "id": "v1.0"}, {"status": "stable", "updated": "b", "id": "v1.1"}, ] } } resp.json = mock.Mock(return_value=resp.body) session = mock.Mock() session.get = mock.Mock(return_value=resp) sot = version.Version(**EXAMPLE) result = sot.list(session) self.assertEqual(next(result).id, 'v1.0') self.assertEqual(next(result).id, 'v1.1') self.assertRaises(StopIteration, next, result) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4534137 openstacksdk-4.0.0/openstack/tests/unit/identity/v2/0000775000175000017500000000000000000000000022517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v2/__init__.py0000664000175000017500000000000000000000000024616 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/identity/v2/test_extension.py0000664000175000017500000000446000000000000026150 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.identity.v2 import extension from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'alias': '1', 'description': '2', 'links': [], 'name': '4', 'namespace': '5', 'updated': '2015-03-09T12:14:57.233772', } class TestExtension(base.TestCase): def test_basic(self): sot = extension.Extension() self.assertEqual('extension', sot.resource_key) self.assertEqual('extensions', sot.resources_key) self.assertEqual('/extensions', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = extension.Extension(**EXAMPLE) self.assertEqual(EXAMPLE['alias'], sot.alias) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['namespace'], sot.namespace) self.assertEqual(EXAMPLE['updated'], sot.updated_at) def test_list(self): resp = mock.Mock() resp.body = { "extensions": { "values": [ {"name": "a"}, {"name": "b"}, ] } } resp.json = mock.Mock(return_value=resp.body) session = mock.Mock() session.get = mock.Mock(return_value=resp) sot = extension.Extension(**EXAMPLE) result = 
sot.list(session) self.assertEqual(next(result).name, 'a') self.assertEqual(next(result).name, 'b') self.assertRaises(StopIteration, next, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v2/test_proxy.py0000664000175000017500000000562700000000000025323 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v2 import _proxy from openstack.identity.v2 import role from openstack.identity.v2 import tenant from openstack.identity.v2 import user from openstack.tests.unit import test_proxy_base as test_proxy_base class TestIdentityProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_role_create_attrs(self): self.verify_create(self.proxy.create_role, role.Role) def test_role_delete(self): self.verify_delete(self.proxy.delete_role, role.Role, False) def test_role_delete_ignore(self): self.verify_delete(self.proxy.delete_role, role.Role, True) def test_role_find(self): self.verify_find(self.proxy.find_role, role.Role) def test_role_get(self): self.verify_get(self.proxy.get_role, role.Role) def test_roles(self): self.verify_list(self.proxy.roles, role.Role) def test_role_update(self): self.verify_update(self.proxy.update_role, role.Role) def test_tenant_create_attrs(self): self.verify_create(self.proxy.create_tenant, tenant.Tenant) def test_tenant_delete(self): 
self.verify_delete(self.proxy.delete_tenant, tenant.Tenant, False) def test_tenant_delete_ignore(self): self.verify_delete(self.proxy.delete_tenant, tenant.Tenant, True) def test_tenant_find(self): self.verify_find(self.proxy.find_tenant, tenant.Tenant) def test_tenant_get(self): self.verify_get(self.proxy.get_tenant, tenant.Tenant) def test_tenants(self): self.verify_list(self.proxy.tenants, tenant.Tenant) def test_tenant_update(self): self.verify_update(self.proxy.update_tenant, tenant.Tenant) def test_user_create_attrs(self): self.verify_create(self.proxy.create_user, user.User) def test_user_delete(self): self.verify_delete(self.proxy.delete_user, user.User, False) def test_user_delete_ignore(self): self.verify_delete(self.proxy.delete_user, user.User, True) def test_user_find(self): self.verify_find(self.proxy.find_user, user.User) def test_user_get(self): self.verify_get(self.proxy.get_user, user.User) def test_users(self): self.verify_list(self.proxy.users, user.User) def test_user_update(self): self.verify_update(self.proxy.update_user, user.User) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v2/test_role.py0000664000175000017500000000270400000000000025074 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.identity.v2 import role from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'enabled': 'True', 'description': '1', 'id': IDENTIFIER, 'name': '3', } class TestRole(base.TestCase): def test_basic(self): sot = role.Role() self.assertEqual('role', sot.resource_key) self.assertEqual('roles', sot.resources_key) self.assertEqual('/OS-KSADM/roles', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = role.Role(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertTrue(sot.is_enabled) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v2/test_tenant.py0000664000175000017500000000270400000000000025424 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.identity.v2 import tenant from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'enabled': True, 'id': '3', 'name': '4', } class TestTenant(base.TestCase): def test_basic(self): sot = tenant.Tenant() self.assertEqual('tenant', sot.resource_key) self.assertEqual('tenants', sot.resources_key) self.assertEqual('/tenants', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = tenant.Tenant(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertTrue(sot.is_enabled) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v2/test_user.py0000664000175000017500000000264000000000000025110 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.identity.v2 import user from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'email': '1', 'enabled': True, 'id': '3', 'name': '4', } class TestUser(base.TestCase): def test_basic(self): sot = user.User() self.assertEqual('user', sot.resource_key) self.assertEqual('users', sot.resources_key) self.assertEqual('/users', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = user.User(**EXAMPLE) self.assertEqual(EXAMPLE['email'], sot.email) self.assertTrue(sot.is_enabled) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4614174 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/0000775000175000017500000000000000000000000022520 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/__init__.py0000664000175000017500000000000000000000000024617 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_access_rule.py0000664000175000017500000000305700000000000026426 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import access_rule from openstack.tests.unit import base EXAMPLE = { "links": { "self": "https://example.com/identity/v3/access_rules" "/07d719df00f349ef8de77d542edf010c" }, "path": "/v2.1/servers/{server_id}/ips", "method": "GET", "service": "compute", } class TestAccessRule(base.TestCase): def test_basic(self): sot = access_rule.AccessRule() self.assertEqual('access_rule', sot.resource_key) self.assertEqual('access_rules', sot.resources_key) self.assertEqual('/users/%(user_id)s/access_rules', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = access_rule.AccessRule(**EXAMPLE) self.assertEqual(EXAMPLE['path'], sot.path) self.assertEqual(EXAMPLE['method'], sot.method) self.assertEqual(EXAMPLE['service'], sot.service) self.assertEqual(EXAMPLE['links'], sot.links) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_application_credential.py0000664000175000017500000000446700000000000030641 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.identity.v3 import application_credential from openstack.tests.unit import base EXAMPLE = { "user": {"id": "8ac43bb0926245cead88676a96c750d3"}, "name": 'monitoring', "secret": 'rEaqvJka48mpv', "roles": [{"name": "Reader"}], "access_rules": [ {"path": "/v2.0/metrics", "service": "monitoring", "method": "GET"}, ], "expires_at": '2018-02-27T18:30:59Z', "description": "Application credential for monitoring", "unrestricted": "False", "project_id": "3", "links": {"self": "http://example.com/v3/application_credential_1"}, } class TestApplicationCredential(base.TestCase): def test_basic(self): sot = application_credential.ApplicationCredential() self.assertEqual('application_credential', sot.resource_key) self.assertEqual('application_credentials', sot.resources_key) self.assertEqual( '/users/%(user_id)s/application_credentials', sot.base_path ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = application_credential.ApplicationCredential(**EXAMPLE) self.assertEqual(EXAMPLE['user'], sot.user) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['secret'], sot.secret) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['expires_at'], sot.expires_at) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['roles'], sot.roles) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['access_rules'], sot.access_rules) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_credential.py0000664000175000017500000000356200000000000026251 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import credential from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'blob': '1', 'id': IDENTIFIER, 'project_id': '3', 'type': '4', 'user_id': '5', } class TestCredential(base.TestCase): def test_basic(self): sot = credential.Credential() self.assertEqual('credential', sot.resource_key) self.assertEqual('credentials', sot.resources_key) self.assertEqual('/credentials', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'type': 'type', 'user_id': 'user_id', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = credential.Credential(**EXAMPLE) self.assertEqual(EXAMPLE['blob'], sot.blob) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['user_id'], sot.user_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_domain.py0000664000175000017500000001575500000000000025415 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.identity.v3 import domain from openstack.identity.v3 import group from openstack.identity.v3 import role from openstack.identity.v3 import user from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'enabled': True, 'id': IDENTIFIER, 'links': {'self': 'http://example.com/identity/v3/domains/id'}, 'name': '4', } class TestDomain(base.TestCase): def setUp(self): super().setUp() self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = 1 self.sess._get_connection = mock.Mock(return_value=self.cloud) self.good_resp = mock.Mock() self.good_resp.body = None self.good_resp.json = mock.Mock(return_value=self.good_resp.body) self.good_resp.status_code = 204 self.bad_resp = mock.Mock() self.bad_resp.body = None self.bad_resp.json = mock.Mock(return_value=self.bad_resp.body) self.bad_resp.status_code = 401 def test_basic(self): sot = domain.Domain() self.assertEqual('domain', sot.resource_key) self.assertEqual('domains', sot.resources_key) self.assertEqual('/domains', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'name': 'name', 'is_enabled': 'enabled', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = domain.Domain(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) 
self.assertTrue(sot.is_enabled) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) def test_assign_role_to_user_good(self): sot = domain.Domain(**EXAMPLE) resp = self.good_resp self.sess.put = mock.Mock(return_value=resp) self.assertTrue( sot.assign_role_to_user( self.sess, user.User(id='1'), role.Role(id='2') ) ) self.sess.put.assert_called_with('domains/IDENTIFIER/users/1/roles/2') def test_assign_role_to_user_bad(self): sot = domain.Domain(**EXAMPLE) resp = self.bad_resp self.sess.put = mock.Mock(return_value=resp) self.assertFalse( sot.assign_role_to_user( self.sess, user.User(id='1'), role.Role(id='2') ) ) def test_validate_user_has_role_good(self): sot = domain.Domain(**EXAMPLE) resp = self.good_resp self.sess.head = mock.Mock(return_value=resp) self.assertTrue( sot.validate_user_has_role( self.sess, user.User(id='1'), role.Role(id='2') ) ) self.sess.head.assert_called_with('domains/IDENTIFIER/users/1/roles/2') def test_validate_user_has_role_bad(self): sot = domain.Domain(**EXAMPLE) resp = self.bad_resp self.sess.head = mock.Mock(return_value=resp) self.assertFalse( sot.validate_user_has_role( self.sess, user.User(id='1'), role.Role(id='2') ) ) def test_unassign_role_from_user_good(self): sot = domain.Domain(**EXAMPLE) resp = self.good_resp self.sess.delete = mock.Mock(return_value=resp) self.assertTrue( sot.unassign_role_from_user( self.sess, user.User(id='1'), role.Role(id='2') ) ) self.sess.delete.assert_called_with( 'domains/IDENTIFIER/users/1/roles/2' ) def test_unassign_role_from_user_bad(self): sot = domain.Domain(**EXAMPLE) resp = self.bad_resp self.sess.delete = mock.Mock(return_value=resp) self.assertFalse( sot.unassign_role_from_user( self.sess, user.User(id='1'), role.Role(id='2') ) ) def test_assign_role_to_group_good(self): sot = domain.Domain(**EXAMPLE) resp = self.good_resp self.sess.put = mock.Mock(return_value=resp) self.assertTrue( sot.assign_role_to_group( 
self.sess, group.Group(id='1'), role.Role(id='2') ) ) self.sess.put.assert_called_with('domains/IDENTIFIER/groups/1/roles/2') def test_assign_role_to_group_bad(self): sot = domain.Domain(**EXAMPLE) resp = self.bad_resp self.sess.put = mock.Mock(return_value=resp) self.assertFalse( sot.assign_role_to_group( self.sess, group.Group(id='1'), role.Role(id='2') ) ) def test_validate_group_has_role_good(self): sot = domain.Domain(**EXAMPLE) resp = self.good_resp self.sess.head = mock.Mock(return_value=resp) self.assertTrue( sot.validate_group_has_role( self.sess, group.Group(id='1'), role.Role(id='2') ) ) self.sess.head.assert_called_with( 'domains/IDENTIFIER/groups/1/roles/2' ) def test_validate_group_has_role_bad(self): sot = domain.Domain(**EXAMPLE) resp = self.bad_resp self.sess.head = mock.Mock(return_value=resp) self.assertFalse( sot.validate_group_has_role( self.sess, group.Group(id='1'), role.Role(id='2') ) ) def test_unassign_role_from_group_good(self): sot = domain.Domain(**EXAMPLE) resp = self.good_resp self.sess.delete = mock.Mock(return_value=resp) self.assertTrue( sot.unassign_role_from_group( self.sess, group.Group(id='1'), role.Role(id='2') ) ) self.sess.delete.assert_called_with( 'domains/IDENTIFIER/groups/1/roles/2' ) def test_unassign_role_from_group_bad(self): sot = domain.Domain(**EXAMPLE) resp = self.bad_resp self.sess.delete = mock.Mock(return_value=resp) self.assertFalse( sot.unassign_role_from_group( self.sess, group.Group(id='1'), role.Role(id='2') ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_domain_config.py0000664000175000017500000000325500000000000026732 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import domain_config from openstack.tests.unit import base EXAMPLE = { 'identity': { 'driver': 'ldap', }, 'ldap': { 'url': 'ldap://myldap.com:389/', 'user_tree_dn': 'ou=Users,dc=my_new_root,dc=org', }, } class TestDomainConfig(base.TestCase): def test_basic(self): sot = domain_config.DomainConfig() self.assertEqual('config', sot.resource_key) self.assertEqual('/domains/%(domain_id)s/config', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) def test_make_it(self): sot = domain_config.DomainConfig(**EXAMPLE) self.assertIsInstance(sot.identity, domain_config.DomainConfigDriver) self.assertEqual(EXAMPLE['identity']['driver'], sot.identity.driver) self.assertIsInstance(sot.ldap, domain_config.DomainConfigLDAP) self.assertEqual(EXAMPLE['ldap']['url'], sot.ldap.url) self.assertEqual( EXAMPLE['ldap']['user_tree_dn'], sot.ldap.user_tree_dn, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_endpoint.py0000664000175000017500000000412500000000000025753 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import endpoint from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'enabled': True, 'id': IDENTIFIER, 'interface': '3', 'links': {'self': 'http://example.com/endpoint1'}, 'region_id': '4', 'service_id': '5', 'url': '6', } class TestEndpoint(base.TestCase): def test_basic(self): sot = endpoint.Endpoint() self.assertEqual('endpoint', sot.resource_key) self.assertEqual('endpoints', sot.resources_key) self.assertEqual('/endpoints', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'interface': 'interface', 'service_id': 'service_id', 'region_id': 'region_id', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = endpoint.Endpoint(**EXAMPLE) self.assertTrue(sot.is_enabled) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['interface'], sot.interface) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['region_id'], sot.region_id) self.assertEqual(EXAMPLE['service_id'], sot.service_id) self.assertEqual(EXAMPLE['url'], sot.url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_federation_protocol.py0000664000175000017500000000367300000000000030203 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the 
"License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import federation_protocol from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'idp_id': 'example_idp', 'mapping_id': 'example_mapping', } class TestFederationProtocol(base.TestCase): def test_basic(self): sot = federation_protocol.FederationProtocol() self.assertEqual('protocol', sot.resource_key) self.assertEqual('protocols', sot.resources_key) self.assertEqual( '/OS-FEDERATION/identity_providers/%(idp_id)s/protocols', sot.base_path, ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.create_exclude_id_from_body) self.assertEqual('PATCH', sot.commit_method) self.assertEqual('PUT', sot.create_method) self.assertDictEqual( { 'id': 'id', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = federation_protocol.FederationProtocol(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['idp_id'], sot.idp_id) self.assertEqual(EXAMPLE['mapping_id'], sot.mapping_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_group.py0000664000175000017500000000612700000000000025273 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.identity.v3 import group from openstack.identity.v3 import user from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'domain_id': '2', 'id': IDENTIFIER, 'name': '4', } class TestGroup(base.TestCase): def setUp(self): super().setUp() self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = 1 self.sess._get_connection = mock.Mock(return_value=self.cloud) self.good_resp = mock.Mock() self.good_resp.body = None self.good_resp.json = mock.Mock(return_value=self.good_resp.body) self.good_resp.status_code = 204 def test_basic(self): sot = group.Group() self.assertEqual('group', sot.resource_key) self.assertEqual('groups', sot.resources_key) self.assertEqual('/groups', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'domain_id': 'domain_id', 'name': 'name', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = group.Group(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) def test_add_user(self): sot = group.Group(**EXAMPLE) resp = self.good_resp self.sess.put = mock.Mock(return_value=resp) 
sot.add_user(self.sess, user.User(id='1')) self.sess.put.assert_called_with('groups/IDENTIFIER/users/1') def test_remove_user(self): sot = group.Group(**EXAMPLE) resp = self.good_resp self.sess.delete = mock.Mock(return_value=resp) sot.remove_user(self.sess, user.User(id='1')) self.sess.delete.assert_called_with('groups/IDENTIFIER/users/1') def test_check_user(self): sot = group.Group(**EXAMPLE) resp = self.good_resp self.sess.head = mock.Mock(return_value=resp) self.assertTrue(sot.check_user(self.sess, user.User(id='1'))) self.sess.head.assert_called_with('groups/IDENTIFIER/users/1') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_identity_provider.py0000664000175000017500000000433300000000000027677 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.identity.v3 import identity_provider from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'domain_id': 'example_domain', 'description': 'An example description', 'is_enabled': True, 'remote_ids': ['https://auth.example.com/auth/realms/ExampleRealm'], } class TestIdentityProvider(base.TestCase): def test_basic(self): sot = identity_provider.IdentityProvider() self.assertEqual('identity_provider', sot.resource_key) self.assertEqual('identity_providers', sot.resources_key) self.assertEqual('/OS-FEDERATION/identity_providers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.create_exclude_id_from_body) self.assertEqual('PATCH', sot.commit_method) self.assertEqual('PUT', sot.create_method) self.assertDictEqual( { 'id': 'id', 'limit': 'limit', 'marker': 'marker', 'is_enabled': 'enabled', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = identity_provider.IdentityProvider(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['id'], sot.name) self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['is_enabled'], sot.is_enabled) self.assertEqual(EXAMPLE['remote_ids'], sot.remote_ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_limit.py0000664000175000017500000000434200000000000025252 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import limit from openstack.tests.unit import base EXAMPLE = { "service_id": "8ac43bb0926245cead88676a96c750d3", "region_id": 'RegionOne', "resource_name": 'cores', "resource_limit": 10, "project_id": 'a8455cdd4249498f99b63d5af2fb4bc8', "description": "compute cores for project 123", "links": {"self": "http://example.com/v3/limit_1"}, } class TestLimit(base.TestCase): def test_basic(self): sot = limit.Limit() self.assertEqual('limits', sot.resources_key) self.assertEqual('/limits', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'service_id': 'service_id', 'region_id': 'region_id', 'resource_name': 'resource_name', 'project_id': 'project_id', 'marker': 'marker', 'limit': 'limit', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = limit.Limit(**EXAMPLE) self.assertEqual(EXAMPLE['service_id'], sot.service_id) self.assertEqual(EXAMPLE['region_id'], sot.region_id) self.assertEqual(EXAMPLE['resource_name'], sot.resource_name) self.assertEqual(EXAMPLE['resource_limit'], sot.resource_limit) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['links'], sot.links) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_mapping.py0000664000175000017500000000320500000000000025564 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import mapping from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'rules': [{'local': [], 'remote': []}], } class TestMapping(base.TestCase): def test_basic(self): sot = mapping.Mapping() self.assertEqual('mapping', sot.resource_key) self.assertEqual('mappings', sot.resources_key) self.assertEqual('/OS-FEDERATION/mappings', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertEqual('PUT', sot.create_method) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = mapping.Mapping(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['rules'], sot.rules) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_policy.py0000664000175000017500000000325700000000000025437 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import policy from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'blob': '1', 'id': IDENTIFIER, 'links': {'self': 'a-pointer'}, 'project_id': '2', 'type': '3', 'user_id': '4', } class TestPolicy(base.TestCase): def test_basic(self): sot = policy.Policy() self.assertEqual('policy', sot.resource_key) self.assertEqual('policies', sot.resources_key) self.assertEqual('/policies', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) def test_make_it(self): sot = policy.Policy(**EXAMPLE) self.assertEqual(EXAMPLE['blob'], sot.blob) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['user_id'], sot.user_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_project.py0000664000175000017500000002002700000000000025600 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack.identity.v3 import group from openstack.identity.v3 import project from openstack.identity.v3 import role from openstack.identity.v3 import user from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'domain_id': '2', 'enabled': True, 'id': IDENTIFIER, 'is_domain': False, 'name': '5', 'parent_id': '6', 'options': {'foo': 'bar'}, } class TestProject(base.TestCase): def setUp(self): super().setUp() self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = 1 self.sess._get_connection = mock.Mock(return_value=self.cloud) self.good_resp = mock.Mock() self.good_resp.body = None self.good_resp.json = mock.Mock(return_value=self.good_resp.body) self.good_resp.status_code = 204 self.bad_resp = mock.Mock() self.bad_resp.body = None self.bad_resp.json = mock.Mock(return_value=self.bad_resp.body) self.bad_resp.status_code = 401 def test_basic(self): sot = project.Project() self.assertEqual('project', sot.resource_key) self.assertEqual('projects', sot.resources_key) self.assertEqual('/projects', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'domain_id': 'domain_id', 'is_domain': 'is_domain', 'name': 'name', 'parent_id': 'parent_id', 'is_enabled': 'enabled', 'limit': 'limit', 'marker': 'marker', 'tags': 'tags', 'any_tags': 'tags-any', 'not_tags': 
'not-tags', 'not_any_tags': 'not-tags-any', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = project.Project(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) self.assertFalse(sot.is_domain) self.assertTrue(sot.is_enabled) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['parent_id'], sot.parent_id) self.assertDictEqual(EXAMPLE['options'], sot.options) def test_assign_role_to_user_good(self): sot = project.Project(**EXAMPLE) resp = self.good_resp self.sess.put = mock.Mock(return_value=resp) self.assertTrue( sot.assign_role_to_user( self.sess, user.User(id='1'), role.Role(id='2') ) ) self.sess.put.assert_called_with('projects/IDENTIFIER/users/1/roles/2') def test_assign_role_to_user_bad(self): sot = project.Project(**EXAMPLE) resp = self.bad_resp self.sess.put = mock.Mock(return_value=resp) self.assertFalse( sot.assign_role_to_user( self.sess, user.User(id='1'), role.Role(id='2') ) ) def test_validate_user_has_role_good(self): sot = project.Project(**EXAMPLE) resp = self.good_resp self.sess.head = mock.Mock(return_value=resp) self.assertTrue( sot.validate_user_has_role( self.sess, user.User(id='1'), role.Role(id='2') ) ) self.sess.head.assert_called_with( 'projects/IDENTIFIER/users/1/roles/2' ) def test_validate_user_has_role_bad(self): sot = project.Project(**EXAMPLE) resp = self.bad_resp self.sess.head = mock.Mock(return_value=resp) self.assertFalse( sot.validate_user_has_role( self.sess, user.User(id='1'), role.Role(id='2') ) ) def test_unassign_role_from_user_good(self): sot = project.Project(**EXAMPLE) resp = self.good_resp self.sess.delete = mock.Mock(return_value=resp) self.assertTrue( sot.unassign_role_from_user( self.sess, user.User(id='1'), role.Role(id='2') ) ) self.sess.delete.assert_called_with( 'projects/IDENTIFIER/users/1/roles/2' ) def test_unassign_role_from_user_bad(self): sot = 
project.Project(**EXAMPLE) resp = self.bad_resp self.sess.delete = mock.Mock(return_value=resp) self.assertFalse( sot.unassign_role_from_user( self.sess, user.User(id='1'), role.Role(id='2') ) ) def test_assign_role_to_group_good(self): sot = project.Project(**EXAMPLE) resp = self.good_resp self.sess.put = mock.Mock(return_value=resp) self.assertTrue( sot.assign_role_to_group( self.sess, group.Group(id='1'), role.Role(id='2') ) ) self.sess.put.assert_called_with( 'projects/IDENTIFIER/groups/1/roles/2' ) def test_assign_role_to_group_bad(self): sot = project.Project(**EXAMPLE) resp = self.bad_resp self.sess.put = mock.Mock(return_value=resp) self.assertFalse( sot.assign_role_to_group( self.sess, group.Group(id='1'), role.Role(id='2') ) ) def test_validate_group_has_role_good(self): sot = project.Project(**EXAMPLE) resp = self.good_resp self.sess.head = mock.Mock(return_value=resp) self.assertTrue( sot.validate_group_has_role( self.sess, group.Group(id='1'), role.Role(id='2') ) ) self.sess.head.assert_called_with( 'projects/IDENTIFIER/groups/1/roles/2' ) def test_validate_group_has_role_bad(self): sot = project.Project(**EXAMPLE) resp = self.bad_resp self.sess.head = mock.Mock(return_value=resp) self.assertFalse( sot.validate_group_has_role( self.sess, group.Group(id='1'), role.Role(id='2') ) ) def test_unassign_role_from_group_good(self): sot = project.Project(**EXAMPLE) resp = self.good_resp self.sess.delete = mock.Mock(return_value=resp) self.assertTrue( sot.unassign_role_from_group( self.sess, group.Group(id='1'), role.Role(id='2') ) ) self.sess.delete.assert_called_with( 'projects/IDENTIFIER/groups/1/roles/2' ) def test_unassign_role_from_group_bad(self): sot = project.Project(**EXAMPLE) resp = self.bad_resp self.sess.delete = mock.Mock(return_value=resp) self.assertFalse( sot.unassign_role_from_group( self.sess, group.Group(id='1'), role.Role(id='2') ) ) class TestUserProject(base.TestCase): def test_basic(self): sot = project.UserProject() 
self.assertEqual('project', sot.resource_key) self.assertEqual('projects', sot.resources_key) self.assertEqual('/users/%(user_id)s/projects', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_proxy.py0000664000175000017500000006624200000000000025324 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.identity.v3 import _proxy from openstack.identity.v3 import access_rule from openstack.identity.v3 import credential from openstack.identity.v3 import domain from openstack.identity.v3 import domain_config from openstack.identity.v3 import endpoint from openstack.identity.v3 import group from openstack.identity.v3 import policy from openstack.identity.v3 import project from openstack.identity.v3 import region from openstack.identity.v3 import role from openstack.identity.v3 import role_domain_group_assignment from openstack.identity.v3 import role_domain_user_assignment from openstack.identity.v3 import role_project_group_assignment from openstack.identity.v3 import role_project_user_assignment from openstack.identity.v3 import role_system_group_assignment from openstack.identity.v3 import role_system_user_assignment from openstack.identity.v3 import service from openstack.identity.v3 import service_provider from openstack.identity.v3 import trust from openstack.identity.v3 import user from openstack.tests.unit import test_proxy_base USER_ID = 'user-id-' + uuid.uuid4().hex class TestIdentityProxyBase(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestIdentityProxyCredential(TestIdentityProxyBase): def test_credential_create_attrs(self): self.verify_create(self.proxy.create_credential, credential.Credential) def test_credential_delete(self): self.verify_delete( self.proxy.delete_credential, credential.Credential, False ) def test_credential_delete_ignore(self): self.verify_delete( self.proxy.delete_credential, credential.Credential, True ) def test_credential_find(self): self.verify_find(self.proxy.find_credential, credential.Credential) def test_credential_get(self): self.verify_get(self.proxy.get_credential, credential.Credential) def test_credentials(self): self.verify_list(self.proxy.credentials, credential.Credential) def test_credential_update(self): 
self.verify_update(self.proxy.update_credential, credential.Credential) class TestIdentityProxyDomain(TestIdentityProxyBase): def test_domain_create_attrs(self): self.verify_create(self.proxy.create_domain, domain.Domain) def test_domain_delete(self): self.verify_delete(self.proxy.delete_domain, domain.Domain, False) def test_domain_delete_ignore(self): self.verify_delete(self.proxy.delete_domain, domain.Domain, True) def test_domain_find(self): self.verify_find(self.proxy.find_domain, domain.Domain) def test_domain_get(self): self.verify_get(self.proxy.get_domain, domain.Domain) def test_domains(self): self.verify_list(self.proxy.domains, domain.Domain) def test_domain_update(self): self.verify_update(self.proxy.update_domain, domain.Domain) class TestIdentityProxyDomainConfig(TestIdentityProxyBase): def test_domain_config_create_attrs(self): self.verify_create( self.proxy.create_domain_config, domain_config.DomainConfig, method_args=['domain_id'], method_kwargs={}, expected_args=[], expected_kwargs={ 'domain_id': 'domain_id', }, ) def test_domain_config_delete(self): self.verify_delete( self.proxy.delete_domain_config, domain_config.DomainConfig, ignore_missing=False, method_args=['domain_id'], method_kwargs={}, expected_args=[], expected_kwargs={ 'domain_id': 'domain_id', }, ) def test_domain_config_delete_ignore(self): self.verify_delete( self.proxy.delete_domain_config, domain_config.DomainConfig, ignore_missing=True, method_args=['domain_id'], method_kwargs={}, expected_args=[], expected_kwargs={ 'domain_id': 'domain_id', }, ) # no find_domain_config def test_domain_config_get(self): self.verify_get( self.proxy.get_domain_config, domain_config.DomainConfig, method_args=['domain_id'], method_kwargs={}, expected_args=[], expected_kwargs={ 'domain_id': 'domain_id', 'requires_id': False, }, ) # no domain_configs def test_domain_config_update(self): self.verify_update( self.proxy.update_domain_config, domain_config.DomainConfig, method_args=['domain_id'], 
method_kwargs={}, expected_args=[], expected_kwargs={ 'domain_id': 'domain_id', }, ) class TestIdentityProxyEndpoint(TestIdentityProxyBase): def test_endpoint_create_attrs(self): self.verify_create(self.proxy.create_endpoint, endpoint.Endpoint) def test_endpoint_delete(self): self.verify_delete( self.proxy.delete_endpoint, endpoint.Endpoint, False ) def test_endpoint_delete_ignore(self): self.verify_delete(self.proxy.delete_endpoint, endpoint.Endpoint, True) def test_endpoint_find(self): self.verify_find(self.proxy.find_endpoint, endpoint.Endpoint) def test_endpoint_get(self): self.verify_get(self.proxy.get_endpoint, endpoint.Endpoint) def test_endpoints(self): self.verify_list(self.proxy.endpoints, endpoint.Endpoint) def test_endpoint_update(self): self.verify_update(self.proxy.update_endpoint, endpoint.Endpoint) class TestIdentityProxyGroup(TestIdentityProxyBase): def test_group_create_attrs(self): self.verify_create(self.proxy.create_group, group.Group) def test_group_delete(self): self.verify_delete(self.proxy.delete_group, group.Group, False) def test_group_delete_ignore(self): self.verify_delete(self.proxy.delete_group, group.Group, True) def test_group_find(self): self.verify_find(self.proxy.find_group, group.Group) def test_group_get(self): self.verify_get(self.proxy.get_group, group.Group) def test_groups(self): self.verify_list(self.proxy.groups, group.Group) def test_group_update(self): self.verify_update(self.proxy.update_group, group.Group) def test_add_user_to_group(self): self._verify( "openstack.identity.v3.group.Group.add_user", self.proxy.add_user_to_group, method_args=['uid', 'gid'], expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), ], ) def test_remove_user_from_group(self): self._verify( "openstack.identity.v3.group.Group.remove_user", self.proxy.remove_user_from_group, method_args=['uid', 'gid'], expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), ], ) def test_check_user_in_group(self): self._verify( 
"openstack.identity.v3.group.Group.check_user", self.proxy.check_user_in_group, method_args=['uid', 'gid'], expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), ], ) def test_group_users(self): self.verify_list( self.proxy.group_users, user.User, method_kwargs={"group": 'group', "attrs": 1}, expected_kwargs={"attrs": 1}, ) class TestIdentityProxyPolicy(TestIdentityProxyBase): def test_policy_create_attrs(self): self.verify_create(self.proxy.create_policy, policy.Policy) def test_policy_delete(self): self.verify_delete(self.proxy.delete_policy, policy.Policy, False) def test_policy_delete_ignore(self): self.verify_delete(self.proxy.delete_policy, policy.Policy, True) def test_policy_find(self): self.verify_find(self.proxy.find_policy, policy.Policy) def test_policy_get(self): self.verify_get(self.proxy.get_policy, policy.Policy) def test_policies(self): self.verify_list(self.proxy.policies, policy.Policy) def test_policy_update(self): self.verify_update(self.proxy.update_policy, policy.Policy) class TestIdentityProxyProject(TestIdentityProxyBase): def test_project_create_attrs(self): self.verify_create(self.proxy.create_project, project.Project) def test_project_delete(self): self.verify_delete(self.proxy.delete_project, project.Project, False) def test_project_delete_ignore(self): self.verify_delete(self.proxy.delete_project, project.Project, True) def test_project_find(self): self.verify_find(self.proxy.find_project, project.Project) def test_project_get(self): self.verify_get(self.proxy.get_project, project.Project) def test_projects(self): self.verify_list(self.proxy.projects, project.Project) def test_user_projects(self): self.verify_list( self.proxy.user_projects, project.UserProject, method_kwargs={'user': USER_ID}, expected_kwargs={'user_id': USER_ID}, ) def test_project_update(self): self.verify_update(self.proxy.update_project, project.Project) class TestIdentityProxyService(TestIdentityProxyBase): def test_service_create_attrs(self): 
self.verify_create(self.proxy.create_service, service.Service) def test_service_delete(self): self.verify_delete(self.proxy.delete_service, service.Service, False) def test_service_delete_ignore(self): self.verify_delete(self.proxy.delete_service, service.Service, True) def test_service_find(self): self.verify_find(self.proxy.find_service, service.Service) def test_service_get(self): self.verify_get(self.proxy.get_service, service.Service) def test_services(self): self.verify_list(self.proxy.services, service.Service) def test_service_update(self): self.verify_update(self.proxy.update_service, service.Service) class TestIdentityProxyUser(TestIdentityProxyBase): def test_user_create_attrs(self): self.verify_create(self.proxy.create_user, user.User) def test_user_delete(self): self.verify_delete(self.proxy.delete_user, user.User, False) def test_user_delete_ignore(self): self.verify_delete(self.proxy.delete_user, user.User, True) def test_user_find(self): self.verify_find(self.proxy.find_user, user.User) def test_user_get(self): self.verify_get(self.proxy.get_user, user.User) def test_users(self): self.verify_list(self.proxy.users, user.User) def test_user_update(self): self.verify_update(self.proxy.update_user, user.User) class TestIdentityProxyTrust(TestIdentityProxyBase): def test_trust_create_attrs(self): self.verify_create(self.proxy.create_trust, trust.Trust) def test_trust_delete(self): self.verify_delete(self.proxy.delete_trust, trust.Trust, False) def test_trust_delete_ignore(self): self.verify_delete(self.proxy.delete_trust, trust.Trust, True) def test_trust_find(self): self.verify_find(self.proxy.find_trust, trust.Trust) def test_trust_get(self): self.verify_get(self.proxy.get_trust, trust.Trust) def test_trusts(self): self.verify_list(self.proxy.trusts, trust.Trust) class TestIdentityProxyRegion(TestIdentityProxyBase): def test_region_create_attrs(self): self.verify_create(self.proxy.create_region, region.Region) def test_region_delete(self): 
self.verify_delete(self.proxy.delete_region, region.Region, False) def test_region_delete_ignore(self): self.verify_delete(self.proxy.delete_region, region.Region, True) def test_region_find(self): self.verify_find(self.proxy.find_region, region.Region) def test_region_get(self): self.verify_get(self.proxy.get_region, region.Region) def test_regions(self): self.verify_list(self.proxy.regions, region.Region) def test_region_update(self): self.verify_update(self.proxy.update_region, region.Region) class TestIdentityProxyRole(TestIdentityProxyBase): def test_role_create_attrs(self): self.verify_create(self.proxy.create_role, role.Role) def test_role_delete(self): self.verify_delete(self.proxy.delete_role, role.Role, False) def test_role_delete_ignore(self): self.verify_delete(self.proxy.delete_role, role.Role, True) def test_role_find(self): self.verify_find(self.proxy.find_role, role.Role) def test_role_get(self): self.verify_get(self.proxy.get_role, role.Role) def test_roles(self): self.verify_list(self.proxy.roles, role.Role) def test_role_update(self): self.verify_update(self.proxy.update_role, role.Role) class TestIdentityProxyRoleAssignments(TestIdentityProxyBase): def test_role_assignments_filter__domain_user(self): self.verify_list( self.proxy.role_assignments_filter, role_domain_user_assignment.RoleDomainUserAssignment, method_kwargs={'domain': 'domain', 'user': 'user'}, expected_kwargs={ 'domain_id': 'domain', 'user_id': 'user', }, ) def test_role_assignments_filter__domain_group(self): self.verify_list( self.proxy.role_assignments_filter, role_domain_group_assignment.RoleDomainGroupAssignment, method_kwargs={'domain': 'domain', 'group': 'group'}, expected_kwargs={ 'domain_id': 'domain', 'group_id': 'group', }, ) def test_role_assignments_filter__project_user(self): self.verify_list( self.proxy.role_assignments_filter, role_project_user_assignment.RoleProjectUserAssignment, method_kwargs={'project': 'project', 'user': 'user'}, expected_kwargs={ 'project_id': 
'project', 'user_id': 'user', }, ) def test_role_assignments_filter__project_group(self): self.verify_list( self.proxy.role_assignments_filter, role_project_group_assignment.RoleProjectGroupAssignment, method_kwargs={'project': 'project', 'group': 'group'}, expected_kwargs={ 'project_id': 'project', 'group_id': 'group', }, ) def test_role_assignments_filter__system_user(self): self.verify_list( self.proxy.role_assignments_filter, role_system_user_assignment.RoleSystemUserAssignment, method_kwargs={'system': 'system', 'user': 'user'}, expected_kwargs={ 'system_id': 'system', 'user_id': 'user', }, ) def test_role_assignments_filter__system_group(self): self.verify_list( self.proxy.role_assignments_filter, role_system_group_assignment.RoleSystemGroupAssignment, method_kwargs={'system': 'system', 'group': 'group'}, expected_kwargs={ 'system_id': 'system', 'group_id': 'group', }, ) def test_assign_domain_role_to_user(self): self._verify( "openstack.identity.v3.domain.Domain.assign_role_to_user", self.proxy.assign_domain_role_to_user, method_args=['dom_id'], method_kwargs={'user': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_unassign_domain_role_from_user(self): self._verify( "openstack.identity.v3.domain.Domain.unassign_role_from_user", self.proxy.unassign_domain_role_from_user, method_args=['dom_id'], method_kwargs={'user': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_validate_user_has_domain_role(self): self._verify( "openstack.identity.v3.domain.Domain.validate_user_has_role", self.proxy.validate_user_has_domain_role, method_args=['dom_id'], method_kwargs={'user': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_assign_domain_role_to_group(self): 
self._verify( "openstack.identity.v3.domain.Domain.assign_role_to_group", self.proxy.assign_domain_role_to_group, method_args=['dom_id'], method_kwargs={'group': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(group.Group, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_unassign_domain_role_from_group(self): self._verify( "openstack.identity.v3.domain.Domain.unassign_role_from_group", self.proxy.unassign_domain_role_from_group, method_args=['dom_id'], method_kwargs={'group': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(group.Group, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_validate_group_has_domain_role(self): self._verify( "openstack.identity.v3.domain.Domain.validate_group_has_role", self.proxy.validate_group_has_domain_role, method_args=['dom_id'], method_kwargs={'group': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(group.Group, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_assign_project_role_to_user(self): self._verify( "openstack.identity.v3.project.Project.assign_role_to_user", self.proxy.assign_project_role_to_user, method_args=['dom_id'], method_kwargs={'user': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_unassign_project_role_from_user(self): self._verify( "openstack.identity.v3.project.Project.unassign_role_from_user", self.proxy.unassign_project_role_from_user, method_args=['dom_id'], method_kwargs={'user': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_validate_user_has_project_role(self): self._verify( "openstack.identity.v3.project.Project.validate_user_has_role", self.proxy.validate_user_has_project_role, method_args=['dom_id'], method_kwargs={'user': 'uid', 'role': 'rid'}, expected_args=[ 
self.proxy, self.proxy._get_resource(user.User, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_assign_project_role_to_group(self): self._verify( "openstack.identity.v3.project.Project.assign_role_to_group", self.proxy.assign_project_role_to_group, method_args=['dom_id'], method_kwargs={'group': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(group.Group, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_unassign_project_role_from_group(self): self._verify( "openstack.identity.v3.project.Project.unassign_role_from_group", self.proxy.unassign_project_role_from_group, method_args=['dom_id'], method_kwargs={'group': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(group.Group, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_validate_group_has_project_role(self): self._verify( "openstack.identity.v3.project.Project.validate_group_has_role", self.proxy.validate_group_has_project_role, method_args=['dom_id'], method_kwargs={'group': 'uid', 'role': 'rid'}, expected_args=[ self.proxy, self.proxy._get_resource(group.Group, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_assign_system_role_to_user(self): self._verify( "openstack.identity.v3.system.System.assign_role_to_user", self.proxy.assign_system_role_to_user, method_kwargs={'user': 'uid', 'role': 'rid', 'system': 'all'}, expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_unassign_system_role_from_user(self): self._verify( "openstack.identity.v3.system.System.unassign_role_from_user", self.proxy.unassign_system_role_from_user, method_kwargs={'user': 'uid', 'role': 'rid', 'system': 'all'}, expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_validate_user_has_system_role(self): self._verify( 
"openstack.identity.v3.system.System.validate_user_has_role", self.proxy.validate_user_has_system_role, method_kwargs={'user': 'uid', 'role': 'rid', 'system': 'all'}, expected_args=[ self.proxy, self.proxy._get_resource(user.User, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_assign_system_role_to_group(self): self._verify( "openstack.identity.v3.system.System.assign_role_to_group", self.proxy.assign_system_role_to_group, method_kwargs={'group': 'uid', 'role': 'rid', 'system': 'all'}, expected_args=[ self.proxy, self.proxy._get_resource(group.Group, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_unassign_system_role_from_group(self): self._verify( "openstack.identity.v3.system.System.unassign_role_from_group", self.proxy.unassign_system_role_from_group, method_kwargs={'group': 'uid', 'role': 'rid', 'system': 'all'}, expected_args=[ self.proxy, self.proxy._get_resource(group.Group, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) def test_validate_group_has_system_role(self): self._verify( "openstack.identity.v3.system.System.validate_group_has_role", self.proxy.validate_group_has_system_role, method_kwargs={'group': 'uid', 'role': 'rid', 'system': 'all'}, expected_args=[ self.proxy, self.proxy._get_resource(group.Group, 'uid'), self.proxy._get_resource(role.Role, 'rid'), ], ) class TestAccessRule(TestIdentityProxyBase): def test_access_rule_delete(self): self.verify_delete( self.proxy.delete_access_rule, access_rule.AccessRule, False, method_args=[], method_kwargs={'user': USER_ID, 'access_rule': 'access_rule'}, expected_args=['access_rule'], expected_kwargs={'user_id': USER_ID}, ) def test_access_rule_delete_ignore(self): self.verify_delete( self.proxy.delete_access_rule, access_rule.AccessRule, True, method_args=[], method_kwargs={'user': USER_ID, 'access_rule': 'access_rule'}, expected_args=['access_rule'], expected_kwargs={'user_id': USER_ID}, ) def test_access_rule_get(self): self.verify_get( 
self.proxy.get_access_rule, access_rule.AccessRule, method_args=[], method_kwargs={'user': USER_ID, 'access_rule': 'access_rule'}, expected_args=['access_rule'], expected_kwargs={'user_id': USER_ID}, ) def test_access_rules(self): self.verify_list( self.proxy.access_rules, access_rule.AccessRule, method_kwargs={'user': USER_ID}, expected_kwargs={'user_id': USER_ID}, ) class TestServiceProvider(TestIdentityProxyBase): def test_service_provider_create(self): self.verify_create( self.proxy.create_service_provider, service_provider.ServiceProvider, ) def test_service_provider_delete(self): self.verify_delete( self.proxy.delete_service_provider, service_provider.ServiceProvider, False, ) def test_service_provider_delete_ignore(self): self.verify_delete( self.proxy.delete_service_provider, service_provider.ServiceProvider, True, ) def test_service_provider_find(self): self.verify_find( self.proxy.find_service_provider, service_provider.ServiceProvider ) def test_service_provider_get(self): self.verify_get( self.proxy.get_service_provider, service_provider.ServiceProvider, ) def test_service_providers(self): self.verify_list( self.proxy.service_providers, service_provider.ServiceProvider, ) def test_service_provider_update(self): self.verify_update( self.proxy.update_service_provider, service_provider.ServiceProvider, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_region.py0000664000175000017500000000351000000000000025413 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import region from openstack.tests.unit import base IDENTIFIER = 'RegionOne' EXAMPLE = { 'description': '1', 'id': IDENTIFIER, 'links': {'self': 'http://example.com/region1'}, 'parent_region_id': 'FAKE_PARENT', } class TestRegion(base.TestCase): def test_basic(self): sot = region.Region() self.assertEqual('region', sot.resource_key) self.assertEqual('regions', sot.resources_key) self.assertEqual('/regions', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'parent_region_id': 'parent_region_id', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = region.Region(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['parent_region_id'], sot.parent_region_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_registered_limit.py0000664000175000017500000000431500000000000027467 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import registered_limit from openstack.tests.unit import base EXAMPLE = { "service_id": "8ac43bb0926245cead88676a96c750d3", "region_id": 'RegionOne', "resource_name": 'cores', "default_limit": 10, "description": "compute cores", "links": {"self": "http://example.com/v3/registered_limit_1"}, } class TestRegistered_limit(base.TestCase): def test_basic(self): sot = registered_limit.RegisteredLimit() self.assertEqual('registered_limit', sot.resource_key) self.assertEqual('registered_limits', sot.resources_key) self.assertEqual('/registered_limits', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'service_id': 'service_id', 'region_id': 'region_id', 'resource_name': 'resource_name', 'marker': 'marker', 'limit': 'limit', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = registered_limit.RegisteredLimit(**EXAMPLE) self.assertEqual(EXAMPLE['service_id'], sot.service_id) self.assertEqual(EXAMPLE['region_id'], sot.region_id) self.assertEqual(EXAMPLE['resource_name'], sot.resource_name) self.assertEqual(EXAMPLE['default_limit'], sot.default_limit) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['links'], sot.links) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_role.py0000664000175000017500000000361300000000000025075 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import role from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': {'self': 'http://example.com/user1'}, 'name': '2', 'description': 'test description for role', 'domain_id': 'default', } class TestRole(base.TestCase): def test_basic(self): sot = role.Role() self.assertEqual('role', sot.resource_key) self.assertEqual('roles', sot.resources_key) self.assertEqual('/roles', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'domain_id': 'domain_id', 'name': 'name', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = role.Role(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_role_assignment.py0000664000175000017500000000422200000000000027322 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import role_assignment from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': {'self': 'http://example.com/user1'}, 'scope': {'domain': {'id': '2'}}, 'user': {'id': '3'}, 'group': {'id': '4'}, } class TestRoleAssignment(base.TestCase): def test_basic(self): sot = role_assignment.RoleAssignment() self.assertEqual('role_assignment', sot.resource_key) self.assertEqual('role_assignments', sot.resources_key) self.assertEqual('/role_assignments', sot.base_path) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'group_id': 'group.id', 'role_id': 'role.id', 'scope_domain_id': 'scope.domain.id', 'scope_project_id': 'scope.project.id', 'scope_system': 'scope.system', 'user_id': 'user.id', 'effective': 'effective', 'inherited_to': 'scope.OS-INHERIT:inherited_to', 'include_names': 'include_names', 'include_subtree': 'include_subtree', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = role_assignment.RoleAssignment(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['scope'], sot.scope) self.assertEqual(EXAMPLE['user'], sot.user) self.assertEqual(EXAMPLE['group'], sot.group) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_role_domain_group_assignment.py0000664000175000017500000000312000000000000032061 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import role_domain_group_assignment from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': {'self': 'http://example.com/user1'}, 'name': '2', 'domain_id': '3', 'group_id': '4', } class TestRoleDomainGroupAssignment(base.TestCase): def test_basic(self): sot = role_domain_group_assignment.RoleDomainGroupAssignment() self.assertEqual('role', sot.resource_key) self.assertEqual('roles', sot.resources_key) self.assertEqual( '/domains/%(domain_id)s/groups/%(group_id)s/roles', sot.base_path ) self.assertTrue(sot.allow_list) def test_make_it(self): sot = role_domain_group_assignment.RoleDomainGroupAssignment(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) self.assertEqual(EXAMPLE['group_id'], sot.group_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_role_domain_user_assignment.py0000664000175000017500000000310500000000000031706 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import role_domain_user_assignment from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': {'self': 'http://example.com/user1'}, 'name': '2', 'domain_id': '3', 'user_id': '4', } class TestRoleDomainUserAssignment(base.TestCase): def test_basic(self): sot = role_domain_user_assignment.RoleDomainUserAssignment() self.assertEqual('role', sot.resource_key) self.assertEqual('roles', sot.resources_key) self.assertEqual( '/domains/%(domain_id)s/users/%(user_id)s/roles', sot.base_path ) self.assertTrue(sot.allow_list) def test_make_it(self): sot = role_domain_user_assignment.RoleDomainUserAssignment(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) self.assertEqual(EXAMPLE['user_id'], sot.user_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_role_project_group_assignment.py0000664000175000017500000000316100000000000032265 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import role_project_group_assignment from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': {'self': 'http://example.com/user1'}, 'name': '2', 'project_id': '3', 'group_id': '4', } class TestRoleProjectGroupAssignment(base.TestCase): def test_basic(self): sot = role_project_group_assignment.RoleProjectGroupAssignment() self.assertEqual('role', sot.resource_key) self.assertEqual('roles', sot.resources_key) self.assertEqual( '/projects/%(project_id)s/groups/%(group_id)s/roles', sot.base_path ) self.assertTrue(sot.allow_list) def test_make_it(self): sot = role_project_group_assignment.RoleProjectGroupAssignment( **EXAMPLE ) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['group_id'], sot.group_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_role_project_user_assignment.py0000664000175000017500000000312000000000000032102 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import role_project_user_assignment from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': {'self': 'http://example.com/user1'}, 'name': '2', 'project_id': '3', 'user_id': '4', } class TestRoleProjectUserAssignment(base.TestCase): def test_basic(self): sot = role_project_user_assignment.RoleProjectUserAssignment() self.assertEqual('role', sot.resource_key) self.assertEqual('roles', sot.resources_key) self.assertEqual( '/projects/%(project_id)s/users/%(user_id)s/roles', sot.base_path ) self.assertTrue(sot.allow_list) def test_make_it(self): sot = role_project_user_assignment.RoleProjectUserAssignment(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['user_id'], sot.user_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_role_system_group_assignment.py0000664000175000017500000000253700000000000032151 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import role_system_group_assignment from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = {'id': IDENTIFIER, 'name': '2', 'group_id': '4'} class TestRoleSystemGroupAssignment(base.TestCase): def test_basic(self): sot = role_system_group_assignment.RoleSystemGroupAssignment() self.assertEqual('role', sot.resource_key) self.assertEqual('roles', sot.resources_key) self.assertEqual('/system/groups/%(group_id)s/roles', sot.base_path) self.assertTrue(sot.allow_list) def test_make_it(self): sot = role_system_group_assignment.RoleSystemGroupAssignment(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['group_id'], sot.group_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_role_system_user_assignment.py0000664000175000017500000000243200000000000031765 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.identity.v3 import role_system_user_assignment from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = {'id': IDENTIFIER, 'name': '2', 'user_id': '4'} class TestRoleSystemUserAssignment(base.TestCase): def test_basic(self): sot = role_system_user_assignment.RoleSystemUserAssignment() self.assertEqual('role', sot.resource_key) self.assertEqual('roles', sot.resources_key) self.assertEqual('/system/users/%(user_id)s/roles', sot.base_path) self.assertTrue(sot.allow_list) def test_make_it(self): sot = role_system_user_assignment.RoleSystemUserAssignment(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_service.py0000664000175000017500000000365700000000000025604 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.identity.v3 import service from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'enabled': True, 'id': IDENTIFIER, 'links': {'self': 'http://example.com/service1'}, 'name': '4', 'type': '5', } class TestService(base.TestCase): def test_basic(self): sot = service.Service() self.assertEqual('service', sot.resource_key) self.assertEqual('services', sot.resources_key) self.assertEqual('/services', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'name': 'name', 'type': 'type', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = service.Service(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertTrue(sot.is_enabled) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['type'], sot.type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_service_provider.py0000664000175000017500000000444300000000000027510 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.identity.v3 import service_provider from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'description': 'An example description', 'is_enabled': True, 'auth_url': ( "https://auth.example.com/v3/OS-FEDERATION/" "identity_providers/idp/protocols/saml2/auth" ), 'sp_url': 'https://auth.example.com/Shibboleth.sso/SAML2/ECP', } class TestServiceProvider(base.TestCase): def test_basic(self): sot = service_provider.ServiceProvider() self.assertEqual('service_provider', sot.resource_key) self.assertEqual('service_providers', sot.resources_key) self.assertEqual('/OS-FEDERATION/service_providers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.create_exclude_id_from_body) self.assertEqual('PATCH', sot.commit_method) self.assertEqual('PUT', sot.create_method) self.assertDictEqual( { 'id': 'id', 'limit': 'limit', 'marker': 'marker', 'is_enabled': 'enabled', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = service_provider.ServiceProvider(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['id'], sot.name) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['is_enabled'], sot.is_enabled) self.assertEqual(EXAMPLE['auth_url'], sot.auth_url) self.assertEqual(EXAMPLE['sp_url'], sot.sp_url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_trust.py0000664000175000017500000000462400000000000025320 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import trust from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'allow_redelegation': False, 'expires_at': '2016-03-09T12:14:57.233772', 'id': IDENTIFIER, 'impersonation': True, 'links': {'self': 'fake_link'}, 'project_id': '1', 'redelegated_trust_id': None, 'redelegation_count': '0', 'remaining_uses': 10, 'role_links': {'self': 'other_fake_link'}, 'trustee_user_id': '2', 'trustor_user_id': '3', 'roles': [{'name': 'test-role'}], } class TestTrust(base.TestCase): def test_basic(self): sot = trust.Trust() self.assertEqual('trust', sot.resource_key) self.assertEqual('trusts', sot.resources_key) self.assertEqual('/OS-TRUST/trusts', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = trust.Trust(**EXAMPLE) self.assertEqual(EXAMPLE['allow_redelegation'], sot.allow_redelegation) self.assertEqual(EXAMPLE['expires_at'], sot.expires_at) self.assertEqual(EXAMPLE['id'], sot.id) self.assertTrue(sot.is_impersonation) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['role_links'], sot.role_links) self.assertEqual( EXAMPLE['redelegated_trust_id'], sot.redelegated_trust_id ) self.assertEqual(EXAMPLE['remaining_uses'], sot.remaining_uses) self.assertEqual(EXAMPLE['trustee_user_id'], sot.trustee_user_id) self.assertEqual(EXAMPLE['trustor_user_id'], sot.trustor_user_id) self.assertEqual(EXAMPLE['roles'], sot.roles) 
self.assertEqual(EXAMPLE['redelegation_count'], sot.redelegation_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/identity/v3/test_user.py0000664000175000017500000000464100000000000025114 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.identity.v3 import user from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'default_project_id': '1', 'description': '2', 'domain_id': '3', 'email': '4', 'enabled': True, 'id': IDENTIFIER, 'links': {'self': 'http://example.com/user1'}, 'name': '6', 'password': '7', 'password_expires_at': '8', } class TestUser(base.TestCase): def test_basic(self): sot = user.User() self.assertEqual('user', sot.resource_key) self.assertEqual('users', sot.resources_key) self.assertEqual('/users', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual('PATCH', sot.commit_method) self.assertDictEqual( { 'domain_id': 'domain_id', 'name': 'name', 'password_expires_at': 'password_expires_at', 'is_enabled': 'enabled', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = user.User(**EXAMPLE) self.assertEqual(EXAMPLE['default_project_id'], sot.default_project_id) self.assertEqual(EXAMPLE['description'], 
sot.description) self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) self.assertEqual(EXAMPLE['email'], sot.email) self.assertTrue(sot.is_enabled) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['password'], sot.password) self.assertEqual( EXAMPLE['password_expires_at'], sot.password_expires_at ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4614174 openstacksdk-4.0.0/openstack/tests/unit/image/0000775000175000017500000000000000000000000021421 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/__init__.py0000664000175000017500000000000000000000000023520 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4614174 openstacksdk-4.0.0/openstack/tests/unit/image/v1/0000775000175000017500000000000000000000000021747 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v1/__init__.py0000664000175000017500000000000000000000000024046 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v1/test_image.py0000664000175000017500000000511000000000000024437 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v1 import image from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'checksum': '1', 'container_format': '2', 'copy_from': '3', 'disk_format': '4', 'id': IDENTIFIER, 'is_public': True, 'location': '6', 'min_disk': '7', 'min_ram': '8', 'name': '9', 'owner': '10', 'properties': '11', 'protected': True, 'size': '13', 'status': '14', 'created_at': '2015-03-09T12:14:57.233772', 'updated_at': '2015-03-09T12:15:57.233772', } class TestImage(base.TestCase): def test_basic(self): sot = image.Image() self.assertEqual('image', sot.resource_key) self.assertEqual('images', sot.resources_key) self.assertEqual('/images', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = image.Image(**EXAMPLE) self.assertEqual(EXAMPLE['checksum'], sot.checksum) self.assertEqual(EXAMPLE['container_format'], sot.container_format) self.assertEqual(EXAMPLE['copy_from'], sot.copy_from) self.assertEqual(EXAMPLE['disk_format'], sot.disk_format) self.assertEqual(IDENTIFIER, sot.id) self.assertTrue(sot.is_public) self.assertEqual(EXAMPLE['location'], sot.location) self.assertEqual(EXAMPLE['min_disk'], sot.min_disk) self.assertEqual(EXAMPLE['min_ram'], sot.min_ram) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['owner'], sot.owner) self.assertEqual(EXAMPLE['owner'], sot.owner_id) self.assertEqual(EXAMPLE['properties'], sot.properties) self.assertTrue(sot.is_protected) 
self.assertEqual(EXAMPLE['size'], sot.size) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v1/test_proxy.py0000664000175000017500000000276700000000000024555 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v1 import _proxy from openstack.image.v1 import image from openstack.tests.unit import test_proxy_base as test_proxy_base class TestImageProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_image_upload_attrs(self): self.verify_create(self.proxy.upload_image, image.Image) def test_image_delete(self): self.verify_delete(self.proxy.delete_image, image.Image, False) def test_image_delete_ignore(self): self.verify_delete(self.proxy.delete_image, image.Image, True) def test_image_find(self): self.verify_find(self.proxy.find_image, image.Image) def test_image_get(self): self.verify_get(self.proxy.get_image, image.Image) def test_images(self): self.verify_list(self.proxy.images, image.Image) def test_image_update(self): self.verify_update(self.proxy.update_image, image.Image) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4654195 
openstacksdk-4.0.0/openstack/tests/unit/image/v2/0000775000175000017500000000000000000000000021750 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v2/__init__.py0000664000175000017500000000000000000000000024047 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_cache.py0000664000175000017500000000541400000000000024430 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack import exceptions from openstack.image.v2 import cache from openstack.tests.unit import base EXAMPLE = { 'cached_images': [ { 'hits': 0, 'image_id': '1a56983c-f71f-490b-a7ac-6b321a18935a', 'last_accessed': 1671699579.444378, 'last_modified': 1671699579.444378, 'size': 0, }, ], 'queued_images': [ '3a4560a1-e585-443e-9b39-553b46ec92d1', '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', ], } class TestCache(base.TestCase): def test_basic(self): sot = cache.Cache() self.assertIsNone(sot.resource_key) self.assertEqual('/cache', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) def test_make_it(self): sot = cache.Cache(**EXAMPLE) self.assertEqual( [cache.CachedImage(**e) for e in EXAMPLE['cached_images']], sot.cached_images, ) self.assertEqual(EXAMPLE['queued_images'], sot.queued_images) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) def test_queue(self): sot = cache.Cache() sess = mock.Mock() sess.put = mock.Mock() sess.default_microversion = '2.14' sot.queue(sess, image='image_id') sess.put.assert_called_with( 'cache/image_id', microversion=sess.default_microversion ) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) def test_clear(self): sot = cache.Cache(**EXAMPLE) session = mock.Mock() session.delete = mock.Mock() sot.clear(session) session.delete.assert_called_with('/cache', headers={}) sot.clear(session, 'both') session.delete.assert_called_with('/cache', headers={}) sot.clear(session, 'cache') session.delete.assert_called_with( '/cache', headers={'x-image-cache-clear-target': 'cache'} ) sot.clear(session, 'queue') session.delete.assert_called_with( '/cache', headers={'x-image-cache-clear-target': 'queue'} ) self.assertRaises( exceptions.InvalidRequest, sot.clear, session, 'invalid' ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_image.py0000664000175000017500000005015300000000000024447 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import operator import tempfile from unittest import mock from keystoneauth1 import adapter import requests from openstack import _log from openstack import exceptions from openstack.image.v2 import image from openstack.tests.unit import base from openstack import utils IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'checksum': '1', 'container_format': '2', 'created_at': '2015-03-09T12:14:57.233772', 'data': 'This is not an image', 'disk_format': '4', 'min_disk': 5, 'name': '6', 'owner': '7', 'properties': { 'a': 'z', 'b': 'y', }, 'protected': False, 'status': '8', 'tags': ['g', 'h', 'i'], 'updated_at': '2015-03-09T12:15:57.233772', 'os_hash_algo': 'sha512', 'os_hash_value': '073b4523583784fbe01daff81eba092a262ec3', 'os_hidden': False, 'virtual_size': '10', 'visibility': '11', 'location': '12', 'size': 13, 'store': '14', 'file': '15', 'locations': ['15', '16'], 'direct_url': '17', 'url': '20', 'metadata': {'21': '22'}, 'architecture': '23', 'hypervisor_type': '24', 'instance_type_rxtx_factor': 25.1, 'instance_uuid': '26', 'img_config_drive': '27', 'kernel_id': '28', 'os_distro': '29', 'os_version': '30', 'os_secure_boot': '31', 'ramdisk_id': '32', 'vm_mode': '33', 'hw_cpu_sockets': 34, 'hw_cpu_cores': 35, 'hw_cpu_threads': 36, 'hw_disk_bus': '37', 'hw_rng_model': '38', 
'hw_machine_type': '39', 'hw_scsi_model': '40', 'hw_serial_port_count': 41, 'hw_video_model': '42', 'hw_video_ram': 43, 'hw_watchdog_action': '44', 'os_command_line': '45', 'hw_vif_model': '46', 'hw_vif_multiqueue_enabled': True, 'hw_boot_menu': True, 'vmware_adaptertype': '47', 'vmware_ostype': '48', 'auto_disk_config': True, 'os_type': '49', 'os_admin_user': 'ubuntu', 'hw_qemu_guest_agent': 'yes', 'os_require_quiesce': True, } def calculate_md5_checksum(data): checksum = utils.md5(usedforsecurity=False) for chunk in data: checksum.update(chunk) return checksum.hexdigest() class FakeResponse: def __init__(self, response, status_code=200, headers=None, reason=None): self.body = response self.content = response self.text = response self.status_code = status_code headers = headers if headers else {'content-type': 'application/json'} self.headers = requests.structures.CaseInsensitiveDict(headers) if reason: self.reason = reason # for the sake of "list" response faking self.links = [] def json(self): return self.body class TestImage(base.TestCase): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) self.sess = mock.Mock(spec=adapter.Adapter) self.sess.post = mock.Mock(return_value=self.resp) self.sess.put = mock.Mock(return_value=FakeResponse({})) self.sess.delete = mock.Mock(return_value=FakeResponse({})) self.sess.get = mock.Mock(return_value=FakeResponse({})) self.sess.default_microversion = None self.sess.retriable_status_codes = None self.sess.log = _log.setup_logging('openstack') def test_basic(self): sot = image.Image() self.assertIsNone(sot.resource_key) self.assertEqual('images', sot.resources_key) self.assertEqual('/images', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'created_at': 'created_at', 'id': 'id', 
'is_hidden': 'os_hidden', 'limit': 'limit', 'marker': 'marker', 'member_status': 'member_status', 'name': 'name', 'owner': 'owner', 'protected': 'protected', 'size_max': 'size_max', 'size_min': 'size_min', 'sort': 'sort', 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', 'status': 'status', 'tag': 'tag', 'updated_at': 'updated_at', 'visibility': 'visibility', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = image.Image(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['checksum'], sot.checksum) self.assertEqual(EXAMPLE['container_format'], sot.container_format) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['disk_format'], sot.disk_format) self.assertEqual(EXAMPLE['min_disk'], sot.min_disk) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['owner'], sot.owner) self.assertEqual(EXAMPLE['owner'], sot.owner_id) self.assertEqual(EXAMPLE['properties'], sot.properties) self.assertFalse(sot.is_protected) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['tags'], sot.tags) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE['os_hash_algo'], sot.hash_algo) self.assertEqual(EXAMPLE['os_hash_value'], sot.hash_value) self.assertEqual(EXAMPLE['os_hidden'], sot.is_hidden) self.assertEqual(EXAMPLE['virtual_size'], sot.virtual_size) self.assertEqual(EXAMPLE['visibility'], sot.visibility) self.assertEqual(EXAMPLE['size'], sot.size) self.assertEqual(EXAMPLE['store'], sot.store) self.assertEqual(EXAMPLE['file'], sot.file) self.assertEqual(EXAMPLE['locations'], sot.locations) self.assertEqual(EXAMPLE['direct_url'], sot.direct_url) self.assertEqual(EXAMPLE['url'], sot.url) self.assertEqual(EXAMPLE['metadata'], sot.metadata) self.assertEqual(EXAMPLE['architecture'], sot.architecture) self.assertEqual(EXAMPLE['hypervisor_type'], sot.hypervisor_type) self.assertEqual( EXAMPLE['instance_type_rxtx_factor'], sot.instance_type_rxtx_factor ) 
self.assertEqual(EXAMPLE['instance_uuid'], sot.instance_uuid) self.assertEqual(EXAMPLE['img_config_drive'], sot.needs_config_drive) self.assertEqual(EXAMPLE['kernel_id'], sot.kernel_id) self.assertEqual(EXAMPLE['os_distro'], sot.os_distro) self.assertEqual(EXAMPLE['os_version'], sot.os_version) self.assertEqual(EXAMPLE['os_secure_boot'], sot.needs_secure_boot) self.assertEqual(EXAMPLE['ramdisk_id'], sot.ramdisk_id) self.assertEqual(EXAMPLE['vm_mode'], sot.vm_mode) self.assertEqual(EXAMPLE['hw_cpu_sockets'], sot.hw_cpu_sockets) self.assertEqual(EXAMPLE['hw_cpu_cores'], sot.hw_cpu_cores) self.assertEqual(EXAMPLE['hw_cpu_threads'], sot.hw_cpu_threads) self.assertEqual(EXAMPLE['hw_disk_bus'], sot.hw_disk_bus) self.assertEqual(EXAMPLE['hw_rng_model'], sot.hw_rng_model) self.assertEqual(EXAMPLE['hw_machine_type'], sot.hw_machine_type) self.assertEqual(EXAMPLE['hw_scsi_model'], sot.hw_scsi_model) self.assertEqual( EXAMPLE['hw_serial_port_count'], sot.hw_serial_port_count ) self.assertEqual(EXAMPLE['hw_video_model'], sot.hw_video_model) self.assertEqual(EXAMPLE['hw_video_ram'], sot.hw_video_ram) self.assertEqual(EXAMPLE['hw_watchdog_action'], sot.hw_watchdog_action) self.assertEqual(EXAMPLE['os_command_line'], sot.os_command_line) self.assertEqual(EXAMPLE['hw_vif_model'], sot.hw_vif_model) self.assertEqual( EXAMPLE['hw_vif_multiqueue_enabled'], sot.is_hw_vif_multiqueue_enabled, ) self.assertEqual(EXAMPLE['hw_boot_menu'], sot.is_hw_boot_menu_enabled) self.assertEqual(EXAMPLE['vmware_adaptertype'], sot.vmware_adaptertype) self.assertEqual(EXAMPLE['vmware_ostype'], sot.vmware_ostype) self.assertEqual(EXAMPLE['auto_disk_config'], sot.has_auto_disk_config) self.assertEqual(EXAMPLE['os_type'], sot.os_type) self.assertEqual(EXAMPLE['os_admin_user'], sot.os_admin_user) self.assertEqual( EXAMPLE['hw_qemu_guest_agent'], sot.hw_qemu_guest_agent ) self.assertEqual(EXAMPLE['os_require_quiesce'], sot.os_require_quiesce) def test_deactivate(self): sot = image.Image(**EXAMPLE) 
self.assertIsNone(sot.deactivate(self.sess)) self.sess.post.assert_called_with( 'images/IDENTIFIER/actions/deactivate', ) def test_reactivate(self): sot = image.Image(**EXAMPLE) self.assertIsNone(sot.reactivate(self.sess)) self.sess.post.assert_called_with( 'images/IDENTIFIER/actions/reactivate', ) def test_add_tag(self): sot = image.Image(**EXAMPLE) tag = "lol" sot.add_tag(self.sess, tag) self.sess.put.assert_called_with( 'images/IDENTIFIER/tags/%s' % tag, ) def test_remove_tag(self): sot = image.Image(**EXAMPLE) tag = "lol" sot.remove_tag(self.sess, tag) self.sess.delete.assert_called_with( 'images/IDENTIFIER/tags/%s' % tag, ) def test_import_image(self): sot = image.Image(**EXAMPLE) json = {"method": {"name": "web-download", "uri": "such-a-good-uri"}} sot.import_image(self.sess, "web-download", uri="such-a-good-uri") self.sess.post.assert_called_with( 'images/IDENTIFIER/import', headers={}, json=json ) def test_import_image_with_uri_not_web_download(self): sot = image.Image(**EXAMPLE) sot.import_image(self.sess, "glance-direct") self.sess.post.assert_called_with( 'images/IDENTIFIER/import', headers={}, json={"method": {"name": "glance-direct"}}, ) def test_import_image_with_store(self): sot = image.Image(**EXAMPLE) json = { "method": { "name": "web-download", "uri": "such-a-good-uri", }, "stores": ["ceph_1"], } store = mock.MagicMock() store.id = "ceph_1" sot.import_image( self.sess, "web-download", uri="such-a-good-uri", store=store, ) self.sess.post.assert_called_with( 'images/IDENTIFIER/import', headers={'X-Image-Meta-Store': 'ceph_1'}, json=json, ) def test_import_image_with_stores(self): sot = image.Image(**EXAMPLE) json = { "method": { "name": "web-download", "uri": "such-a-good-uri", }, "stores": ["ceph_1"], } store = mock.MagicMock() store.id = "ceph_1" sot.import_image( self.sess, "web-download", uri="such-a-good-uri", stores=[store], ) self.sess.post.assert_called_with( 'images/IDENTIFIER/import', headers={}, json=json, ) def 
test_import_image_with_all_stores(self): sot = image.Image(**EXAMPLE) json = { "method": { "name": "web-download", "uri": "such-a-good-uri", }, "all_stores": True, } sot.import_image( self.sess, "web-download", uri="such-a-good-uri", all_stores=True, ) self.sess.post.assert_called_with( 'images/IDENTIFIER/import', headers={}, json=json, ) def test_upload(self): sot = image.Image(**EXAMPLE) self.assertIsNotNone(sot.upload(self.sess)) self.sess.put.assert_called_with( 'images/IDENTIFIER/file', data=sot.data, headers={"Content-Type": "application/octet-stream", "Accept": ""}, ) def test_stage(self): sot = image.Image(**EXAMPLE) self.assertIsNotNone(sot.stage(self.sess)) self.sess.put.assert_called_with( 'images/IDENTIFIER/stage', data=sot.data, headers={"Content-Type": "application/octet-stream", "Accept": ""}, ) def test_stage_error(self): sot = image.Image(**EXAMPLE) self.sess.put.return_value = FakeResponse("dummy", status_code=400) self.assertRaises(exceptions.SDKException, sot.stage, self.sess) def test_download_checksum_match(self): sot = image.Image(**EXAMPLE) resp = FakeResponse( b"abc", headers={ "Content-MD5": "900150983cd24fb0d6963f7d28e17f72", "Content-Type": "application/octet-stream", }, ) self.sess.get.return_value = resp rv = sot.download(self.sess) self.sess.get.assert_called_with( 'images/IDENTIFIER/file', stream=False ) self.assertEqual(rv, resp) def test_download_checksum_mismatch(self): sot = image.Image(**EXAMPLE) resp = FakeResponse( b"abc", headers={ "Content-MD5": "the wrong checksum", "Content-Type": "application/octet-stream", }, ) self.sess.get.return_value = resp self.assertRaises(exceptions.InvalidResponse, sot.download, self.sess) def test_download_no_checksum_header(self): sot = image.Image(**EXAMPLE) resp1 = FakeResponse( b"abc", headers={"Content-Type": "application/octet-stream"} ) resp2 = FakeResponse({"checksum": "900150983cd24fb0d6963f7d28e17f72"}) self.sess.get.side_effect = [resp1, resp2] rv = sot.download(self.sess) 
self.sess.get.assert_has_calls( [ mock.call('images/IDENTIFIER/file', stream=False), mock.call( 'images/IDENTIFIER', microversion=None, params={}, skip_cache=False, ), ] ) self.assertEqual(rv, resp1) def test_download_no_checksum_at_all2(self): sot = image.Image(**EXAMPLE) resp1 = FakeResponse( b"abc", headers={"Content-Type": "application/octet-stream"} ) resp2 = FakeResponse({"checksum": None}) self.sess.get.side_effect = [resp1, resp2] with self.assertLogs(logger='openstack', level="WARNING") as log: rv = sot.download(self.sess) self.assertEqual( len(log.records), 1, "Too many warnings were logged" ) self.assertEqual( "Unable to verify the integrity of image %s", log.records[0].msg, ) self.assertEqual((sot.id,), log.records[0].args) self.sess.get.assert_has_calls( [ mock.call('images/IDENTIFIER/file', stream=False), mock.call( 'images/IDENTIFIER', microversion=None, params={}, skip_cache=False, ), ] ) self.assertEqual(rv, resp1) def test_download_stream(self): sot = image.Image(**EXAMPLE) resp = FakeResponse( b"abc", headers={ "Content-MD5": "900150983cd24fb0d6963f7d28e17f72", "Content-Type": "application/octet-stream", }, ) self.sess.get.return_value = resp rv = sot.download(self.sess, stream=True) self.sess.get.assert_called_with('images/IDENTIFIER/file', stream=True) self.assertEqual(rv, resp) def test_image_download_output_fd(self): output_file = io.BytesIO() sot = image.Image(**EXAMPLE) response = mock.Mock() response.status_code = 200 response.iter_content.return_value = [b'01', b'02'] response.headers = { 'Content-MD5': calculate_md5_checksum( response.iter_content.return_value ) } self.sess.get = mock.Mock(return_value=response) sot.download(self.sess, output=output_file) output_file.seek(0) self.assertEqual(b'0102', output_file.read()) def test_image_download_output_file(self): sot = image.Image(**EXAMPLE) response = mock.Mock() response.status_code = 200 response.iter_content.return_value = [b'01', b'02'] response.headers = { 'Content-MD5': 
calculate_md5_checksum( response.iter_content.return_value ) } self.sess.get = mock.Mock(return_value=response) output_file = tempfile.NamedTemporaryFile() sot.download(self.sess, output=output_file.name) output_file.seek(0) self.assertEqual(b'0102', output_file.read()) def test_image_update(self): values = EXAMPLE.copy() del values['instance_uuid'] sot = image.Image.existing(**values) # Let the translate pass through, that portion is tested elsewhere sot._translate_response = mock.Mock() resp = mock.Mock() resp.content = b"abc" headers = { 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'Accept': '', } resp.headers = headers resp.status_code = 200 self.sess.patch.return_value = resp value = [ {"value": "fake_name", "op": "replace", "path": "/name"}, {"value": "fake_value", "op": "add", "path": "/instance_uuid"}, ] sot.name = 'fake_name' sot.instance_uuid = 'fake_value' sot.commit(self.sess) url = 'images/' + IDENTIFIER self.sess.patch.assert_called_once() call = self.sess.patch.call_args call_args, call_kwargs = call self.assertEqual(url, call_args[0]) self.assertEqual( sorted(value, key=operator.itemgetter('value')), sorted(call_kwargs['json'], key=operator.itemgetter('value')), ) def test_image_find(self): sot = image.Image() self.sess._get_connection = mock.Mock(return_value=self.cloud) self.sess.get.side_effect = [ # First fetch by name FakeResponse(None, 404, headers={}, reason='dummy'), # Then list with no results FakeResponse({'images': []}), # And finally new list of hidden images with one searched FakeResponse({'images': [EXAMPLE]}), ] result = sot.find(self.sess, EXAMPLE['name']) self.sess.get.assert_has_calls( [ mock.call( 'images/' + EXAMPLE['name'], microversion=None, params={}, skip_cache=False, ), mock.call( '/images', headers={'Accept': 'application/json'}, microversion=None, params={'name': EXAMPLE['name']}, ), mock.call( '/images', headers={'Accept': 'application/json'}, microversion=None, params={'os_hidden': True}, ), ] ) 
self.assertIsInstance(result, image.Image) self.assertEqual(IDENTIFIER, result.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_member.py0000664000175000017500000000325700000000000024637 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v2 import member from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'created_at': '2015-03-09T12:14:57.233772', 'image_id': '2', 'member_id': IDENTIFIER, 'status': '4', 'updated_at': '2015-03-09T12:15:57.233772', } class TestMember(base.TestCase): def test_basic(self): sot = member.Member() self.assertIsNone(sot.resource_key) self.assertEqual('members', sot.resources_key) self.assertEqual('/images/%(image_id)s/members', sot.base_path) self.assertEqual('member', sot._alternate_id()) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = member.Member(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['image_id'], sot.image_id) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_metadef_namespace.py0000664000175000017500000000720500000000000027006 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import adapter from openstack import exceptions from openstack.image.v2 import metadef_namespace from openstack.tests.unit import base EXAMPLE = { 'display_name': 'Cinder Volume Type', 'created_at': '2022-08-24T17:46:24Z', 'protected': True, 'namespace': 'OS::Cinder::Volumetype', 'description': ( 'The Cinder volume type configuration option. Volume type ' 'assignment provides a mechanism not only to provide scheduling to a ' 'specific storage back-end, but also can be used to specify specific ' 'information for a back-end storage device to act upon.' 
), 'visibility': 'public', 'owner': 'admin', 'resource_type_associations': [ { 'name': 'OS::Glance::Image', 'prefix': 'cinder_', 'created_at': '2022-08-24T17:46:24Z', }, ], } class TestMetadefNamespace(base.TestCase): def test_basic(self): sot = metadef_namespace.MetadefNamespace() self.assertIsNone(sot.resource_key) self.assertEqual('namespaces', sot.resources_key) self.assertEqual('/metadefs/namespaces', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_delete) def test_make_it(self): sot = metadef_namespace.MetadefNamespace(**EXAMPLE) self.assertEqual(EXAMPLE['namespace'], sot.namespace) self.assertEqual(EXAMPLE['visibility'], sot.visibility) self.assertEqual(EXAMPLE['owner'], sot.owner) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['protected'], sot.is_protected) self.assertEqual(EXAMPLE['display_name'], sot.display_name) self.assertEqual( EXAMPLE['resource_type_associations'], sot.resource_type_associations, ) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'resource_types': 'resource_types', 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', 'visibility': 'visibility', }, sot._query_mapping._mapping, ) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) def test_delete_all_properties(self): sot = metadef_namespace.MetadefNamespace(**EXAMPLE) session = mock.Mock(spec=adapter.Adapter) sot._translate_response = mock.Mock() sot.delete_all_properties(session) session.delete.assert_called_with( 'metadefs/namespaces/OS::Cinder::Volumetype/properties' ) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) def test_delete_all_objects(self): sot = metadef_namespace.MetadefNamespace(**EXAMPLE) session = mock.Mock(spec=adapter.Adapter) sot._translate_response = mock.Mock() sot.delete_all_objects(session) session.delete.assert_called_with( 'metadefs/namespaces/OS::Cinder::Volumetype/objects' ) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_metadef_object.py0000664000175000017500000000543100000000000026317 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v2 import metadef_object from openstack.tests.unit import base EXAMPLE = { 'created_at': '2014-09-19T18:20:56Z', 'description': 'The CPU limits with control parameters.', 'name': 'CPU Limits', 'properties': { 'quota:cpu_period': { 'description': 'The enforcement interval', 'maximum': 1000000, 'minimum': 1000, 'title': 'Quota: CPU Period', 'type': 'integer', }, 'quota:cpu_quota': { 'description': 'The maximum allowed bandwidth', 'title': 'Quota: CPU Quota', 'type': 'integer', }, 'quota:cpu_shares': { 'description': 'The proportional weighted', 'title': 'Quota: CPU Shares', 'type': 'integer', }, }, 'required': [], 'schema': '/v2/schemas/metadefs/object', 'updated_at': '2014-09-19T18:20:56Z', } class TestMetadefObject(base.TestCase): def test_basic(self): sot = metadef_object.MetadefObject() self.assertIsNone(sot.resource_key) self.assertEqual('objects', sot.resources_key) test_base_path = '/metadefs/namespaces/%(namespace_name)s/objects' self.assertEqual(test_base_path, sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def 
test_make_it(self): sot = metadef_object.MetadefObject(**EXAMPLE) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['properties'], sot.properties) self.assertEqual(EXAMPLE['required'], sot.required) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertDictEqual( { "limit": "limit", "marker": "marker", "visibility": "visibility", "resource_types": "resource_types", "sort_key": "sort_key", "sort_dir": "sort_dir", }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_metadef_property.py0000664000175000017500000000601400000000000026733 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v2 import metadef_property from openstack.tests.unit import base EXAMPLE = { 'namespace_name': 'CIM::StorageAllocationSettingData', 'name': 'Access', 'type': 'string', 'title': 'Access', 'description': ( 'Access describes whether the allocated storage extent is ' '1 (readable), 2 (writeable), or 3 (both).' 
), 'operators': [''], 'default': None, 'readonly': None, 'minimum': None, 'maximum': None, 'enum': [ 'Unknown', 'Readable', 'Writeable', 'Read/Write Supported', 'DMTF Reserved', ], 'pattern': None, 'min_length': 0, 'max_length': None, 'items': None, 'unique_items': False, 'min_items': 0, 'max_items': None, 'additional_items': None, } class TestMetadefProperty(base.TestCase): def test_basic(self): sot = metadef_property.MetadefProperty() self.assertEqual( '/metadefs/namespaces/%(namespace_name)s/properties', sot.base_path ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = metadef_property.MetadefProperty(**EXAMPLE) self.assertEqual(EXAMPLE['namespace_name'], sot.namespace_name) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['title'], sot.title) self.assertEqual(EXAMPLE['description'], sot.description) self.assertListEqual(EXAMPLE['operators'], sot.operators) self.assertEqual(EXAMPLE['default'], sot.default) self.assertEqual(EXAMPLE['readonly'], sot.is_readonly) self.assertEqual(EXAMPLE['minimum'], sot.minimum) self.assertEqual(EXAMPLE['maximum'], sot.maximum) self.assertListEqual(EXAMPLE['enum'], sot.enum) self.assertEqual(EXAMPLE['pattern'], sot.pattern) self.assertEqual(EXAMPLE['min_length'], sot.min_length) self.assertEqual(EXAMPLE['max_length'], sot.max_length) self.assertEqual(EXAMPLE['items'], sot.items) self.assertEqual(EXAMPLE['unique_items'], sot.require_unique_items) self.assertEqual(EXAMPLE['min_items'], sot.min_items) self.assertEqual(EXAMPLE['max_items'], sot.max_items) self.assertEqual( EXAMPLE['additional_items'], sot.allow_additional_items ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_metadef_resource_type.py0000664000175000017500000000265000000000000027741 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v2 import metadef_resource_type from openstack.tests.unit import base EXAMPLE = {"name": "OS::Nova::Aggregate", "created_at": "2022-07-09T04:10:37Z"} class TestMetadefResourceType(base.TestCase): def test_basic(self): sot = metadef_resource_type.MetadefResourceType() self.assertIsNone(sot.resource_key) self.assertEqual('resource_types', sot.resources_key) self.assertEqual('/metadefs/resource_types', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = metadef_resource_type.MetadefResourceType(**EXAMPLE) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['created_at'], sot.created_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_metadef_resource_type_association.py0000664000175000017500000000334100000000000032333 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v2 import metadef_resource_type from openstack.tests.unit import base EXAMPLE = { "name": "OS::Cinder::Volume", "prefix": "CIM_PASD_", "properties_target": "image", "created_at": "2022-07-09T04:10:38Z", } class TestMetadefResourceTypeAssociation(base.TestCase): def test_basic(self): sot = metadef_resource_type.MetadefResourceTypeAssociation() self.assertIsNone(sot.resource_key) self.assertEqual('resource_type_associations', sot.resources_key) self.assertEqual( '/metadefs/namespaces/%(namespace_name)s/resource_types', sot.base_path, ) self.assertTrue(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = metadef_resource_type.MetadefResourceTypeAssociation(**EXAMPLE) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['prefix'], sot.prefix) self.assertEqual(EXAMPLE['properties_target'], sot.properties_target) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_metadef_schema.py0000664000175000017500000000711100000000000026306 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v2 import metadef_schema from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'name': 'namespace', 'properties': { 'namespace': { 'type': 'string', 'description': 'The unique namespace text.', 'maxLength': 80, }, 'visibility': { 'type': 'string', 'description': 'Scope of namespace accessibility.', 'enum': ['public', 'private'], }, 'created_at': { 'type': 'string', 'readOnly': True, 'description': 'Date and time of namespace creation', 'format': 'date-time', }, 'resource_type_associations': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'name': {'type': 'string'}, 'prefix': {'type': 'string'}, 'properties_target': {'type': 'string'}, }, }, }, 'properties': {'$ref': '#/definitions/property'}, 'objects': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'name': {'type': 'string'}, 'description': {'type': 'string'}, 'required': {'$ref': '#/definitions/stringArray'}, 'properties': {'$ref': '#/definitions/property'}, }, }, }, 'tags': { 'type': 'array', 'items': { 'type': 'object', 'properties': {'name': {'type': 'string'}}, }, }, }, 'additionalProperties': False, 'definitions': { 'positiveInteger': {'type': 'integer', 'minimum': 0}, 'positiveIntegerDefault0': { 'allOf': [ {'$ref': '#/definitions/positiveInteger'}, {'default': 0}, ] }, 'stringArray': { 'type': 'array', 'items': {'type': 'string'}, 'uniqueItems': True, }, }, 'required': ['namespace'], } class TestMetadefSchema(base.TestCase): def test_basic(self): sot = metadef_schema.MetadefSchema() self.assertIsNone(sot.resource_key) 
self.assertIsNone(sot.resources_key) self.assertEqual('/schemas/metadefs', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = metadef_schema.MetadefSchema(**EXAMPLE) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['properties'], sot.properties) self.assertEqual( EXAMPLE['additionalProperties'], sot.additional_properties ) self.assertEqual(EXAMPLE['definitions'], sot.definitions) self.assertEqual(EXAMPLE['required'], sot.required) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_proxy.py0000664000175000017500000011165000000000000024546 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import io import os.path import tempfile from unittest import mock import requests from openstack import exceptions from openstack.image.v2 import _proxy from openstack.image.v2 import cache as _cache from openstack.image.v2 import image as _image from openstack.image.v2 import member as _member from openstack.image.v2 import metadef_namespace as _metadef_namespace from openstack.image.v2 import metadef_object as _metadef_object from openstack.image.v2 import metadef_resource_type as _metadef_resource_type from openstack.image.v2 import metadef_schema as _metadef_schema from openstack.image.v2 import schema as _schema from openstack.image.v2 import service_info as _service_info from openstack.image.v2 import task as _task from openstack import proxy as proxy_base from openstack.tests.unit.image.v2 import test_image as fake_image from openstack.tests.unit import test_proxy_base EXAMPLE = fake_image.EXAMPLE class FakeResponse: def __init__(self, response, status_code=200, headers=None): self.body = response self.status_code = status_code headers = headers if headers else {'content-type': 'application/json'} self.headers = requests.structures.CaseInsensitiveDict(headers) def json(self): return self.body class TestImageProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) self.proxy._connection = self.cloud class TestImage(TestImageProxy): def test_image_import_no_required_attrs(self): # container_format and disk_format are required attrs of the image existing_image = _image.Image(id="id") self.assertRaises( exceptions.InvalidRequest, self.proxy.import_image, existing_image, ) def test_image_import(self): original_image = _image.Image(**EXAMPLE) self._verify( "openstack.image.v2.image.Image.import_image", self.proxy.import_image, method_args=[original_image, "method"], method_kwargs={ "uri": "uri", }, expected_args=[self.proxy], expected_kwargs={ "method": "method", "store": None, "uri": "uri", "remote_region": 
None, "remote_image_id": None, "remote_service_interface": None, "stores": [], "all_stores": None, "all_stores_must_succeed": None, }, ) def test_image_create_conflicting_options(self): exc = self.assertRaises( exceptions.SDKException, self.proxy.create_image, name='fake', filename='fake', data='fake', container='bare', disk_format='raw', ) self.assertIn('filename and data are mutually exclusive', str(exc)) def test_image_create(self): self.verify_create( self.proxy.create_image, _image.Image, method_kwargs={ 'name': 'fake', 'disk_format': 'fake_dformat', 'container_format': 'fake_cformat', 'allow_duplicates': True, 'is_protected': True, }, expected_kwargs={ 'name': 'fake', 'disk_format': 'fake_dformat', 'container_format': 'fake_cformat', 'is_protected': True, 'owner_specified.openstack.md5': '', 'owner_specified.openstack.object': 'images/fake', 'owner_specified.openstack.sha256': '', }, ) def test_image_create_file_as_name(self): # if we pass a filename as an image name, we should upload the file # itself (and use the upload flow) with tempfile.NamedTemporaryFile() as tmpfile: name = os.path.basename(tmpfile.name) self._verify( 'openstack.image.v2._proxy.Proxy._upload_image', self.proxy.create_image, method_kwargs={ 'name': tmpfile.name, 'allow_duplicates': True, }, expected_args=[ name, ], expected_kwargs={ 'filename': tmpfile.name, 'data': None, 'meta': {}, 'wait': False, 'timeout': 3600, 'validate_checksum': False, 'use_import': False, 'stores': None, 'all_stores': None, 'all_stores_must_succeed': None, 'disk_format': 'qcow2', 'container_format': 'bare', 'properties': { 'owner_specified.openstack.md5': '', 'owner_specified.openstack.object': f'images/{name}', 'owner_specified.openstack.sha256': '', }, }, ) # but not if we use a directory... 
with tempfile.TemporaryDirectory() as tmpdir: self.verify_create( self.proxy.create_image, _image.Image, method_kwargs={ 'name': tmpdir, 'allow_duplicates': True, }, expected_kwargs={ 'container_format': 'bare', 'disk_format': 'qcow2', 'name': tmpdir, 'owner_specified.openstack.md5': '', 'owner_specified.openstack.object': f'images/{tmpdir}', 'owner_specified.openstack.sha256': '', }, ) def test_image_create_checksum_match(self): fake_image = _image.Image( id="fake", properties={ self.proxy._IMAGE_MD5_KEY: 'fake_md5', self.proxy._IMAGE_SHA256_KEY: 'fake_sha256', }, ) self.proxy.find_image = mock.Mock(return_value=fake_image) self.proxy._upload_image = mock.Mock() res = self.proxy.create_image( name='fake', md5='fake_md5', sha256='fake_sha256' ) self.assertEqual(fake_image, res) self.proxy._upload_image.assert_not_called() def test_image_create_checksum_mismatch(self): fake_image = _image.Image( id="fake", properties={ self.proxy._IMAGE_MD5_KEY: 'fake_md5', self.proxy._IMAGE_SHA256_KEY: 'fake_sha256', }, ) self.proxy.find_image = mock.Mock(return_value=fake_image) self.proxy._upload_image = mock.Mock() self.proxy.create_image( name='fake', data=b'fake', md5='fake2_md5', sha256='fake2_sha256' ) self.proxy._upload_image.assert_called() def test_image_create_allow_duplicates_find_not_called(self): self.proxy.find_image = mock.Mock() self.proxy._upload_image = mock.Mock() self.proxy.create_image( name='fake', data=b'fake', allow_duplicates=True, ) self.proxy.find_image.assert_not_called() def test_image_create_validate_checksum_data_binary(self): """Pass real data as binary""" self.proxy.find_image = mock.Mock() self.proxy._upload_image = mock.Mock() self.proxy.create_image( name='fake', data=b'fake', validate_checksum=True, container='bare', disk_format='raw', ) self.proxy.find_image.assert_called_with('fake') self.proxy._upload_image.assert_called_with( 'fake', container_format='bare', disk_format='raw', filename=None, data=b'fake', meta={}, properties={ 
self.proxy._IMAGE_MD5_KEY: '144c9defac04969c7bfad8efaa8ea194', self.proxy._IMAGE_SHA256_KEY: 'b5d54c39e66671c9731b9f471e585d8262cd4f54963f0c93082d8dcf334d4c78', # noqa: E501 self.proxy._IMAGE_OBJECT_KEY: 'bare/fake', }, timeout=3600, validate_checksum=True, use_import=False, stores=None, all_stores=None, all_stores_must_succeed=None, wait=False, ) def test_image_create_validate_checksum_data_not_binary(self): self.assertRaises( exceptions.SDKException, self.proxy.create_image, name='fake', data=io.StringIO(), validate_checksum=True, container='bare', disk_format='raw', ) def test_image_create_data_binary(self): """Pass binary file-like object""" self.proxy.find_image = mock.Mock() self.proxy._upload_image = mock.Mock() data = io.BytesIO(b'\0\0') self.proxy.create_image( name='fake', data=data, validate_checksum=False, container='bare', disk_format='raw', ) self.proxy._upload_image.assert_called_with( 'fake', container_format='bare', disk_format='raw', filename=None, data=data, meta={}, properties={ self.proxy._IMAGE_MD5_KEY: '', self.proxy._IMAGE_SHA256_KEY: '', self.proxy._IMAGE_OBJECT_KEY: 'bare/fake', }, timeout=3600, validate_checksum=False, use_import=False, stores=None, all_stores=None, all_stores_must_succeed=None, wait=False, ) def test_image_create_protected(self): self.proxy.find_image = mock.Mock() created_image = mock.Mock(spec=_image.Image(id="id")) self.proxy._create = mock.Mock() self.proxy._create.return_value = created_image self.proxy._create.return_value.image_import_methods = [] created_image.upload = mock.Mock() created_image.upload.return_value = FakeResponse( response="", status_code=200 ) properties = {"is_protected": True} self.proxy.create_image( name="fake", data="data", container_format="bare", disk_format="raw", **properties, ) args, kwargs = self.proxy._create.call_args self.assertEqual(kwargs["is_protected"], True) def test_image_create_with_stores(self): self.proxy.find_image = mock.Mock() self.proxy._upload_image = mock.Mock() 
self.proxy.create_image( name='fake', data=b'fake', container='bare', disk_format='raw', use_import=True, stores=['cinder', 'swift'], ) self.proxy.find_image.assert_called_with('fake') self.proxy._upload_image.assert_called_with( 'fake', container_format='bare', disk_format='raw', filename=None, data=b'fake', meta={}, properties={ self.proxy._IMAGE_MD5_KEY: '', self.proxy._IMAGE_SHA256_KEY: '', # noqa: E501 self.proxy._IMAGE_OBJECT_KEY: 'bare/fake', }, timeout=3600, validate_checksum=False, use_import=True, stores=['cinder', 'swift'], all_stores=None, all_stores_must_succeed=None, wait=False, ) def test_image_create_with_all_stores(self): self.proxy.find_image = mock.Mock() self.proxy._upload_image = mock.Mock() self.proxy.create_image( name='fake', data=b'fake', container='bare', disk_format='raw', use_import=True, all_stores=True, all_stores_must_succeed=True, ) self.proxy.find_image.assert_called_with('fake') self.proxy._upload_image.assert_called_with( 'fake', container_format='bare', disk_format='raw', filename=None, data=b'fake', meta={}, properties={ self.proxy._IMAGE_MD5_KEY: '', self.proxy._IMAGE_SHA256_KEY: '', # noqa: E501 self.proxy._IMAGE_OBJECT_KEY: 'bare/fake', }, timeout=3600, validate_checksum=False, use_import=True, stores=None, all_stores=True, all_stores_must_succeed=True, wait=False, ) def test_image_upload_no_args(self): # container_format and disk_format are required args self.assertRaises(exceptions.InvalidRequest, self.proxy.upload_image) def test_image_upload(self): # NOTE: This doesn't use any of the base class verify methods # because it ends up making two separate calls to complete the # operation. 
created_image = mock.Mock(spec=_image.Image(id="id")) self.proxy._create = mock.Mock() self.proxy._create.return_value = created_image rv = self.proxy.upload_image( data="data", container_format="x", disk_format="y", name="z" ) self.proxy._create.assert_called_with( _image.Image, container_format="x", disk_format="y", name="z", ) created_image.upload.assert_called_with(self.proxy) self.assertEqual(rv, created_image) def test_image_download(self): original_image = _image.Image(**EXAMPLE) self._verify( 'openstack.image.v2.image.Image.download', self.proxy.download_image, method_args=[original_image], method_kwargs={ 'output': 'some_output', 'chunk_size': 1, 'stream': True, }, expected_args=[self.proxy], expected_kwargs={ 'output': 'some_output', 'chunk_size': 1, 'stream': True, }, ) @mock.patch("openstack.image.v2.image.Image.fetch") def test_image_stage(self, mock_fetch): image = _image.Image(id="id", status="queued") image.stage = mock.Mock() self.proxy.stage_image(image) mock_fetch.assert_called() image.stage.assert_called_with(self.proxy) @mock.patch("openstack.image.v2.image.Image.fetch") def test_image_stage_with_data(self, mock_fetch): image = _image.Image(id="id", status="queued") image.stage = mock.Mock() mock_fetch.return_value = image rv = self.proxy.stage_image(image, data="data") image.stage.assert_called_with(self.proxy) mock_fetch.assert_called() self.assertEqual(rv.data, "data") def test_image_stage_conflicting_options(self): image = _image.Image(id="id", status="queued") image.stage = mock.Mock() exc = self.assertRaises( exceptions.SDKException, self.proxy.stage_image, image, filename='foo', data='data', ) self.assertIn( 'filename and data are mutually exclusive', str(exc), ) image.stage.assert_not_called() def test_image_stage_wrong_status(self): image = _image.Image(id="id", status="active") image.stage = mock.Mock() exc = self.assertRaises( exceptions.SDKException, self.proxy.stage_image, image, data="data", ) self.assertIn( 'Image stage is only 
possible for images in the queued state.', str(exc), ) image.stage.assert_not_called() def test_image_delete(self): self.verify_delete(self.proxy.delete_image, _image.Image, False) def test_image_delete__ignore(self): self.verify_delete(self.proxy.delete_image, _image.Image, True) def test_delete_image__from_store(self): store = _service_info.Store(id='fast', is_default=True) store.delete_image = mock.Mock() image = _image.Image(id="id", status="queued") self.proxy.delete_image(image, store=store) store.delete_image.assert_called_with( self.proxy, image, ignore_missing=True, ) @mock.patch("openstack.resource.Resource._translate_response") @mock.patch("openstack.proxy.Proxy._get") @mock.patch("openstack.image.v2.image.Image.commit") def test_image_update( self, mock_commit_image, mock_get_image, mock_transpose ): original_image = _image.Image(**EXAMPLE) mock_get_image.return_value = original_image EXAMPLE['name'] = 'fake_name' updated_image = _image.Image(**EXAMPLE) mock_commit_image.return_value = updated_image.to_dict() result = self.proxy.update_image( original_image, **updated_image.to_dict() ) self.assertEqual('fake_name', result.get('name')) def test_image_get(self): self.verify_get(self.proxy.get_image, _image.Image) def test_images(self): self.verify_list(self.proxy.images, _image.Image) def test_add_tag(self): self._verify( "openstack.image.v2.image.Image.add_tag", self.proxy.add_tag, method_args=["image", "tag"], expected_args=[self.proxy, "tag"], ) def test_remove_tag(self): self._verify( "openstack.image.v2.image.Image.remove_tag", self.proxy.remove_tag, method_args=["image", "tag"], expected_args=[self.proxy, "tag"], ) def test_deactivate_image(self): self._verify( "openstack.image.v2.image.Image.deactivate", self.proxy.deactivate_image, method_args=["image"], expected_args=[self.proxy], ) def test_reactivate_image(self): self._verify( "openstack.image.v2.image.Image.reactivate", self.proxy.reactivate_image, method_args=["image"], 
expected_args=[self.proxy], ) class TestMember(TestImageProxy): def test_member_create(self): self.verify_create( self.proxy.add_member, _member.Member, method_kwargs={"image": "test_id"}, expected_kwargs={"image_id": "test_id"}, ) def test_member_delete(self): self._verify( "openstack.proxy.Proxy._delete", self.proxy.remove_member, method_args=["member_id"], method_kwargs={"image": "image_id", "ignore_missing": False}, expected_args=[_member.Member], expected_kwargs={ "member_id": "member_id", "image_id": "image_id", "ignore_missing": False, }, ) def test_member_delete_ignore(self): self._verify( "openstack.proxy.Proxy._delete", self.proxy.remove_member, method_args=["member_id"], method_kwargs={"image": "image_id"}, expected_args=[_member.Member], expected_kwargs={ "member_id": "member_id", "image_id": "image_id", "ignore_missing": True, }, ) def test_member_update(self): self._verify( "openstack.proxy.Proxy._update", self.proxy.update_member, method_args=['member_id', 'image_id'], expected_args=[_member.Member], expected_kwargs={'member_id': 'member_id', 'image_id': 'image_id'}, ) def test_member_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_member, method_args=['member_id'], method_kwargs={"image": "image_id"}, expected_args=[_member.Member], expected_kwargs={'member_id': 'member_id', 'image_id': 'image_id'}, ) def test_member_find(self): self._verify( "openstack.proxy.Proxy._find", self.proxy.find_member, method_args=['member_id'], method_kwargs={"image": "image_id"}, expected_args=[_member.Member, "member_id"], expected_kwargs={'ignore_missing': True, 'image_id': 'image_id'}, ) def test_members(self): self.verify_list( self.proxy.members, _member.Member, method_kwargs={'image': 'image_1'}, expected_kwargs={'image_id': 'image_1'}, ) class TestMetadefNamespace(TestImageProxy): def test_metadef_namespace_create(self): self.verify_create( self.proxy.create_metadef_namespace, _metadef_namespace.MetadefNamespace, ) def 
test_metadef_namespace_delete(self): self.verify_delete( self.proxy.delete_metadef_namespace, _metadef_namespace.MetadefNamespace, False, ) def test_metadef_namespace_delete__ignore(self): self.verify_delete( self.proxy.delete_metadef_namespace, _metadef_namespace.MetadefNamespace, True, ) def test_metadef_namespace_get(self): self.verify_get( self.proxy.get_metadef_namespace, _metadef_namespace.MetadefNamespace, ) def test_metadef_namespaces(self): self.verify_list( self.proxy.metadef_namespaces, _metadef_namespace.MetadefNamespace, ) def test_metadef_namespace_update(self): # we're (intentionally) adding an additional field, 'namespace', to the # request body self.verify_update( self.proxy.update_metadef_namespace, _metadef_namespace.MetadefNamespace, method_kwargs={'is_protected': True}, expected_kwargs={'namespace': 'resource_id', 'is_protected': True}, ) class TestMetadefObject(TestImageProxy): def test_create_metadef_object(self): self.verify_create( self.proxy.create_metadef_object, _metadef_object.MetadefObject, method_kwargs={"namespace": "test_namespace_name"}, expected_kwargs={"namespace_name": "test_namespace_name"}, ) def test_get_metadef_object(self): self.verify_get( self.proxy.get_metadef_object, _metadef_object.MetadefObject, method_kwargs={"namespace": "test_namespace_name"}, expected_kwargs={ "namespace_name": "test_namespace_name", 'name': 'resource_id', }, expected_args=[], ) def test_metadef_objects(self): self.verify_list( self.proxy.metadef_objects, _metadef_object.MetadefObject, method_kwargs={"namespace": "test_namespace_name"}, expected_kwargs={"namespace_name": "test_namespace_name"}, ) def test_update_metadef_object(self): self._verify( "openstack.proxy.Proxy._update", self.proxy.update_metadef_object, method_args=["test_metadef_object", "test_namespace_name"], method_kwargs={"name": "new_object"}, expected_args=[ _metadef_object.MetadefObject, 'test_metadef_object', ], expected_kwargs={ "name": "new_object", "namespace_name": 
"test_namespace_name", }, ) def test_delete_metadef_object(self): self.verify_delete( self.proxy.delete_metadef_object, _metadef_object.MetadefObject, False, method_kwargs={"namespace": "test_namespace_name"}, expected_kwargs={"namespace_name": "test_namespace_name"}, ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_delete_all_metadef_objects(self, mock_get_resource): fake_object = _metadef_namespace.MetadefNamespace() mock_get_resource.return_value = fake_object self._verify( "openstack.image.v2.metadef_namespace.MetadefNamespace.delete_all_objects", self.proxy.delete_all_metadef_objects, method_args=['test_namespace'], expected_args=[self.proxy], ) mock_get_resource.assert_called_once_with( _metadef_namespace.MetadefNamespace, 'test_namespace' ) class TestMetadefResourceType(TestImageProxy): def test_metadef_resource_types(self): self.verify_list( self.proxy.metadef_resource_types, _metadef_resource_type.MetadefResourceType, ) class TestMetadefResourceTypeAssociation(TestImageProxy): def test_create_metadef_resource_type_association(self): self.verify_create( self.proxy.create_metadef_resource_type_association, _metadef_resource_type.MetadefResourceTypeAssociation, method_kwargs={'metadef_namespace': 'namespace_name'}, expected_kwargs={'namespace_name': 'namespace_name'}, ) def test_delete_metadef_resource_type_association(self): self.verify_delete( self.proxy.delete_metadef_resource_type_association, _metadef_resource_type.MetadefResourceTypeAssociation, False, method_kwargs={'metadef_namespace': 'namespace_name'}, expected_kwargs={'namespace_name': 'namespace_name'}, ) def test_delete_metadef_resource_type_association_ignore(self): self.verify_delete( self.proxy.delete_metadef_resource_type_association, _metadef_resource_type.MetadefResourceTypeAssociation, True, method_kwargs={'metadef_namespace': 'namespace_name'}, expected_kwargs={'namespace_name': 'namespace_name'}, ) def test_metadef_resource_type_associations(self): self.verify_list( 
self.proxy.metadef_resource_type_associations, _metadef_resource_type.MetadefResourceTypeAssociation, method_kwargs={'metadef_namespace': 'namespace_name'}, expected_kwargs={'namespace_name': 'namespace_name'}, ) class TestSchema(TestImageProxy): def test_images_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_images_schema, expected_args=[_schema.Schema], expected_kwargs={ 'base_path': '/schemas/images', 'requires_id': False, }, ) def test_image_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_image_schema, expected_args=[_schema.Schema], expected_kwargs={ 'base_path': '/schemas/image', 'requires_id': False, }, ) def test_members_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_members_schema, expected_args=[_schema.Schema], expected_kwargs={ 'base_path': '/schemas/members', 'requires_id': False, }, ) def test_member_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_member_schema, expected_args=[_schema.Schema], expected_kwargs={ 'base_path': '/schemas/member', 'requires_id': False, }, ) class TestTask(TestImageProxy): def test_task_get(self): self.verify_get(self.proxy.get_task, _task.Task) def test_tasks(self): self.verify_list(self.proxy.tasks, _task.Task) def test_task_create(self): self.verify_create(self.proxy.create_task, _task.Task) def test_wait_for_task_immediate_status(self): status = 'success' res = _task.Task(id='1234', status=status) result = self.proxy.wait_for_task(res, status, "failure", 0.01, 0.1) self.assertEqual(res, result) def test_wait_for_task_immediate_status_case(self): status = "SUCcess" res = _task.Task(id='1234', status=status) result = self.proxy.wait_for_task(res, status, "failure", 0.01, 0.1) self.assertEqual(res, result) def test_wait_for_task_error_396(self): # Ensure we create a new task when we get 396 error res = _task.Task( id='id', status='waiting', type='some_type', input='some_input', result='some_result', ) 
mock_fetch = mock.Mock() mock_fetch.side_effect = [ _task.Task( id='id', status='failure', type='some_type', input='some_input', result='some_result', message=_proxy._IMAGE_ERROR_396, ), _task.Task(id='fake', status='waiting'), _task.Task(id='fake', status='success'), ] self.proxy._create = mock.Mock() self.proxy._create.side_effect = [ _task.Task(id='fake', status='success') ] with mock.patch.object(_task.Task, 'fetch', mock_fetch): result = self.proxy.wait_for_task(res, interval=0.01, wait=0.5) self.assertEqual('success', result.status) self.proxy._create.assert_called_with( mock.ANY, input=res.input, type=res.type ) def test_wait_for_task_wait(self): res = _task.Task(id='id', status='waiting') mock_fetch = mock.Mock() mock_fetch.side_effect = [ _task.Task(id='id', status='waiting'), _task.Task(id='id', status='waiting'), _task.Task(id='id', status='success'), ] with mock.patch.object(_task.Task, 'fetch', mock_fetch): result = self.proxy.wait_for_task(res, interval=0.01, wait=0.5) self.assertEqual('success', result.status) def test_tasks_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_tasks_schema, expected_args=[_schema.Schema], expected_kwargs={ 'base_path': '/schemas/tasks', 'requires_id': False, }, ) def test_task_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_task_schema, expected_args=[_schema.Schema], expected_kwargs={ 'base_path': '/schemas/task', 'requires_id': False, }, ) class TestMisc(TestImageProxy): def test_stores(self): self.verify_list(self.proxy.stores, _service_info.Store) def test_import_info(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_import_info, method_args=[], method_kwargs={}, expected_args=[_service_info.Import], expected_kwargs={'requires_id': False}, ) class TestMetadefSchema(TestImageProxy): def test_metadef_namespace_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_metadef_namespace_schema, 
expected_args=[_metadef_schema.MetadefSchema], expected_kwargs={ 'base_path': '/schemas/metadefs/namespace', 'requires_id': False, }, ) def test_metadef_namespaces_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_metadef_namespaces_schema, expected_args=[_metadef_schema.MetadefSchema], expected_kwargs={ 'base_path': '/schemas/metadefs/namespaces', 'requires_id': False, }, ) def test_metadef_resource_type_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_metadef_resource_type_schema, expected_args=[_metadef_schema.MetadefSchema], expected_kwargs={ 'base_path': '/schemas/metadefs/resource_type', 'requires_id': False, }, ) def test_metadef_resource_types_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_metadef_resource_types_schema, expected_args=[_metadef_schema.MetadefSchema], expected_kwargs={ 'base_path': '/schemas/metadefs/resource_types', 'requires_id': False, }, ) def test_metadef_object_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_metadef_object_schema, expected_args=[_metadef_schema.MetadefSchema], expected_kwargs={ 'base_path': '/schemas/metadefs/object', 'requires_id': False, }, ) def test_metadef_objects_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_metadef_objects_schema, expected_args=[_metadef_schema.MetadefSchema], expected_kwargs={ 'base_path': '/schemas/metadefs/objects', 'requires_id': False, }, ) def test_metadef_property_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_metadef_property_schema, expected_args=[_metadef_schema.MetadefSchema], expected_kwargs={ 'base_path': '/schemas/metadefs/property', 'requires_id': False, }, ) def test_metadef_properties_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_metadef_properties_schema, expected_args=[_metadef_schema.MetadefSchema], expected_kwargs={ 'base_path': '/schemas/metadefs/properties', 
'requires_id': False, }, ) def test_metadef_tag_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_metadef_tag_schema, expected_args=[_metadef_schema.MetadefSchema], expected_kwargs={ 'base_path': '/schemas/metadefs/tag', 'requires_id': False, }, ) def test_metadef_tags_schema_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_metadef_tags_schema, expected_args=[_metadef_schema.MetadefSchema], expected_kwargs={ 'base_path': '/schemas/metadefs/tags', 'requires_id': False, }, ) class TestCache(TestImageProxy): def test_image_cache_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_image_cache, expected_args=[_cache.Cache], expected_kwargs={'requires_id': False}, ) def test_cache_image_delete(self): self.verify_delete( self.proxy.cache_delete_image, _cache.Cache, ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_image_queue(self, mock_get_resource): fake_cache = _cache.Cache() mock_get_resource.return_value = fake_cache self._verify( "openstack.image.v2.cache.Cache.queue", self.proxy.queue_image, method_args=['image-id'], expected_args=[self.proxy, 'image-id'], ) mock_get_resource.assert_called_once_with(_cache.Cache, None) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_image_clear_cache(self, mock_get_resource): fake_cache = _cache.Cache() mock_get_resource.return_value = fake_cache self._verify( "openstack.image.v2.cache.Cache.clear", self.proxy.clear_cache, method_args=['both'], expected_args=[self.proxy, 'both'], ) mock_get_resource.assert_called_once_with(_cache.Cache, None) mock_get_resource.reset_mock() self._verify( "openstack.image.v2.cache.Cache.clear", self.proxy.clear_cache, method_args=[], expected_args=[self.proxy, 'both'], ) mock_get_resource.assert_called_once_with(_cache.Cache, None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_schema.py0000664000175000017500000000367700000000000024636 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image.v2 import schema from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'additionalProperties': {'type': 'string'}, 'links': [ {'href': '{self}', 'rel': 'self'}, {'href': '{file}', 'rel': 'enclosure'}, {'href': '{schema}', 'rel': 'describedby'}, ], 'name': 'image', 'properties': { 'architecture': { 'description': 'Operating system architecture', 'is_base': False, 'type': 'string', }, 'visibility': { 'description': 'Scope of image accessibility', 'enum': ['public', 'private'], 'type': 'string', }, }, } class TestSchema(base.TestCase): def test_basic(self): sot = schema.Schema() self.assertIsNone(sot.resource_key) self.assertIsNone(sot.resources_key) self.assertEqual('/schemas', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = schema.Schema(**EXAMPLE) self.assertEqual(EXAMPLE['properties'], sot.properties) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual( EXAMPLE['additionalProperties'], sot.additional_properties ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_service_info.py0000664000175000017500000000543200000000000026040 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack import exceptions from openstack.image.v2 import service_info as si from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE_IMPORT = { 'import-methods': { 'description': 'Import methods available.', 'type': 'array', 'value': ['glance-direct', 'web-download'], } } EXAMPLE_STORE = { 'id': IDENTIFIER, 'description': 'Fast access to rbd store', 'default': True, 'properties': { "pool": "pool1", "chunk_size": 65536, "thin_provisioning": False, }, } class TestStore(base.TestCase): def test_basic(self): sot = si.Store() self.assertIsNone(sot.resource_key) self.assertEqual('stores', sot.resources_key) self.assertEqual('/info/stores', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = si.Store(**EXAMPLE_STORE) self.assertEqual(EXAMPLE_STORE['id'], sot.id) self.assertEqual(EXAMPLE_STORE['description'], sot.description) self.assertEqual(EXAMPLE_STORE['default'], sot.is_default) self.assertEqual(EXAMPLE_STORE['properties'], sot.properties) @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) def test_delete_image(self): sot = si.Store(**EXAMPLE_STORE) session = 
mock.Mock() session.delete = mock.Mock() sot.delete_image(session, image='image_id') session.delete.assert_called_with('stores/IDENTIFIER/image_id') class TestImport(base.TestCase): def test_basic(self): sot = si.Import() self.assertIsNone(sot.resource_key) self.assertIsNone(sot.resources_key) self.assertEqual('/info/import', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = si.Import(**EXAMPLE_IMPORT) self.assertEqual(EXAMPLE_IMPORT['import-methods'], sot.import_methods) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/image/v2/test_task.py0000664000175000017500000000475100000000000024332 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.image.v2 import task from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'created_at': '2016-06-24T14:40:19Z', 'id': IDENTIFIER, 'input': { 'image_properties': {'container_format': 'ovf', 'disk_format': 'vhd'}, 'import_from': 'http://example.com', 'import_from_format': 'qcow2', }, 'message': 'message', 'owner': 'fa6c8c1600f4444281658a23ee6da8e8', 'result': 'some result', 'schema': '/v2/schemas/task', 'status': 'processing', 'type': 'import', 'updated_at': '2016-06-24T14:40:20Z', } class TestTask(base.TestCase): def test_basic(self): sot = task.Task() self.assertIsNone(sot.resource_key) self.assertEqual('tasks', sot.resources_key) self.assertEqual('/tasks', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', 'status': 'status', 'type': 'type', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = task.Task(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['input'], sot.input) self.assertEqual(EXAMPLE['message'], sot.message) self.assertEqual(EXAMPLE['owner'], sot.owner_id) self.assertEqual(EXAMPLE['result'], sot.result) self.assertEqual(EXAMPLE['schema'], sot.schema) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4654195 openstacksdk-4.0.0/openstack/tests/unit/instance_ha/0000775000175000017500000000000000000000000022613 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/instance_ha/__init__.py0000664000175000017500000000000000000000000024712 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4694214 openstacksdk-4.0.0/openstack/tests/unit/instance_ha/v1/0000775000175000017500000000000000000000000023141 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/instance_ha/v1/__init__.py0000664000175000017500000000000000000000000025240 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/instance_ha/v1/test_host.py0000664000175000017500000000556400000000000025541 0ustar00zuulzuul00000000000000# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from openstack.instance_ha.v1 import host from openstack.tests.unit import base FAKE_ID = "1c2f1795-ce78-4d4c-afd0-ce141fdb3952" FAKE_UUID = "11f7597f-87d2-4057-b754-ba611f989807" FAKE_HOST_ID = "c27dec16-ed4d-4ebe-8e77-f1e28ec32417" FAKE_CONTROL_ATTRIBUTES = {"mcastaddr": "239.255.1.1", "mcastport": "5405"} HOST = { "id": FAKE_ID, "uuid": FAKE_UUID, "segment_id": FAKE_HOST_ID, "created_at": "2018-03-22T00:00:00.000000", "updated_at": "2018-03-23T00:00:00.000000", "name": "my_host", "type": "pacemaker", "control_attributes": FAKE_CONTROL_ATTRIBUTES, "on_maintenance": False, "reserved": False, "failover_segment_id": FAKE_HOST_ID, } class TestHost(base.TestCase): def test_basic(self): sot = host.Host(HOST) self.assertEqual("host", sot.resource_key) self.assertEqual("hosts", sot.resources_key) self.assertEqual("/segments/%(segment_id)s/hosts", sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertDictEqual( { "failover_segment_id": "failover_segment_id", "limit": "limit", "marker": "marker", "on_maintenance": "on_maintenance", "reserved": "reserved", "sort_dir": "sort_dir", "sort_key": "sort_key", "type": "type", }, sot._query_mapping._mapping, ) def test_create(self): sot = host.Host(**HOST) self.assertEqual(HOST["id"], sot.id) self.assertEqual(HOST["uuid"], sot.uuid) self.assertEqual(HOST["segment_id"], sot.segment_id) self.assertEqual(HOST["created_at"], sot.created_at) self.assertEqual(HOST["updated_at"], sot.updated_at) self.assertEqual(HOST["name"], sot.name) self.assertEqual(HOST["type"], sot.type) self.assertEqual(HOST["control_attributes"], sot.control_attributes) self.assertEqual(HOST["on_maintenance"], sot.on_maintenance) self.assertEqual(HOST["reserved"], sot.reserved) self.assertEqual(HOST["failover_segment_id"], sot.failover_segment_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/instance_ha/v1/test_notification.py0000664000175000017500000001166200000000000027246 0ustar00zuulzuul00000000000000# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack.instance_ha.v1 import notification from openstack.tests.unit import base FAKE_ID = "569429e9-7f14-41be-a38e-920277e637db" FAKE_UUID = "a0e70d3a-b3a2-4616-b65d-a7c03a2c85fc" FAKE_HOST_UUID = "cad9ff01-c354-4414-ba3c-31b925be67f1" PAYLOAD = { "instance_uuid": "4032bc1d-d723-47f6-b5ac-b9b3e6dbb795", "vir_domain_event": "STOPPED_FAILED", "event": "LIFECYCLE", } PROGRESS_DETAILS = [ { "timestamp": "2019-02-28 07:21:33.291810", "progress": 1.0, "message": "Skipping recovery for process " "nova-compute as it is already disabled", } ] RECOVERY_WORKFLOW_DETAILS = [ { "progress": 1.0, "state": "SUCCESS", "name": "DisableComputeNodeTask", "progress_details": PROGRESS_DETAILS, } ] NOTIFICATION = { "id": FAKE_ID, "notification_uuid": FAKE_UUID, "created_at": "2018-03-22T00:00:00.000000", "updated_at": "2018-03-23T00:00:00.000000", "type": "pacemaker", "hostname": "fake_host", "status": "new", "generated_time": "2018-03-21T00:00:00.000000", "payload": PAYLOAD, "source_host_uuid": FAKE_HOST_UUID, "recovery_workflow_details": RECOVERY_WORKFLOW_DETAILS, } class TestNotification(base.TestCase): def test_basic(self): sot = notification.Notification(NOTIFICATION) 
self.assertEqual("notification", sot.resource_key) self.assertEqual("notifications", sot.resources_key) self.assertEqual("/notifications", sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_create) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertDictEqual( { "generated_since": "generated-since", "limit": "limit", "marker": "marker", "sort_dir": "sort_dir", "sort_key": "sort_key", "source_host_uuid": "source_host_uuid", "status": "status", "type": "type", }, sot._query_mapping._mapping, ) def test_create(self): sot = notification.Notification(**NOTIFICATION) rec_workflow_details = NOTIFICATION["recovery_workflow_details"][0] self.assertEqual(NOTIFICATION["id"], sot.id) self.assertEqual( NOTIFICATION["notification_uuid"], sot.notification_uuid ) self.assertEqual(NOTIFICATION["created_at"], sot.created_at) self.assertEqual(NOTIFICATION["updated_at"], sot.updated_at) self.assertEqual(NOTIFICATION["type"], sot.type) self.assertEqual(NOTIFICATION["hostname"], sot.hostname) self.assertEqual(NOTIFICATION["status"], sot.status) self.assertEqual(NOTIFICATION["generated_time"], sot.generated_time) self.assertEqual(NOTIFICATION["payload"], sot.payload) self.assertEqual( NOTIFICATION["source_host_uuid"], sot.source_host_uuid ) self.assertEqual( rec_workflow_details["name"], sot.recovery_workflow_details[0].name ) self.assertEqual( rec_workflow_details["state"], sot.recovery_workflow_details[0].state, ) self.assertEqual( rec_workflow_details["progress"], sot.recovery_workflow_details[0].progress, ) self.assertEqual( rec_workflow_details["progress_details"][0]['progress'], sot.recovery_workflow_details[0].progress_details[0].progress, ) self.assertEqual( rec_workflow_details["progress_details"][0]['message'], sot.recovery_workflow_details[0].progress_details[0].message, ) self.assertEqual( rec_workflow_details["progress_details"][0]['timestamp'], 
sot.recovery_workflow_details[0].progress_details[0].timestamp, ) self.assertIsInstance(sot.recovery_workflow_details, list) self.assertIsInstance( sot.recovery_workflow_details[0].progress_details, list ) self.assertIsInstance( sot.recovery_workflow_details[0], notification.RecoveryWorkflowDetailItem, ) self.assertIsInstance( sot.recovery_workflow_details[0].progress_details[0], notification.ProgressDetailsItem, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/instance_ha/v1/test_proxy.py0000664000175000017500000001003400000000000025731 0ustar00zuulzuul00000000000000# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from openstack.instance_ha.v1 import _proxy
from openstack.instance_ha.v1 import host
from openstack.instance_ha.v1 import notification
from openstack.instance_ha.v1 import segment
from openstack.instance_ha.v1 import vmove
from openstack.tests.unit import test_proxy_base

# Fixed identifiers shared by the proxy-call assertions below.
SEGMENT_ID = "c50b96eb-2a66-40f8-bca8-c5fa90d595c0"
HOST_ID = "52d05e43-d08e-42b8-ae33-e47c8ea2ad47"
NOTIFICATION_ID = "a0e70d3a-b3a2-4616-b65d-a7c03a2c85fc"
VMOVE_ID = "16a7c91f-8342-49a7-c731-3a632293f845"


class TestInstanceHaProxy(test_proxy_base.TestProxyBase):
    """Common fixture: an instance-ha (masakari) proxy on the mock session."""

    def setUp(self):
        super().setUp()
        self.proxy = _proxy.Proxy(self.session)


class TestInstanceHaHosts(TestInstanceHaProxy):
    """Host CRUD calls must thread the owning segment_id through."""

    def test_hosts(self):
        self.verify_list(
            self.proxy.hosts,
            host.Host,
            method_args=[SEGMENT_ID],
            expected_args=[],
            expected_kwargs={"segment_id": SEGMENT_ID},
        )

    def test_host_get(self):
        self.verify_get(
            self.proxy.get_host,
            host.Host,
            method_args=[HOST_ID],
            method_kwargs={"segment_id": SEGMENT_ID},
            expected_kwargs={"segment_id": SEGMENT_ID},
        )

    def test_host_create(self):
        self.verify_create(
            self.proxy.create_host,
            host.Host,
            method_args=[SEGMENT_ID],
            method_kwargs={},
            expected_args=[],
            expected_kwargs={"segment_id": SEGMENT_ID},
        )

    def test_host_update(self):
        self.verify_update(
            self.proxy.update_host,
            host.Host,
            method_kwargs={"segment_id": SEGMENT_ID},
        )

    def test_host_delete(self):
        self.verify_delete(
            self.proxy.delete_host,
            host.Host,
            True,
            method_kwargs={"segment_id": SEGMENT_ID},
            expected_kwargs={"segment_id": SEGMENT_ID},
        )


class TestInstanceHaNotifications(TestInstanceHaProxy):
    """Notifications are list/get/create only (no update or delete proxy)."""

    def test_notifications(self):
        self.verify_list(self.proxy.notifications, notification.Notification)

    def test_notification_get(self):
        self.verify_get(self.proxy.get_notification, notification.Notification)

    def test_notification_create(self):
        self.verify_create(
            self.proxy.create_notification, notification.Notification
        )


class TestInstanceHaSegments(TestInstanceHaProxy):
    """Segments support the full CRUD surface."""

    def test_segments(self):
        self.verify_list(self.proxy.segments, segment.Segment)

    def test_segment_get(self):
        self.verify_get(self.proxy.get_segment, segment.Segment)

    def test_segment_create(self):
        self.verify_create(self.proxy.create_segment, segment.Segment)

    def test_segment_update(self):
        self.verify_update(self.proxy.update_segment, segment.Segment)

    def test_segment_delete(self):
        self.verify_delete(self.proxy.delete_segment, segment.Segment, True)


class TestInstanceHaVMoves(TestInstanceHaProxy):
    """VMoves are scoped under a notification; notification_id is required."""

    def test_vmoves(self):
        self.verify_list(
            self.proxy.vmoves,
            vmove.VMove,
            method_args=[NOTIFICATION_ID],
            expected_args=[],
            expected_kwargs={"notification_id": NOTIFICATION_ID},
        )

    def test_vmove_get(self):
        self.verify_get(
            self.proxy.get_vmove,
            vmove.VMove,
            method_args=[VMOVE_ID, NOTIFICATION_ID],
            expected_args=[VMOVE_ID],
            expected_kwargs={"notification_id": NOTIFICATION_ID},
        )
from openstack.instance_ha.v1 import segment from openstack.tests.unit import base FAKE_ID = "1c2f1795-ce78-4d4c-afd0-ce141fdb3952" FAKE_UUID = "11f7597f-87d2-4057-b754-ba611f989807" SEGMENT = { "id": FAKE_ID, "uuid": FAKE_UUID, "created_at": "2018-03-22T00:00:00.000000", "updated_at": "2018-03-23T00:00:00.000000", "name": "my_segment", "description": "something", "recovery_method": "auto", "service_type": "COMPUTE_HOST", "enabled": True, } class TestSegment(base.TestCase): def test_basic(self): sot = segment.Segment(SEGMENT) self.assertEqual("segment", sot.resource_key) self.assertEqual("segments", sot.resources_key) self.assertEqual("/segments", sot.base_path) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertDictEqual( { "limit": "limit", "marker": "marker", "recovery_method": "recovery_method", "service_type": "service_type", "is_enabled": "enabled", "sort_dir": "sort_dir", "sort_key": "sort_key", }, sot._query_mapping._mapping, ) def test_create(self): sot = segment.Segment(**SEGMENT) self.assertEqual(SEGMENT["id"], sot.id) self.assertEqual(SEGMENT["uuid"], sot.uuid) self.assertEqual(SEGMENT["created_at"], sot.created_at) self.assertEqual(SEGMENT["updated_at"], sot.updated_at) self.assertEqual(SEGMENT["name"], sot.name) self.assertEqual(SEGMENT["description"], sot.description) self.assertEqual(SEGMENT["recovery_method"], sot.recovery_method) self.assertEqual(SEGMENT["service_type"], sot.service_type) self.assertEqual(SEGMENT["enabled"], sot.is_enabled) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/instance_ha/v1/test_vmove.py0000664000175000017500000000554300000000000025715 0ustar00zuulzuul00000000000000# Copyright(c) 2022 Inspur # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in 
from openstack.instance_ha.v1 import vmove
from openstack.tests.unit import base

FAKE_ID = "1"
FAKE_UUID = "16a7c91f-8342-49a7-c731-3a632293f845"
FAKE_NOTIFICATION_ID = "a0e70d3a-b3a2-4616-b65d-a7c03a2c85fc"
FAKE_SERVER_ID = "1c2f1795-ce78-4d4c-afd0-ce141fdb3952"

# Representative vmove record (an evacuation triggered by a notification).
VMOVE = {
    'id': FAKE_ID,
    'uuid': FAKE_UUID,
    'notification_id': FAKE_NOTIFICATION_ID,
    'created_at': "2023-01-28T14:55:26.000000",
    'updated_at': "2023-01-28T14:55:31.000000",
    'server_id': FAKE_SERVER_ID,
    'server_name': 'vm1',
    'source_host': 'host1',
    'dest_host': 'host2',
    'start_time': "2023-01-28T14:55:27.000000",
    'end_time': "2023-01-28T14:55:31.000000",
    'status': 'succeeded',
    'type': 'evacuation',
    'message': None,
}


class TestVMove(base.TestCase):
    """Unit tests for the instance-ha VMove resource definition."""

    def test_basic(self):
        # VMoves are read-only (list/fetch) and nested under a notification.
        sot = vmove.VMove(VMOVE)
        self.assertEqual("vmove", sot.resource_key)
        self.assertEqual("vmoves", sot.resources_key)
        self.assertEqual(
            "/notifications/%(notification_id)s/vmoves", sot.base_path
        )
        self.assertTrue(sot.allow_list)
        self.assertTrue(sot.allow_fetch)
        self.assertDictEqual(
            {
                "status": "status",
                "type": "type",
                "limit": "limit",
                "marker": "marker",
                "sort_dir": "sort_dir",
                "sort_key": "sort_key",
            },
            sot._query_mapping._mapping,
        )

    def test_create(self):
        # Every wire field maps 1:1 onto the SDK attribute of the same name.
        sot = vmove.VMove(**VMOVE)
        self.assertEqual(VMOVE["id"], sot.id)
        self.assertEqual(VMOVE["uuid"], sot.uuid)
        self.assertEqual(VMOVE["notification_id"], sot.notification_id)
        self.assertEqual(VMOVE["created_at"], sot.created_at)
        self.assertEqual(VMOVE["updated_at"], sot.updated_at)
        self.assertEqual(VMOVE["server_id"], sot.server_id)
        self.assertEqual(VMOVE["server_name"], sot.server_name)
        self.assertEqual(VMOVE["source_host"], sot.source_host)
        self.assertEqual(VMOVE["dest_host"], sot.dest_host)
        self.assertEqual(VMOVE["start_time"], sot.start_time)
        self.assertEqual(VMOVE["end_time"], sot.end_time)
        self.assertEqual(VMOVE["status"], sot.status)
        self.assertEqual(VMOVE["type"], sot.type)
        self.assertEqual(VMOVE["message"], sot.message)
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.key_manager.v1 import container from openstack.tests.unit import base ID_VAL = "123" IDENTIFIER = 'http://localhost/containers/%s' % ID_VAL EXAMPLE = { 'container_ref': IDENTIFIER, 'created': '2015-03-09T12:14:57.233772', 'name': '3', 'secret_refs': ['4'], 'status': '5', 'type': '6', 'updated': '2015-03-09T12:15:57.233772', 'consumers': ['7'], } class TestContainer(base.TestCase): def test_basic(self): sot = container.Container() self.assertIsNone(sot.resource_key) self.assertEqual('containers', sot.resources_key) self.assertEqual('/containers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = container.Container(**EXAMPLE) self.assertEqual(EXAMPLE['created'], sot.created_at) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['secret_refs'], sot.secret_refs) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['updated'], sot.updated_at) self.assertEqual(EXAMPLE['container_ref'], sot.id) self.assertEqual(EXAMPLE['container_ref'], sot.container_ref) self.assertEqual(ID_VAL, sot.container_id) self.assertEqual(EXAMPLE['consumers'], sot.consumers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/key_manager/v1/test_order.py0000664000175000017500000000425300000000000025677 0ustar00zuulzuul00000000000000# Licensed under 
the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.key_manager.v1 import order from openstack.tests.unit import base ID_VAL = "123" SECRET_ID = "5" IDENTIFIER = 'http://localhost/orders/%s' % ID_VAL EXAMPLE = { 'created': '1', 'creator_id': '2', 'meta': {'key': '3'}, 'order_ref': IDENTIFIER, 'secret_ref': 'http://localhost/secrets/%s' % SECRET_ID, 'status': '6', 'sub_status': '7', 'sub_status_message': '8', 'type': '9', 'updated': '10', } class TestOrder(base.TestCase): def test_basic(self): sot = order.Order() self.assertIsNone(sot.resource_key) self.assertEqual('orders', sot.resources_key) self.assertEqual('/orders', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = order.Order(**EXAMPLE) self.assertEqual(EXAMPLE['created'], sot.created_at) self.assertEqual(EXAMPLE['creator_id'], sot.creator_id) self.assertEqual(EXAMPLE['meta'], sot.meta) self.assertEqual(EXAMPLE['order_ref'], sot.order_ref) self.assertEqual(ID_VAL, sot.order_id) self.assertEqual(EXAMPLE['secret_ref'], sot.secret_ref) self.assertEqual(SECRET_ID, sot.secret_id) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['sub_status'], sot.sub_status) self.assertEqual(EXAMPLE['sub_status_message'], sot.sub_status_message) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['updated'], sot.updated_at) 
from openstack.key_manager.v1 import _proxy
from openstack.key_manager.v1 import container
from openstack.key_manager.v1 import order
from openstack.key_manager.v1 import secret
from openstack.tests.unit import test_proxy_base


class TestKeyManagerProxy(test_proxy_base.TestProxyBase):
    """Common fixture: a key-manager (barbican) proxy on the mock session."""

    def setUp(self):
        super().setUp()
        self.proxy = _proxy.Proxy(self.session)


class TestKeyManagerContainer(TestKeyManagerProxy):
    """Container proxy methods delegate to the standard CRUD helpers."""

    def test_server_create_attrs(self):
        self.verify_create(self.proxy.create_container, container.Container)

    def test_container_delete(self):
        self.verify_delete(
            self.proxy.delete_container, container.Container, False
        )

    def test_container_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_container, container.Container, True
        )

    def test_container_find(self):
        self.verify_find(self.proxy.find_container, container.Container)

    def test_container_get(self):
        self.verify_get(self.proxy.get_container, container.Container)

    def test_containers(self):
        self.verify_list(self.proxy.containers, container.Container)

    def test_container_update(self):
        self.verify_update(self.proxy.update_container, container.Container)


class TestKeyManagerOrder(TestKeyManagerProxy):
    """Order proxy methods delegate to the standard CRUD helpers."""

    def test_order_create_attrs(self):
        self.verify_create(self.proxy.create_order, order.Order)

    def test_order_delete(self):
        self.verify_delete(self.proxy.delete_order, order.Order, False)

    def test_order_delete_ignore(self):
        self.verify_delete(self.proxy.delete_order, order.Order, True)

    def test_order_find(self):
        self.verify_find(self.proxy.find_order, order.Order)

    def test_order_get(self):
        self.verify_get(self.proxy.get_order, order.Order)

    def test_orders(self):
        self.verify_list(self.proxy.orders, order.Order)

    def test_order_update(self):
        self.verify_update(self.proxy.update_order, order.Order)


class TestKeyManagerSecret(TestKeyManagerProxy):
    """Secret proxy methods, including the overridden get_secret."""

    def test_secret_create_attrs(self):
        self.verify_create(self.proxy.create_secret, secret.Secret)

    def test_secret_delete(self):
        self.verify_delete(self.proxy.delete_secret, secret.Secret, False)

    def test_secret_delete_ignore(self):
        self.verify_delete(self.proxy.delete_secret, secret.Secret, True)

    def test_secret_find(self):
        self.verify_find(self.proxy.find_secret, secret.Secret)

    def test_secret_get(self):
        self.verify_get(self.proxy.get_secret, secret.Secret)
        # get_secret overrides the generic fetch (payload handling), so also
        # assert the override is wired to the Secret class.
        self.verify_get_overrided(
            self.proxy, secret.Secret, 'openstack.key_manager.v1.secret.Secret'
        )

    def test_secrets(self):
        self.verify_list(self.proxy.secrets, secret.Secret)

    def test_secret_update(self):
        self.verify_update(self.proxy.update_secret, secret.Secret)
from unittest import mock

from openstack.key_manager.v1 import secret
from openstack.tests.unit import base

ID_VAL = "123"
IDENTIFIER = 'http://localhost:9311/v1/secrets/%s' % ID_VAL

EXAMPLE = {
    'algorithm': '1',
    'bit_length': '2',
    'content_types': {'default': '3'},
    'expiration': '2017-03-09T12:14:57.233772',
    'mode': '5',
    'name': '6',
    'secret_ref': IDENTIFIER,
    'status': '8',
    'updated': '2015-03-09T12:15:57.233773',
    'created': '2015-03-09T12:15:57.233774',
    'secret_type': '9',
    'payload': '10',
    'payload_content_type': '11',
    'payload_content_encoding': '12',
}


class TestSecret(base.TestCase):
    """Unit tests for the key-manager Secret resource, including the
    custom fetch() behavior that retrieves the secret payload."""

    def test_basic(self):
        sot = secret.Secret()
        self.assertIsNone(sot.resource_key)
        self.assertEqual('secrets', sot.resources_key)
        self.assertEqual('/secrets', sot.base_path)
        self.assertTrue(sot.allow_create)
        self.assertTrue(sot.allow_fetch)
        self.assertTrue(sot.allow_commit)
        self.assertTrue(sot.allow_delete)
        self.assertTrue(sot.allow_list)
        self.assertDictEqual(
            {
                "name": "name",
                "mode": "mode",
                "bits": "bits",
                "secret_type": "secret_type",
                "acl_only": "acl_only",
                "created": "created",
                "updated": "updated",
                "expiration": "expiration",
                "sort": "sort",
                "algorithm": "alg",
                "limit": "limit",
                "marker": "marker",
            },
            sot._query_mapping._mapping,
        )

    def test_make_it(self):
        sot = secret.Secret(**EXAMPLE)
        self.assertEqual(EXAMPLE['algorithm'], sot.algorithm)
        self.assertEqual(EXAMPLE['bit_length'], sot.bit_length)
        self.assertEqual(EXAMPLE['content_types'], sot.content_types)
        self.assertEqual(EXAMPLE['expiration'], sot.expires_at)
        self.assertEqual(EXAMPLE['mode'], sot.mode)
        self.assertEqual(EXAMPLE['name'], sot.name)
        # The HREF doubles as the id; secret_id is its trailing segment.
        self.assertEqual(EXAMPLE['secret_ref'], sot.secret_ref)
        self.assertEqual(EXAMPLE['secret_ref'], sot.id)
        self.assertEqual(ID_VAL, sot.secret_id)
        self.assertEqual(EXAMPLE['status'], sot.status)
        self.assertEqual(EXAMPLE['updated'], sot.updated_at)
        self.assertEqual(EXAMPLE['secret_type'], sot.secret_type)
        self.assertEqual(EXAMPLE['payload'], sot.payload)
        self.assertEqual(
            EXAMPLE['payload_content_type'], sot.payload_content_type
        )
        self.assertEqual(
            EXAMPLE['payload_content_encoding'], sot.payload_content_encoding
        )

    def test_get_no_payload(self):
        # Without a content type (from argument or metadata), fetch() must
        # issue only the metadata GET and never request the payload.
        sot = secret.Secret(id="id")

        sess = mock.Mock()
        rv = mock.Mock()
        return_body = {"status": "cool"}
        rv.json = mock.Mock(return_value=return_body)
        sess.get = mock.Mock(return_value=rv)

        sot.fetch(sess)

        sess.get.assert_called_once_with("secrets/id")

    def _test_payload(self, sot, metadata, content_type):
        # Drive fetch() through both GETs (metadata, then payload) and check
        # the payload request carries the negotiated content type.
        #
        # Fix: the original immediately reassigned
        # ``content_type = "some/type"``, making the parameter dead; both
        # callers already pass "some/type", so honoring the parameter is
        # behaviorally identical while letting future callers vary it.
        metadata_response = mock.Mock()
        # Use copy because the dict gets consumed.
        metadata_response.json = mock.Mock(return_value=metadata.copy())

        payload_response = mock.Mock()
        payload = "secret info"
        payload_response.text = payload

        sess = mock.Mock()
        sess.get = mock.Mock(side_effect=[metadata_response, payload_response])

        rv = sot.fetch(sess)

        sess.get.assert_has_calls(
            [
                mock.call(
                    "secrets/id",
                ),
                mock.call(
                    "secrets/id/payload",
                    headers={"Accept": content_type},
                    skip_cache=False,
                ),
            ]
        )
        self.assertEqual(rv.payload, payload)
        self.assertEqual(rv.status, metadata["status"])

    def test_get_with_payload_from_argument(self):
        # Content type supplied directly on the resource.
        metadata = {"status": "great"}
        content_type = "some/type"
        sot = secret.Secret(id="id", payload_content_type=content_type)
        self._test_payload(sot, metadata, content_type)

    def test_get_with_payload_from_content_types(self):
        # Content type discovered from the metadata's content_types map.
        content_type = "some/type"
        metadata = {
            "status": "fine",
            "content_types": {"default": content_type},
        }
        sot = secret.Secret(id="id")
        self._test_payload(sot, metadata, content_type)
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4734232 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/0000775000175000017500000000000000000000000023105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/__init__.py0000664000175000017500000000000000000000000025204 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_amphora.py0000664000175000017500000001316100000000000026147 0ustar00zuulzuul00000000000000# Copyright 2019 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid

from openstack.load_balancer.v2 import amphora
from openstack.tests.unit import base

IDENTIFIER = uuid.uuid4()
LB_ID = uuid.uuid4()
LISTENER_ID = uuid.uuid4()
COMPUTE_ID = uuid.uuid4()
VRRP_PORT_ID = uuid.uuid4()
HA_PORT_ID = uuid.uuid4()
IMAGE_ID = uuid.uuid4()
COMPUTE_FLAVOR = uuid.uuid4()
AMPHORA_ID = uuid.uuid4()

# Representative amphora record as returned by the Octavia admin API.
EXAMPLE = {
    'id': IDENTIFIER,
    'loadbalancer_id': LB_ID,
    'compute_id': COMPUTE_ID,
    'lb_network_ip': '192.168.1.2',
    'vrrp_ip': '192.168.1.5',
    'ha_ip': '192.168.1.10',
    'vrrp_port_id': VRRP_PORT_ID,
    'ha_port_id': HA_PORT_ID,
    'cert_expiration': '2019-09-19 00:34:51',
    'cert_busy': 0,
    'role': 'MASTER',
    'status': 'ALLOCATED',
    'vrrp_interface': 'eth1',
    'vrrp_id': 1,
    'vrrp_priority': 100,
    'cached_zone': 'zone1',
    'created_at': '2017-05-10T18:14:44',
    'updated_at': '2017-05-10T23:08:12',
    'image_id': IMAGE_ID,
    'compute_flavor': COMPUTE_FLAVOR,
}


class TestAmphora(base.TestCase):
    """Unit tests for the load-balancer Amphora resource definition."""

    def test_basic(self):
        # Amphorae are read-only from the SDK: fetch/list only.
        test_amphora = amphora.Amphora()
        self.assertEqual('amphora', test_amphora.resource_key)
        self.assertEqual('amphorae', test_amphora.resources_key)
        self.assertEqual('/octavia/amphorae', test_amphora.base_path)
        self.assertFalse(test_amphora.allow_create)
        self.assertTrue(test_amphora.allow_fetch)
        self.assertFalse(test_amphora.allow_commit)
        self.assertFalse(test_amphora.allow_delete)
        self.assertTrue(test_amphora.allow_list)

    def test_make_it(self):
        test_amphora = amphora.Amphora(**EXAMPLE)
        self.assertEqual(IDENTIFIER, test_amphora.id)
        self.assertEqual(LB_ID, test_amphora.loadbalancer_id)
        self.assertEqual(COMPUTE_ID, test_amphora.compute_id)
        self.assertEqual(EXAMPLE['lb_network_ip'], test_amphora.lb_network_ip)
        self.assertEqual(EXAMPLE['vrrp_ip'], test_amphora.vrrp_ip)
        self.assertEqual(EXAMPLE['ha_ip'], test_amphora.ha_ip)
        self.assertEqual(VRRP_PORT_ID, test_amphora.vrrp_port_id)
        self.assertEqual(HA_PORT_ID, test_amphora.ha_port_id)
        self.assertEqual(
            EXAMPLE['cert_expiration'], test_amphora.cert_expiration
        )
        self.assertEqual(EXAMPLE['cert_busy'], test_amphora.cert_busy)
        self.assertEqual(EXAMPLE['role'], test_amphora.role)
        self.assertEqual(EXAMPLE['status'], test_amphora.status)
        self.assertEqual(
            EXAMPLE['vrrp_interface'], test_amphora.vrrp_interface
        )
        self.assertEqual(EXAMPLE['vrrp_id'], test_amphora.vrrp_id)
        self.assertEqual(EXAMPLE['vrrp_priority'], test_amphora.vrrp_priority)
        self.assertEqual(EXAMPLE['cached_zone'], test_amphora.cached_zone)
        self.assertEqual(EXAMPLE['created_at'], test_amphora.created_at)
        self.assertEqual(EXAMPLE['updated_at'], test_amphora.updated_at)
        self.assertEqual(IMAGE_ID, test_amphora.image_id)
        self.assertEqual(COMPUTE_FLAVOR, test_amphora.compute_flavor)
        # Fix: the expected mapping previously listed 'image_id' twice;
        # a duplicate dict key is silently collapsed (last one wins), so the
        # literal asserted exactly the same dict with or without it. Dropped
        # the redundant entry.
        self.assertDictEqual(
            {
                'limit': 'limit',
                'marker': 'marker',
                'id': 'id',
                'loadbalancer_id': 'loadbalancer_id',
                'compute_flavor': 'compute_flavor',
                'compute_id': 'compute_id',
                'lb_network_ip': 'lb_network_ip',
                'vrrp_ip': 'vrrp_ip',
                'ha_ip': 'ha_ip',
                'vrrp_port_id': 'vrrp_port_id',
                'ha_port_id': 'ha_port_id',
                'cert_expiration': 'cert_expiration',
                'cert_busy': 'cert_busy',
                'role': 'role',
                'status': 'status',
                'vrrp_interface': 'vrrp_interface',
                'vrrp_id': 'vrrp_id',
                'vrrp_priority': 'vrrp_priority',
                'cached_zone': 'cached_zone',
                'created_at': 'created_at',
                'updated_at': 'updated_at',
                'image_id': 'image_id',
            },
            test_amphora._query_mapping._mapping,
        )


class TestAmphoraConfig(base.TestCase):
    """The per-amphora config sub-resource only supports update (PUT)."""

    def test_basic(self):
        test_amp_config = amphora.AmphoraConfig()
        self.assertEqual(
            '/octavia/amphorae/%(amphora_id)s/config',
            test_amp_config.base_path,
        )
        self.assertFalse(test_amp_config.allow_create)
        self.assertFalse(test_amp_config.allow_fetch)
        self.assertTrue(test_amp_config.allow_commit)
        self.assertFalse(test_amp_config.allow_delete)
        self.assertFalse(test_amp_config.allow_list)


class TestAmphoraFailover(base.TestCase):
    """The per-amphora failover sub-resource only supports update (PUT)."""

    def test_basic(self):
        test_amp_failover = amphora.AmphoraFailover()
        self.assertEqual(
            '/octavia/amphorae/%(amphora_id)s/failover',
            test_amp_failover.base_path,
        )
        self.assertFalse(test_amp_failover.allow_create)
        self.assertFalse(test_amp_failover.allow_fetch)
        self.assertTrue(test_amp_failover.allow_commit)
        self.assertFalse(test_amp_failover.allow_delete)
        self.assertFalse(test_amp_failover.allow_list)
import uuid from openstack.load_balancer.v2 import availability_zone from openstack.tests.unit import base AVAILABILITY_ZONE_PROFILE_ID = uuid.uuid4() EXAMPLE = { 'name': 'strawberry', 'description': 'tasty', 'is_enabled': False, 'availability_zone_profile_id': AVAILABILITY_ZONE_PROFILE_ID, } class TestAvailabilityZone(base.TestCase): def test_basic(self): test_availability_zone = availability_zone.AvailabilityZone() self.assertEqual( 'availability_zone', test_availability_zone.resource_key ) self.assertEqual( 'availability_zones', test_availability_zone.resources_key ) self.assertEqual( '/lbaas/availabilityzones', test_availability_zone.base_path ) self.assertTrue(test_availability_zone.allow_create) self.assertTrue(test_availability_zone.allow_fetch) self.assertTrue(test_availability_zone.allow_commit) self.assertTrue(test_availability_zone.allow_delete) self.assertTrue(test_availability_zone.allow_list) def test_make_it(self): test_availability_zone = availability_zone.AvailabilityZone(**EXAMPLE) self.assertEqual(EXAMPLE['name'], test_availability_zone.name) self.assertEqual( EXAMPLE['description'], test_availability_zone.description ) self.assertFalse(test_availability_zone.is_enabled) self.assertEqual( EXAMPLE['availability_zone_profile_id'], test_availability_zone.availability_zone_profile_id, ) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'description': 'description', 'is_enabled': 'enabled', 'availability_zone_profile_id': 'availability_zone_profile_id', }, test_availability_zone._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_availability_zone_profile.py0000664000175000017500000000453700000000000031754 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack.load_balancer.v2 import availability_zone_profile from openstack.tests.unit import base IDENTIFIER = uuid.uuid4() EXAMPLE = { 'id': IDENTIFIER, 'name': 'acidic', 'provider_name': 'best', 'availability_zone_data': '{"loadbalancer_topology": "SINGLE"}', } class TestAvailabilityZoneProfile(base.TestCase): def test_basic(self): test_profile = availability_zone_profile.AvailabilityZoneProfile() self.assertEqual( 'availability_zone_profile', test_profile.resource_key ) self.assertEqual( 'availability_zone_profiles', test_profile.resources_key ) self.assertEqual( '/lbaas/availabilityzoneprofiles', test_profile.base_path ) self.assertTrue(test_profile.allow_create) self.assertTrue(test_profile.allow_fetch) self.assertTrue(test_profile.allow_commit) self.assertTrue(test_profile.allow_delete) self.assertTrue(test_profile.allow_list) def test_make_it(self): test_profile = availability_zone_profile.AvailabilityZoneProfile( **EXAMPLE ) self.assertEqual(EXAMPLE['id'], test_profile.id) self.assertEqual(EXAMPLE['name'], test_profile.name) self.assertEqual(EXAMPLE['provider_name'], test_profile.provider_name) self.assertEqual( EXAMPLE['availability_zone_data'], test_profile.availability_zone_data, ) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'id': 'id', 'name': 'name', 'provider_name': 'provider_name', 'availability_zone_data': 'availability_zone_data', }, test_profile._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
import uuid

from openstack.load_balancer.v2 import flavor
from openstack.load_balancer.v2 import flavor_profile
from openstack.tests.unit import base

IDENTIFIER = uuid.uuid4()
FLAVOR_PROFILE_ID = uuid.uuid4()

# Representative flavor record.
EXAMPLE = {
    'id': IDENTIFIER,
    'name': 'strawberry',
    'description': 'tasty',
    'is_enabled': False,
    'flavor_profile_id': FLAVOR_PROFILE_ID,
}

# Representative flavor-profile record; flavor_data is a JSON string.
PROFILE_EXAMPLE = {
    'id': IDENTIFIER,
    'name': 'acidic',
    'provider_name': 'best',
    'flavor_data': '{"loadbalancer_topology": "SINGLE"}',
}


class TestFlavor(base.TestCase):
    """Unit tests for the load-balancer Flavor resource definition."""

    def test_basic(self):
        test_flavor = flavor.Flavor()
        self.assertEqual('flavor', test_flavor.resource_key)
        self.assertEqual('flavors', test_flavor.resources_key)
        self.assertEqual('/lbaas/flavors', test_flavor.base_path)
        self.assertTrue(test_flavor.allow_create)
        self.assertTrue(test_flavor.allow_fetch)
        self.assertTrue(test_flavor.allow_commit)
        self.assertTrue(test_flavor.allow_delete)
        self.assertTrue(test_flavor.allow_list)

    def test_make_it(self):
        test_flavor = flavor.Flavor(**EXAMPLE)
        self.assertEqual(EXAMPLE['id'], test_flavor.id)
        self.assertEqual(EXAMPLE['name'], test_flavor.name)
        self.assertEqual(EXAMPLE['description'], test_flavor.description)
        self.assertFalse(test_flavor.is_enabled)
        self.assertEqual(
            EXAMPLE['flavor_profile_id'], test_flavor.flavor_profile_id
        )
        # "is_enabled" is the SDK-side name for the wire "enabled" filter.
        self.assertDictEqual(
            {
                'limit': 'limit',
                'marker': 'marker',
                'id': 'id',
                'name': 'name',
                'description': 'description',
                'is_enabled': 'enabled',
                'flavor_profile_id': 'flavor_profile_id',
            },
            test_flavor._query_mapping._mapping,
        )


class TestFlavorProfile(base.TestCase):
    """Unit tests for the load-balancer FlavorProfile resource definition."""

    def test_basic(self):
        test_profile = flavor_profile.FlavorProfile()
        self.assertEqual('flavorprofile', test_profile.resource_key)
        self.assertEqual('flavorprofiles', test_profile.resources_key)
        self.assertEqual('/lbaas/flavorprofiles', test_profile.base_path)
        self.assertTrue(test_profile.allow_create)
        self.assertTrue(test_profile.allow_fetch)
        self.assertTrue(test_profile.allow_commit)
        self.assertTrue(test_profile.allow_delete)
        self.assertTrue(test_profile.allow_list)

    def test_make_it(self):
        test_profile = flavor_profile.FlavorProfile(**PROFILE_EXAMPLE)
        self.assertEqual(PROFILE_EXAMPLE['id'], test_profile.id)
        self.assertEqual(PROFILE_EXAMPLE['name'], test_profile.name)
        self.assertEqual(
            PROFILE_EXAMPLE['provider_name'], test_profile.provider_name
        )
        self.assertEqual(
            PROFILE_EXAMPLE['flavor_data'], test_profile.flavor_data
        )
        self.assertDictEqual(
            {
                'limit': 'limit',
                'marker': 'marker',
                'id': 'id',
                'name': 'name',
                'provider_name': 'provider_name',
                'flavor_data': 'flavor_data',
            },
            test_profile._query_mapping._mapping,
        )
import uuid from openstack.load_balancer.v2 import health_monitor from openstack.tests.unit import base EXAMPLE = { 'admin_state_up': True, 'created_at': '2017-07-17T12:14:57.233772', 'delay': 10, 'expected_codes': '200, 202', 'http_method': 'HEAD', 'id': uuid.uuid4(), 'max_retries': 2, 'max_retries_down': 3, 'name': 'test_health_monitor', 'operating_status': 'ONLINE', 'pools': [{'id': uuid.uuid4()}], 'pool_id': uuid.uuid4(), 'project_id': uuid.uuid4(), 'provisioning_status': 'ACTIVE', 'timeout': 4, 'type': 'HTTP', 'updated_at': '2017-07-17T12:16:57.233772', 'url_path': '/health_page.html', } class TestPoolHealthMonitor(base.TestCase): def test_basic(self): test_hm = health_monitor.HealthMonitor() self.assertEqual('healthmonitor', test_hm.resource_key) self.assertEqual('healthmonitors', test_hm.resources_key) self.assertEqual('/lbaas/healthmonitors', test_hm.base_path) self.assertTrue(test_hm.allow_create) self.assertTrue(test_hm.allow_fetch) self.assertTrue(test_hm.allow_commit) self.assertTrue(test_hm.allow_delete) self.assertTrue(test_hm.allow_list) def test_make_it(self): test_hm = health_monitor.HealthMonitor(**EXAMPLE) self.assertTrue(test_hm.is_admin_state_up) self.assertEqual(EXAMPLE['created_at'], test_hm.created_at) self.assertEqual(EXAMPLE['delay'], test_hm.delay) self.assertEqual(EXAMPLE['expected_codes'], test_hm.expected_codes) self.assertEqual(EXAMPLE['http_method'], test_hm.http_method) self.assertEqual(EXAMPLE['id'], test_hm.id) self.assertEqual(EXAMPLE['max_retries'], test_hm.max_retries) self.assertEqual(EXAMPLE['max_retries_down'], test_hm.max_retries_down) self.assertEqual(EXAMPLE['name'], test_hm.name) self.assertEqual(EXAMPLE['operating_status'], test_hm.operating_status) self.assertEqual(EXAMPLE['pools'], test_hm.pools) self.assertEqual(EXAMPLE['pool_id'], test_hm.pool_id) self.assertEqual(EXAMPLE['project_id'], test_hm.project_id) self.assertEqual( EXAMPLE['provisioning_status'], test_hm.provisioning_status ) 
self.assertEqual(EXAMPLE['timeout'], test_hm.timeout) self.assertEqual(EXAMPLE['type'], test_hm.type) self.assertEqual(EXAMPLE['updated_at'], test_hm.updated_at) self.assertEqual(EXAMPLE['url_path'], test_hm.url_path) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'created_at': 'created_at', 'updated_at': 'updated_at', 'name': 'name', 'project_id': 'project_id', 'tags': 'tags', 'any_tags': 'tags-any', 'not_tags': 'not-tags', 'not_any_tags': 'not-tags-any', 'operating_status': 'operating_status', 'provisioning_status': 'provisioning_status', 'is_admin_state_up': 'admin_state_up', 'delay': 'delay', 'expected_codes': 'expected_codes', 'http_method': 'http_method', 'max_retries': 'max_retries', 'max_retries_down': 'max_retries_down', 'pool_id': 'pool_id', 'timeout': 'timeout', 'type': 'type', 'url_path': 'url_path', }, test_hm._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_l7policy.py0000664000175000017500000000776600000000000026300 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.load_balancer.v2 import l7_policy from openstack.tests.unit import base EXAMPLE = { 'action': 'REJECT', 'admin_state_up': True, 'created_at': '2017-07-17T12:14:57.233772', 'description': 'test_description', 'id': uuid.uuid4(), 'listener_id': uuid.uuid4(), 'name': 'test_l7_policy', 'operating_status': 'ONLINE', 'position': 7, 'project_id': uuid.uuid4(), 'provisioning_status': 'ACTIVE', 'redirect_pool_id': uuid.uuid4(), 'redirect_prefix': 'https://www.example.com', 'redirect_url': '/test_url', 'rules': [{'id': uuid.uuid4()}], 'updated_at': '2017-07-17T12:16:57.233772', } class TestL7Policy(base.TestCase): def test_basic(self): test_l7_policy = l7_policy.L7Policy() self.assertEqual('l7policy', test_l7_policy.resource_key) self.assertEqual('l7policies', test_l7_policy.resources_key) self.assertEqual('/lbaas/l7policies', test_l7_policy.base_path) self.assertTrue(test_l7_policy.allow_create) self.assertTrue(test_l7_policy.allow_fetch) self.assertTrue(test_l7_policy.allow_commit) self.assertTrue(test_l7_policy.allow_delete) self.assertTrue(test_l7_policy.allow_list) def test_make_it(self): test_l7_policy = l7_policy.L7Policy(**EXAMPLE) self.assertTrue(test_l7_policy.is_admin_state_up) self.assertEqual(EXAMPLE['action'], test_l7_policy.action) self.assertEqual(EXAMPLE['created_at'], test_l7_policy.created_at) self.assertEqual(EXAMPLE['description'], test_l7_policy.description) self.assertEqual(EXAMPLE['id'], test_l7_policy.id) self.assertEqual(EXAMPLE['listener_id'], test_l7_policy.listener_id) self.assertEqual(EXAMPLE['name'], test_l7_policy.name) self.assertEqual( EXAMPLE['operating_status'], test_l7_policy.operating_status ) self.assertEqual(EXAMPLE['position'], test_l7_policy.position) self.assertEqual(EXAMPLE['project_id'], test_l7_policy.project_id) self.assertEqual( EXAMPLE['provisioning_status'], test_l7_policy.provisioning_status ) self.assertEqual( EXAMPLE['redirect_pool_id'], test_l7_policy.redirect_pool_id ) self.assertEqual( 
EXAMPLE['redirect_prefix'], test_l7_policy.redirect_prefix ) self.assertEqual(EXAMPLE['redirect_url'], test_l7_policy.redirect_url) self.assertEqual(EXAMPLE['rules'], test_l7_policy.rules) self.assertEqual(EXAMPLE['updated_at'], test_l7_policy.updated_at) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'description': 'description', 'project_id': 'project_id', 'tags': 'tags', 'any_tags': 'tags-any', 'not_tags': 'not-tags', 'not_any_tags': 'not-tags-any', 'operating_status': 'operating_status', 'provisioning_status': 'provisioning_status', 'is_admin_state_up': 'admin_state_up', 'action': 'action', 'listener_id': 'listener_id', 'position': 'position', 'redirect_pool_id': 'redirect_pool_id', 'redirect_url': 'redirect_url', 'redirect_prefix': 'redirect_prefix', }, test_l7_policy._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_l7rule.py0000664000175000017500000000675100000000000025741 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.load_balancer.v2 import l7_rule from openstack.tests.unit import base EXAMPLE = { 'admin_state_up': True, 'compare_type': 'REGEX', 'created_at': '2017-08-17T12:14:57.233772', 'id': uuid.uuid4(), 'invert': False, 'key': 'my_cookie', 'l7_policy_id': uuid.uuid4(), 'operating_status': 'ONLINE', 'project_id': uuid.uuid4(), 'provisioning_status': 'ACTIVE', 'type': 'COOKIE', 'updated_at': '2017-08-17T12:16:57.233772', 'value': 'chocolate', } class TestL7Rule(base.TestCase): def test_basic(self): test_l7rule = l7_rule.L7Rule() self.assertEqual('rule', test_l7rule.resource_key) self.assertEqual('rules', test_l7rule.resources_key) self.assertEqual( '/lbaas/l7policies/%(l7policy_id)s/rules', test_l7rule.base_path ) self.assertTrue(test_l7rule.allow_create) self.assertTrue(test_l7rule.allow_fetch) self.assertTrue(test_l7rule.allow_commit) self.assertTrue(test_l7rule.allow_delete) self.assertTrue(test_l7rule.allow_list) def test_make_it(self): test_l7rule = l7_rule.L7Rule(**EXAMPLE) self.assertTrue(test_l7rule.is_admin_state_up) self.assertEqual(EXAMPLE['compare_type'], test_l7rule.compare_type) self.assertEqual(EXAMPLE['created_at'], test_l7rule.created_at) self.assertEqual(EXAMPLE['id'], test_l7rule.id) self.assertEqual(EXAMPLE['invert'], test_l7rule.invert) self.assertEqual(EXAMPLE['key'], test_l7rule.key) self.assertEqual(EXAMPLE['l7_policy_id'], test_l7rule.l7_policy_id) self.assertEqual( EXAMPLE['operating_status'], test_l7rule.operating_status ) self.assertEqual(EXAMPLE['project_id'], test_l7rule.project_id) self.assertEqual( EXAMPLE['provisioning_status'], test_l7rule.provisioning_status ) self.assertEqual(EXAMPLE['type'], test_l7rule.type) self.assertEqual(EXAMPLE['updated_at'], test_l7rule.updated_at) self.assertEqual(EXAMPLE['value'], test_l7rule.rule_value) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'created_at': 'created_at', 'updated_at': 'updated_at', 'project_id': 'project_id', 'tags': 'tags', 'any_tags': 'tags-any', 
'not_tags': 'not-tags', 'not_any_tags': 'not-tags-any', 'operating_status': 'operating_status', 'provisioning_status': 'provisioning_status', 'is_admin_state_up': 'admin_state_up', 'compare_type': 'compare_type', 'invert': 'invert', 'key': 'key', 'type': 'type', 'rule_value': 'rule_value', 'l7_policy_id': 'l7policy_id', }, test_l7rule._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_listener.py0000664000175000017500000002005400000000000026344 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.load_balancer.v2 import listener from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'allowed_cidrs': ['192.168.1.0/24'], 'connection_limit': '2', 'default_pool_id': uuid.uuid4(), 'description': 'test description', 'id': IDENTIFIER, 'insert_headers': {"X-Forwarded-For": "true"}, 'l7policies': [{'id': uuid.uuid4()}], 'loadbalancers': [{'id': uuid.uuid4()}], 'name': 'test_listener', 'project_id': uuid.uuid4(), 'protocol': 'TEST_PROTOCOL', 'protocol_port': 10, 'default_tls_container_ref': ( 'http://198.51.100.10:9311/v1/containers/' 'a570068c-d295-4780-91d4-3046a325db51' ), 'sni_container_refs': [], 'created_at': '2017-07-17T12:14:57.233772', 'updated_at': '2017-07-17T12:16:57.233772', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'hsts_include_subdomains': True, 'hsts_max_age': 30_000_000, 'hsts_preload': False, 'timeout_client_data': 50000, 'timeout_member_connect': 5000, 'timeout_member_data': 50000, 'timeout_tcp_inspect': 0, 'tls_ciphers': 'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256', 'tls_versions': ['TLSv1.1', 'TLSv1.2'], 'alpn_protocols': ['h2', 'http/1.1', 'http/1.0'], } EXAMPLE_STATS = { 'active_connections': 1, 'bytes_in': 2, 'bytes_out': 3, 'request_errors': 4, 'total_connections': 5, } class TestListener(base.TestCase): def test_basic(self): test_listener = listener.Listener() self.assertEqual('listener', test_listener.resource_key) self.assertEqual('listeners', test_listener.resources_key) self.assertEqual('/lbaas/listeners', test_listener.base_path) self.assertTrue(test_listener.allow_create) self.assertTrue(test_listener.allow_fetch) self.assertTrue(test_listener.allow_commit) self.assertTrue(test_listener.allow_delete) self.assertTrue(test_listener.allow_list) def test_make_it(self): test_listener = listener.Listener(**EXAMPLE) self.assertTrue(test_listener.is_admin_state_up) self.assertEqual(EXAMPLE['allowed_cidrs'], test_listener.allowed_cidrs) 
self.assertEqual( EXAMPLE['connection_limit'], test_listener.connection_limit ) self.assertEqual( EXAMPLE['default_pool_id'], test_listener.default_pool_id ) self.assertEqual(EXAMPLE['description'], test_listener.description) self.assertEqual(EXAMPLE['id'], test_listener.id) self.assertEqual( EXAMPLE['insert_headers'], test_listener.insert_headers ) self.assertEqual(EXAMPLE['l7policies'], test_listener.l7_policies) self.assertEqual( EXAMPLE['loadbalancers'], test_listener.load_balancers ) self.assertEqual(EXAMPLE['name'], test_listener.name) self.assertEqual(EXAMPLE['project_id'], test_listener.project_id) self.assertEqual(EXAMPLE['protocol'], test_listener.protocol) self.assertEqual(EXAMPLE['protocol_port'], test_listener.protocol_port) self.assertEqual( EXAMPLE['default_tls_container_ref'], test_listener.default_tls_container_ref, ) self.assertEqual( EXAMPLE['sni_container_refs'], test_listener.sni_container_refs ) self.assertEqual(EXAMPLE['created_at'], test_listener.created_at) self.assertEqual(EXAMPLE['updated_at'], test_listener.updated_at) self.assertTrue(test_listener.is_hsts_include_subdomains) self.assertEqual(EXAMPLE['hsts_max_age'], test_listener.hsts_max_age) self.assertFalse(test_listener.is_hsts_preload) self.assertEqual( EXAMPLE['provisioning_status'], test_listener.provisioning_status ) self.assertEqual( EXAMPLE['operating_status'], test_listener.operating_status ) self.assertEqual( EXAMPLE['timeout_client_data'], test_listener.timeout_client_data ) self.assertEqual( EXAMPLE['timeout_member_connect'], test_listener.timeout_member_connect, ) self.assertEqual( EXAMPLE['timeout_member_data'], test_listener.timeout_member_data ) self.assertEqual( EXAMPLE['timeout_tcp_inspect'], test_listener.timeout_tcp_inspect ) self.assertEqual(EXAMPLE['tls_ciphers'], test_listener.tls_ciphers) self.assertEqual(EXAMPLE['tls_versions'], test_listener.tls_versions) self.assertEqual( EXAMPLE['alpn_protocols'], test_listener.alpn_protocols ) self.assertDictEqual( { 
'limit': 'limit', 'marker': 'marker', 'created_at': 'created_at', 'updated_at': 'updated_at', 'description': 'description', 'name': 'name', 'project_id': 'project_id', 'tags': 'tags', 'any_tags': 'tags-any', 'not_tags': 'not-tags', 'not_any_tags': 'not-tags-any', 'operating_status': 'operating_status', 'provisioning_status': 'provisioning_status', 'is_admin_state_up': 'admin_state_up', 'is_hsts_include_subdomains': 'hsts_include_subdomains', 'hsts_max_age': 'hsts_max_age', 'is_hsts_preload': 'hsts_preload', 'allowed_cidrs': 'allowed_cidrs', 'connection_limit': 'connection_limit', 'default_pool_id': 'default_pool_id', 'default_tls_container_ref': 'default_tls_container_ref', 'sni_container_refs': 'sni_container_refs', 'insert_headers': 'insert_headers', 'load_balancer_id': 'load_balancer_id', 'protocol': 'protocol', 'protocol_port': 'protocol_port', 'timeout_client_data': 'timeout_client_data', 'timeout_member_connect': 'timeout_member_connect', 'timeout_member_data': 'timeout_member_data', 'timeout_tcp_inspect': 'timeout_tcp_inspect', 'tls_ciphers': 'tls_ciphers', 'tls_versions': 'tls_versions', 'alpn_protocols': 'alpn_protocols', }, test_listener._query_mapping._mapping, ) class TestListenerStats(base.TestCase): def test_basic(self): test_listener = listener.ListenerStats() self.assertEqual('stats', test_listener.resource_key) self.assertEqual( '/lbaas/listeners/%(listener_id)s/stats', test_listener.base_path ) self.assertFalse(test_listener.allow_create) self.assertTrue(test_listener.allow_fetch) self.assertFalse(test_listener.allow_delete) self.assertFalse(test_listener.allow_list) self.assertFalse(test_listener.allow_commit) def test_make_it(self): test_listener = listener.ListenerStats(**EXAMPLE_STATS) self.assertEqual( EXAMPLE_STATS['active_connections'], test_listener.active_connections, ) self.assertEqual(EXAMPLE_STATS['bytes_in'], test_listener.bytes_in) self.assertEqual(EXAMPLE_STATS['bytes_out'], test_listener.bytes_out) self.assertEqual( 
EXAMPLE_STATS['request_errors'], test_listener.request_errors ) self.assertEqual( EXAMPLE_STATS['total_connections'], test_listener.total_connections ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_load_balancer.py0000664000175000017500000002024000000000000027262 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid from openstack.load_balancer.v2 import load_balancer from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'availability_zone': 'my_fake_az', 'created_at': '2017-07-17T12:14:57.233772', 'description': 'fake_description', 'flavor_id': uuid.uuid4(), 'id': IDENTIFIER, 'listeners': [{'id', uuid.uuid4()}], 'name': 'test_load_balancer', 'operating_status': 'ONLINE', 'pools': [{'id', uuid.uuid4()}], 'project_id': uuid.uuid4(), 'provider': 'fake_provider', 'provisioning_status': 'ACTIVE', 'updated_at': '2017-07-17T12:16:57.233772', 'vip_address': '192.0.2.5', 'vip_network_id': uuid.uuid4(), 'vip_port_id': uuid.uuid4(), 'vip_subnet_id': uuid.uuid4(), 'vip_qos_policy_id': uuid.uuid4(), 'additional_vips': [ {'subnet_id': uuid.uuid4(), 'ip_address': '192.0.2.6'}, {'subnet_id': uuid.uuid4(), 'ip_address': '192.0.2.7'}, ], } EXAMPLE_STATS = { 'active_connections': 1, 'bytes_in': 2, 'bytes_out': 3, 'request_errors': 4, 'total_connections': 5, } class 
TestLoadBalancer(base.TestCase): def test_basic(self): test_load_balancer = load_balancer.LoadBalancer() self.assertEqual('loadbalancer', test_load_balancer.resource_key) self.assertEqual('loadbalancers', test_load_balancer.resources_key) self.assertEqual('/lbaas/loadbalancers', test_load_balancer.base_path) self.assertTrue(test_load_balancer.allow_create) self.assertTrue(test_load_balancer.allow_fetch) self.assertTrue(test_load_balancer.allow_delete) self.assertTrue(test_load_balancer.allow_list) self.assertTrue(test_load_balancer.allow_commit) def test_make_it(self): test_load_balancer = load_balancer.LoadBalancer(**EXAMPLE) self.assertTrue(test_load_balancer.is_admin_state_up) self.assertEqual( EXAMPLE['availability_zone'], test_load_balancer.availability_zone ) self.assertEqual(EXAMPLE['created_at'], test_load_balancer.created_at) self.assertEqual( EXAMPLE['description'], test_load_balancer.description ) self.assertEqual(EXAMPLE['flavor_id'], test_load_balancer.flavor_id) self.assertEqual(EXAMPLE['id'], test_load_balancer.id) self.assertEqual(EXAMPLE['listeners'], test_load_balancer.listeners) self.assertEqual(EXAMPLE['name'], test_load_balancer.name) self.assertEqual( EXAMPLE['operating_status'], test_load_balancer.operating_status ) self.assertEqual(EXAMPLE['pools'], test_load_balancer.pools) self.assertEqual(EXAMPLE['project_id'], test_load_balancer.project_id) self.assertEqual(EXAMPLE['provider'], test_load_balancer.provider) self.assertEqual( EXAMPLE['provisioning_status'], test_load_balancer.provisioning_status, ) self.assertEqual(EXAMPLE['updated_at'], test_load_balancer.updated_at) self.assertEqual( EXAMPLE['vip_address'], test_load_balancer.vip_address ) self.assertEqual( EXAMPLE['vip_network_id'], test_load_balancer.vip_network_id ) self.assertEqual( EXAMPLE['vip_port_id'], test_load_balancer.vip_port_id ) self.assertEqual( EXAMPLE['vip_subnet_id'], test_load_balancer.vip_subnet_id ) self.assertEqual( EXAMPLE['vip_qos_policy_id'], 
test_load_balancer.vip_qos_policy_id ) self.assertEqual( EXAMPLE['additional_vips'], test_load_balancer.additional_vips ) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'availability_zone': 'availability_zone', 'description': 'description', 'flavor_id': 'flavor_id', 'name': 'name', 'project_id': 'project_id', 'provider': 'provider', 'operating_status': 'operating_status', 'provisioning_status': 'provisioning_status', 'is_admin_state_up': 'admin_state_up', 'vip_address': 'vip_address', 'vip_network_id': 'vip_network_id', 'vip_port_id': 'vip_port_id', 'vip_subnet_id': 'vip_subnet_id', 'vip_qos_policy_id': 'vip_qos_policy_id', 'tags': 'tags', 'any_tags': 'tags-any', 'not_tags': 'not-tags', 'not_any_tags': 'not-tags-any', }, test_load_balancer._query_mapping._mapping, ) def test_delete_non_cascade(self): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sot = load_balancer.LoadBalancer(**EXAMPLE) sot.cascade = False sot._translate_response = mock.Mock() sot.delete(sess) url = 'lbaas/loadbalancers/{lb}'.format(lb=EXAMPLE['id']) params = {} sess.delete.assert_called_with(url, params=params) sot._translate_response.assert_called_once_with( resp, error_message=None, has_body=False, ) def test_delete_cascade(self): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sot = load_balancer.LoadBalancer(**EXAMPLE) sot.cascade = True sot._translate_response = mock.Mock() sot.delete(sess) url = 'lbaas/loadbalancers/{lb}'.format(lb=EXAMPLE['id']) params = {'cascade': True} sess.delete.assert_called_with(url, params=params) sot._translate_response.assert_called_once_with( resp, error_message=None, has_body=False, ) class TestLoadBalancerStats(base.TestCase): def test_basic(self): test_load_balancer = load_balancer.LoadBalancerStats() self.assertEqual('stats', test_load_balancer.resource_key) self.assertEqual( '/lbaas/loadbalancers/%(lb_id)s/stats', test_load_balancer.base_path, ) self.assertFalse(test_load_balancer.allow_create) 
self.assertTrue(test_load_balancer.allow_fetch) self.assertFalse(test_load_balancer.allow_delete) self.assertFalse(test_load_balancer.allow_list) self.assertFalse(test_load_balancer.allow_commit) def test_make_it(self): test_load_balancer = load_balancer.LoadBalancerStats(**EXAMPLE_STATS) self.assertEqual( EXAMPLE_STATS['active_connections'], test_load_balancer.active_connections, ) self.assertEqual( EXAMPLE_STATS['bytes_in'], test_load_balancer.bytes_in ) self.assertEqual( EXAMPLE_STATS['bytes_out'], test_load_balancer.bytes_out ) self.assertEqual( EXAMPLE_STATS['request_errors'], test_load_balancer.request_errors ) self.assertEqual( EXAMPLE_STATS['total_connections'], test_load_balancer.total_connections, ) class TestLoadBalancerFailover(base.TestCase): def test_basic(self): test_load_balancer = load_balancer.LoadBalancerFailover() self.assertEqual( '/lbaas/loadbalancers/%(lb_id)s/failover', test_load_balancer.base_path, ) self.assertFalse(test_load_balancer.allow_create) self.assertFalse(test_load_balancer.allow_fetch) self.assertFalse(test_load_balancer.allow_delete) self.assertFalse(test_load_balancer.allow_list) self.assertTrue(test_load_balancer.allow_commit) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_member.py0000664000175000017500000000660700000000000025776 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.load_balancer.v2 import member from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'address': '192.0.2.16', 'admin_state_up': True, 'id': IDENTIFIER, 'monitor_address': '192.0.2.17', 'monitor_port': 9, 'name': 'test_member', 'pool_id': uuid.uuid4(), 'project_id': uuid.uuid4(), 'protocol_port': 5, 'subnet_id': uuid.uuid4(), 'weight': 7, 'backup': False, } class TestPoolMember(base.TestCase): def test_basic(self): test_member = member.Member() self.assertEqual('member', test_member.resource_key) self.assertEqual('members', test_member.resources_key) self.assertEqual( '/lbaas/pools/%(pool_id)s/members', test_member.base_path ) self.assertTrue(test_member.allow_create) self.assertTrue(test_member.allow_fetch) self.assertTrue(test_member.allow_commit) self.assertTrue(test_member.allow_delete) self.assertTrue(test_member.allow_list) def test_make_it(self): test_member = member.Member(**EXAMPLE) self.assertEqual(EXAMPLE['address'], test_member.address) self.assertTrue(test_member.is_admin_state_up) self.assertEqual(EXAMPLE['id'], test_member.id) self.assertEqual( EXAMPLE['monitor_address'], test_member.monitor_address ) self.assertEqual(EXAMPLE['monitor_port'], test_member.monitor_port) self.assertEqual(EXAMPLE['name'], test_member.name) self.assertEqual(EXAMPLE['pool_id'], test_member.pool_id) self.assertEqual(EXAMPLE['project_id'], test_member.project_id) self.assertEqual(EXAMPLE['protocol_port'], test_member.protocol_port) self.assertEqual(EXAMPLE['subnet_id'], test_member.subnet_id) self.assertEqual(EXAMPLE['weight'], test_member.weight) self.assertFalse(test_member.backup) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'created_at': 'created_at', 'updated_at': 'updated_at', 'name': 'name', 'project_id': 'project_id', 'tags': 'tags', 'any_tags': 'tags-any', 'not_tags': 'not-tags', 'not_any_tags': 'not-tags-any', 'operating_status': 'operating_status', 'provisioning_status': 'provisioning_status', 
'is_admin_state_up': 'admin_state_up', 'address': 'address', 'protocol_port': 'protocol_port', 'subnet_id': 'subnet_id', 'weight': 'weight', 'monitor_address': 'monitor_address', 'monitor_port': 'monitor_port', 'backup': 'backup', }, test_member._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_pool.py0000664000175000017500000001271200000000000025472 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.load_balancer.v2 import pool from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'name': 'test_pool', 'description': 'fake_description', 'admin_state_up': True, 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE', 'protocol': 'HTTP', 'listener_id': uuid.uuid4(), 'loadbalancer_id': uuid.uuid4(), 'lb_algorithm': 'ROUND_ROBIN', 'session_persistence': {"type": "SOURCE_IP"}, 'project_id': uuid.uuid4(), 'loadbalancers': [{'id': uuid.uuid4()}], 'listeners': [{'id': uuid.uuid4()}], 'created_at': '2017-07-17T12:14:57.233772', 'updated_at': '2017-07-17T12:16:57.233772', 'health_monitor': 'healthmonitor', 'health_monitor_id': uuid.uuid4(), 'members': [{'id': uuid.uuid4()}], 'tls_enabled': True, 'tls_ciphers': 'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256', 'tls_versions': ['TLSv1.1', 'TLSv1.2'], 'alpn_protocols': ['h2', 'http/1.1', 'http/1.0'], 'ca_tls_container_ref': ( 'http://198.51.100.10:9311/v1/containers/' 'a570068c-d295-4780-91d4-3046a325db52' ), 'crl_container_ref': ( 'http://198.51.100.10:9311/v1/containers/' 'a570068c-d295-4780-91d4-3046a325db53' ), } class TestPool(base.TestCase): def test_basic(self): test_pool = pool.Pool() self.assertEqual('pool', test_pool.resource_key) self.assertEqual('pools', test_pool.resources_key) self.assertEqual('/lbaas/pools', test_pool.base_path) self.assertTrue(test_pool.allow_create) self.assertTrue(test_pool.allow_fetch) self.assertTrue(test_pool.allow_delete) self.assertTrue(test_pool.allow_list) self.assertTrue(test_pool.allow_commit) def test_make_it(self): test_pool = pool.Pool(**EXAMPLE) self.assertEqual(EXAMPLE['name'], test_pool.name), self.assertEqual(EXAMPLE['description'], test_pool.description) self.assertEqual( EXAMPLE['admin_state_up'], test_pool.is_admin_state_up ) self.assertEqual( EXAMPLE['provisioning_status'], test_pool.provisioning_status ) self.assertEqual(EXAMPLE['protocol'], test_pool.protocol) self.assertEqual( 
EXAMPLE['operating_status'], test_pool.operating_status ) self.assertEqual(EXAMPLE['listener_id'], test_pool.listener_id) self.assertEqual(EXAMPLE['loadbalancer_id'], test_pool.loadbalancer_id) self.assertEqual(EXAMPLE['lb_algorithm'], test_pool.lb_algorithm) self.assertEqual( EXAMPLE['session_persistence'], test_pool.session_persistence ) self.assertEqual(EXAMPLE['project_id'], test_pool.project_id) self.assertEqual(EXAMPLE['loadbalancers'], test_pool.loadbalancers) self.assertEqual(EXAMPLE['listeners'], test_pool.listeners) self.assertEqual(EXAMPLE['created_at'], test_pool.created_at) self.assertEqual(EXAMPLE['updated_at'], test_pool.updated_at) self.assertEqual( EXAMPLE['health_monitor_id'], test_pool.health_monitor_id ) self.assertEqual(EXAMPLE['members'], test_pool.members) self.assertEqual(EXAMPLE['tls_enabled'], test_pool.tls_enabled) self.assertEqual(EXAMPLE['tls_ciphers'], test_pool.tls_ciphers) self.assertEqual(EXAMPLE['tls_versions'], test_pool.tls_versions) self.assertEqual(EXAMPLE['alpn_protocols'], test_pool.alpn_protocols) self.assertEqual( EXAMPLE['ca_tls_container_ref'], test_pool.ca_tls_container_ref ) self.assertEqual( EXAMPLE['crl_container_ref'], test_pool.crl_container_ref ) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'created_at': 'created_at', 'updated_at': 'updated_at', 'description': 'description', 'name': 'name', 'project_id': 'project_id', 'tags': 'tags', 'any_tags': 'tags-any', 'not_tags': 'not-tags', 'not_any_tags': 'not-tags-any', 'operating_status': 'operating_status', 'provisioning_status': 'provisioning_status', 'is_admin_state_up': 'admin_state_up', 'health_monitor_id': 'health_monitor_id', 'lb_algorithm': 'lb_algorithm', 'listener_id': 'listener_id', 'loadbalancer_id': 'loadbalancer_id', 'protocol': 'protocol', 'tls_enabled': 'tls_enabled', 'tls_ciphers': 'tls_ciphers', 'tls_versions': 'tls_versions', 'alpn_protocols': 'alpn_protocols', 'ca_tls_container_ref': 'ca_tls_container_ref', 'crl_container_ref': 
'crl_container_ref', }, test_pool._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_provider.py0000664000175000017500000000544700000000000026362 0ustar00zuulzuul00000000000000# Copyright 2019 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.load_balancer.v2 import provider from openstack.tests.unit import base EXAMPLE = {'name': 'best', 'description': 'The best provider'} class TestProvider(base.TestCase): def test_basic(self): test_provider = provider.Provider() self.assertEqual('providers', test_provider.resources_key) self.assertEqual('/lbaas/providers', test_provider.base_path) self.assertFalse(test_provider.allow_create) self.assertFalse(test_provider.allow_fetch) self.assertFalse(test_provider.allow_commit) self.assertFalse(test_provider.allow_delete) self.assertTrue(test_provider.allow_list) def test_make_it(self): test_provider = provider.Provider(**EXAMPLE) self.assertEqual(EXAMPLE['name'], test_provider.name) self.assertEqual(EXAMPLE['description'], test_provider.description) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'description': 'description', }, test_provider._query_mapping._mapping, ) class TestProviderFlavorCapabilities(base.TestCase): def test_basic(self): test_flav_cap = provider.ProviderFlavorCapabilities() self.assertEqual('flavor_capabilities', 
test_flav_cap.resources_key) self.assertEqual( '/lbaas/providers/%(provider)s/flavor_capabilities', test_flav_cap.base_path, ) self.assertFalse(test_flav_cap.allow_create) self.assertFalse(test_flav_cap.allow_fetch) self.assertFalse(test_flav_cap.allow_commit) self.assertFalse(test_flav_cap.allow_delete) self.assertTrue(test_flav_cap.allow_list) def test_make_it(self): test_flav_cap = provider.ProviderFlavorCapabilities(**EXAMPLE) self.assertEqual(EXAMPLE['name'], test_flav_cap.name) self.assertEqual(EXAMPLE['description'], test_flav_cap.description) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'description': 'description', }, test_flav_cap._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_quota.py0000664000175000017500000000566300000000000025661 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.load_balancer.v2 import quota from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'load_balancer': 1, 'listener': 2, 'pool': 3, 'health_monitor': 4, 'member': 5, 'project_id': 6, } class TestQuota(base.TestCase): def test_basic(self): sot = quota.Quota() self.assertEqual('quota', sot.resource_key) self.assertEqual('quotas', sot.resources_key) self.assertEqual('/lbaas/quotas', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = quota.Quota(**EXAMPLE) self.assertEqual(EXAMPLE['load_balancer'], sot.load_balancers) self.assertEqual(EXAMPLE['listener'], sot.listeners) self.assertEqual(EXAMPLE['pool'], sot.pools) self.assertEqual(EXAMPLE['health_monitor'], sot.health_monitors) self.assertEqual(EXAMPLE['member'], sot.members) self.assertEqual(EXAMPLE['project_id'], sot.project_id) def test_prepare_request(self): body = {'id': 'ABCDEFGH', 'load_balancer': '12345'} quota_obj = quota.Quota(**body) response = quota_obj._prepare_request() self.assertNotIn('id', response) class TestQuotaDefault(base.TestCase): def test_basic(self): sot = quota.QuotaDefault() self.assertEqual('quota', sot.resource_key) self.assertEqual('quotas', sot.resources_key) self.assertEqual('/lbaas/quotas/defaults', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) self.assertTrue(sot.allow_retrieve) def test_make_it(self): sot = quota.Quota(**EXAMPLE) self.assertEqual(EXAMPLE['load_balancer'], sot.load_balancers) self.assertEqual(EXAMPLE['listener'], sot.listeners) self.assertEqual(EXAMPLE['pool'], sot.pools) self.assertEqual(EXAMPLE['health_monitor'], sot.health_monitors) self.assertEqual(EXAMPLE['member'], sot.members) self.assertEqual(EXAMPLE['project_id'], 
sot.project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/test_version.py0000664000175000017500000000261100000000000026203 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.load_balancer import version from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'status': '3', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4734232 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/v2/0000775000175000017500000000000000000000000023434 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/load_balancer/v2/__init__.py0000664000175000017500000000000000000000000025533 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/load_balancer/v2/test_proxy.py0000664000175000017500000004074400000000000026237 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid from openstack.load_balancer.v2 import _proxy from openstack.load_balancer.v2 import amphora from openstack.load_balancer.v2 import availability_zone from openstack.load_balancer.v2 import availability_zone_profile from openstack.load_balancer.v2 import flavor from openstack.load_balancer.v2 import flavor_profile from openstack.load_balancer.v2 import health_monitor from openstack.load_balancer.v2 import l7_policy from openstack.load_balancer.v2 import l7_rule from openstack.load_balancer.v2 import listener from openstack.load_balancer.v2 import load_balancer as lb from openstack.load_balancer.v2 import member from openstack.load_balancer.v2 import pool from openstack.load_balancer.v2 import provider from openstack.load_balancer.v2 import quota from openstack import proxy as proxy_base from openstack.tests.unit import test_proxy_base class TestLoadBalancerProxy(test_proxy_base.TestProxyBase): LB_ID = uuid.uuid4() LISTENER_ID = uuid.uuid4() POOL_ID = uuid.uuid4() L7_POLICY_ID = uuid.uuid4() AMPHORA = 
'amphora' AMPHORA_ID = uuid.uuid4() def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_load_balancers(self): self.verify_list(self.proxy.load_balancers, lb.LoadBalancer) def test_load_balancer_get(self): self.verify_get(self.proxy.get_load_balancer, lb.LoadBalancer) def test_load_balancer_stats_get(self): self.verify_get( self.proxy.get_load_balancer_statistics, lb.LoadBalancerStats, method_args=[self.LB_ID], expected_args=[], expected_kwargs={'lb_id': self.LB_ID, 'requires_id': False}, ) def test_load_balancer_create(self): self.verify_create(self.proxy.create_load_balancer, lb.LoadBalancer) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_load_balancer_delete_non_cascade(self, mock_get_resource): fake_load_balancer = mock.Mock() fake_load_balancer.id = "load_balancer_id" mock_get_resource.return_value = fake_load_balancer self._verify( "openstack.proxy.Proxy._delete", self.proxy.delete_load_balancer, method_args=["resource_or_id", True, False], expected_args=[lb.LoadBalancer, fake_load_balancer], expected_kwargs={"ignore_missing": True}, ) self.assertFalse(fake_load_balancer.cascade) mock_get_resource.assert_called_once_with( lb.LoadBalancer, "resource_or_id" ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_load_balancer_delete_cascade(self, mock_get_resource): fake_load_balancer = mock.Mock() fake_load_balancer.id = "load_balancer_id" mock_get_resource.return_value = fake_load_balancer self._verify( "openstack.proxy.Proxy._delete", self.proxy.delete_load_balancer, method_args=["resource_or_id", True, True], expected_args=[lb.LoadBalancer, fake_load_balancer], expected_kwargs={"ignore_missing": True}, ) self.assertTrue(fake_load_balancer.cascade) mock_get_resource.assert_called_once_with( lb.LoadBalancer, "resource_or_id" ) def test_load_balancer_find(self): self.verify_find(self.proxy.find_load_balancer, lb.LoadBalancer) def test_load_balancer_update(self): 
self.verify_update(self.proxy.update_load_balancer, lb.LoadBalancer) def test_load_balancer_failover(self): self.verify_update( self.proxy.failover_load_balancer, lb.LoadBalancerFailover, method_args=[self.LB_ID], expected_args=[], expected_kwargs={'lb_id': self.LB_ID}, ) def test_listeners(self): self.verify_list(self.proxy.listeners, listener.Listener) def test_listener_get(self): self.verify_get(self.proxy.get_listener, listener.Listener) def test_listener_stats_get(self): self.verify_get( self.proxy.get_listener_statistics, listener.ListenerStats, method_args=[self.LISTENER_ID], expected_args=[], expected_kwargs={ 'listener_id': self.LISTENER_ID, 'requires_id': False, }, ) def test_listener_create(self): self.verify_create(self.proxy.create_listener, listener.Listener) def test_listener_delete(self): self.verify_delete(self.proxy.delete_listener, listener.Listener, True) def test_listener_find(self): self.verify_find(self.proxy.find_listener, listener.Listener) def test_listener_update(self): self.verify_update(self.proxy.update_listener, listener.Listener) def test_pools(self): self.verify_list(self.proxy.pools, pool.Pool) def test_pool_get(self): self.verify_get(self.proxy.get_pool, pool.Pool) def test_pool_create(self): self.verify_create(self.proxy.create_pool, pool.Pool) def test_pool_delete(self): self.verify_delete(self.proxy.delete_pool, pool.Pool, True) def test_pool_find(self): self.verify_find(self.proxy.find_pool, pool.Pool) def test_pool_update(self): self.verify_update(self.proxy.update_pool, pool.Pool) def test_members(self): self.verify_list( self.proxy.members, member.Member, method_kwargs={'pool': self.POOL_ID}, expected_kwargs={'pool_id': self.POOL_ID}, ) def test_member_get(self): self.verify_get( self.proxy.get_member, member.Member, method_kwargs={'pool': self.POOL_ID}, expected_kwargs={'pool_id': self.POOL_ID}, ) def test_member_create(self): self.verify_create( self.proxy.create_member, member.Member, method_kwargs={'pool': 
self.POOL_ID}, expected_kwargs={'pool_id': self.POOL_ID}, ) def test_member_delete(self): self.verify_delete( self.proxy.delete_member, member.Member, ignore_missing=True, method_kwargs={'pool': self.POOL_ID}, expected_kwargs={'pool_id': self.POOL_ID, 'ignore_missing': True}, ) def test_member_find(self): self._verify( 'openstack.proxy.Proxy._find', self.proxy.find_member, method_args=["MEMBER", self.POOL_ID], expected_args=[member.Member, "MEMBER"], expected_kwargs={"pool_id": self.POOL_ID, "ignore_missing": True}, ) def test_member_update(self): self._verify( 'openstack.proxy.Proxy._update', self.proxy.update_member, method_args=["MEMBER", self.POOL_ID], expected_args=[member.Member, "MEMBER"], expected_kwargs={"pool_id": self.POOL_ID}, ) def test_health_monitors(self): self.verify_list( self.proxy.health_monitors, health_monitor.HealthMonitor ) def test_health_monitor_get(self): self.verify_get( self.proxy.get_health_monitor, health_monitor.HealthMonitor ) def test_health_monitor_create(self): self.verify_create( self.proxy.create_health_monitor, health_monitor.HealthMonitor ) def test_health_monitor_delete(self): self.verify_delete( self.proxy.delete_health_monitor, health_monitor.HealthMonitor, True, ) def test_health_monitor_find(self): self.verify_find( self.proxy.find_health_monitor, health_monitor.HealthMonitor ) def test_health_monitor_update(self): self.verify_update( self.proxy.update_health_monitor, health_monitor.HealthMonitor ) def test_l7_policies(self): self.verify_list(self.proxy.l7_policies, l7_policy.L7Policy) def test_l7_policy_get(self): self.verify_get(self.proxy.get_l7_policy, l7_policy.L7Policy) def test_l7_policy_create(self): self.verify_create(self.proxy.create_l7_policy, l7_policy.L7Policy) def test_l7_policy_delete(self): self.verify_delete( self.proxy.delete_l7_policy, l7_policy.L7Policy, True ) def test_l7_policy_find(self): self.verify_find(self.proxy.find_l7_policy, l7_policy.L7Policy) def test_l7_policy_update(self): 
self.verify_update(self.proxy.update_l7_policy, l7_policy.L7Policy) def test_l7_rules(self): self.verify_list( self.proxy.l7_rules, l7_rule.L7Rule, method_kwargs={'l7_policy': self.L7_POLICY_ID}, expected_kwargs={'l7policy_id': self.L7_POLICY_ID}, ) def test_l7_rule_get(self): self.verify_get( self.proxy.get_l7_rule, l7_rule.L7Rule, method_kwargs={'l7_policy': self.L7_POLICY_ID}, expected_kwargs={'l7policy_id': self.L7_POLICY_ID}, ) def test_l7_rule_create(self): self.verify_create( self.proxy.create_l7_rule, l7_rule.L7Rule, method_kwargs={'l7_policy': self.L7_POLICY_ID}, expected_kwargs={'l7policy_id': self.L7_POLICY_ID}, ) def test_l7_rule_delete(self): self.verify_delete( self.proxy.delete_l7_rule, l7_rule.L7Rule, ignore_missing=True, method_kwargs={'l7_policy': self.L7_POLICY_ID}, expected_kwargs={'l7policy_id': self.L7_POLICY_ID}, ) def test_l7_rule_find(self): self._verify( 'openstack.proxy.Proxy._find', self.proxy.find_l7_rule, method_args=["RULE", self.L7_POLICY_ID], expected_args=[l7_rule.L7Rule, "RULE"], expected_kwargs={ "l7policy_id": self.L7_POLICY_ID, "ignore_missing": True, }, ) def test_l7_rule_update(self): self._verify( 'openstack.proxy.Proxy._update', self.proxy.update_l7_rule, method_args=["RULE", self.L7_POLICY_ID], expected_args=[l7_rule.L7Rule, "RULE"], expected_kwargs={"l7policy_id": self.L7_POLICY_ID}, ) def test_quotas(self): self.verify_list(self.proxy.quotas, quota.Quota) def test_quota_get(self): self.verify_get(self.proxy.get_quota, quota.Quota) def test_quota_update(self): self.verify_update(self.proxy.update_quota, quota.Quota) def test_quota_default_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_quota_default, expected_args=[quota.QuotaDefault], expected_kwargs={'requires_id': False}, ) def test_quota_delete(self): self.verify_delete(self.proxy.delete_quota, quota.Quota, False) def test_quota_delete_ignore(self): self.verify_delete(self.proxy.delete_quota, quota.Quota, True) def test_providers(self): 
self.verify_list(self.proxy.providers, provider.Provider) def test_provider_flavor_capabilities(self): self.verify_list( self.proxy.provider_flavor_capabilities, provider.ProviderFlavorCapabilities, method_args=[self.AMPHORA], expected_args=[], expected_kwargs={'provider': self.AMPHORA}, ) def test_flavor_profiles(self): self.verify_list( self.proxy.flavor_profiles, flavor_profile.FlavorProfile ) def test_flavor_profile_get(self): self.verify_get( self.proxy.get_flavor_profile, flavor_profile.FlavorProfile ) def test_flavor_profile_create(self): self.verify_create( self.proxy.create_flavor_profile, flavor_profile.FlavorProfile ) def test_flavor_profile_delete(self): self.verify_delete( self.proxy.delete_flavor_profile, flavor_profile.FlavorProfile, True, ) def test_flavor_profile_find(self): self.verify_find( self.proxy.find_flavor_profile, flavor_profile.FlavorProfile ) def test_flavor_profile_update(self): self.verify_update( self.proxy.update_flavor_profile, flavor_profile.FlavorProfile ) def test_flavors(self): self.verify_list(self.proxy.flavors, flavor.Flavor) def test_flavor_get(self): self.verify_get(self.proxy.get_flavor, flavor.Flavor) def test_flavor_create(self): self.verify_create(self.proxy.create_flavor, flavor.Flavor) def test_flavor_delete(self): self.verify_delete(self.proxy.delete_flavor, flavor.Flavor, True) def test_flavor_find(self): self.verify_find(self.proxy.find_flavor, flavor.Flavor) def test_flavor_update(self): self.verify_update(self.proxy.update_flavor, flavor.Flavor) def test_amphorae(self): self.verify_list(self.proxy.amphorae, amphora.Amphora) def test_amphora_get(self): self.verify_get(self.proxy.get_amphora, amphora.Amphora) def test_amphora_find(self): self.verify_find(self.proxy.find_amphora, amphora.Amphora) def test_amphora_configure(self): self.verify_update( self.proxy.configure_amphora, amphora.AmphoraConfig, method_args=[self.AMPHORA_ID], expected_args=[], expected_kwargs={'amphora_id': self.AMPHORA_ID}, ) def 
test_amphora_failover(self): self.verify_update( self.proxy.failover_amphora, amphora.AmphoraFailover, method_args=[self.AMPHORA_ID], expected_args=[], expected_kwargs={'amphora_id': self.AMPHORA_ID}, ) def test_availability_zone_profiles(self): self.verify_list( self.proxy.availability_zone_profiles, availability_zone_profile.AvailabilityZoneProfile, ) def test_availability_zone_profile_get(self): self.verify_get( self.proxy.get_availability_zone_profile, availability_zone_profile.AvailabilityZoneProfile, ) def test_availability_zone_profile_create(self): self.verify_create( self.proxy.create_availability_zone_profile, availability_zone_profile.AvailabilityZoneProfile, ) def test_availability_zone_profile_delete(self): self.verify_delete( self.proxy.delete_availability_zone_profile, availability_zone_profile.AvailabilityZoneProfile, True, ) def test_availability_zone_profile_find(self): self.verify_find( self.proxy.find_availability_zone_profile, availability_zone_profile.AvailabilityZoneProfile, ) def test_availability_zone_profile_update(self): self.verify_update( self.proxy.update_availability_zone_profile, availability_zone_profile.AvailabilityZoneProfile, ) def test_availability_zones(self): self.verify_list( self.proxy.availability_zones, availability_zone.AvailabilityZone ) def test_availability_zone_get(self): self.verify_get( self.proxy.get_availability_zone, availability_zone.AvailabilityZone, ) def test_availability_zone_create(self): self.verify_create( self.proxy.create_availability_zone, availability_zone.AvailabilityZone, ) def test_availability_zone_delete(self): self.verify_delete( self.proxy.delete_availability_zone, availability_zone.AvailabilityZone, True, ) def test_availability_zone_find(self): self.verify_find( self.proxy.find_availability_zone, availability_zone.AvailabilityZone, ) def test_availability_zone_update(self): self.verify_update( self.proxy.update_availability_zone, availability_zone.AvailabilityZone, ) 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4734232 openstacksdk-4.0.0/openstack/tests/unit/message/0000775000175000017500000000000000000000000021763 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/message/__init__.py0000664000175000017500000000000000000000000024062 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/message/test_version.py0000664000175000017500000000260300000000000025062 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.message import version from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'status': '3', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.477425 openstacksdk-4.0.0/openstack/tests/unit/message/v2/0000775000175000017500000000000000000000000022312 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/message/v2/__init__.py0000664000175000017500000000000000000000000024411 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/message/v2/test_claim.py0000664000175000017500000002120100000000000025004 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import uuid from openstack.message.v2 import claim from openstack.tests.unit import base FAKE1 = { "age": 1632, "id": "576b54963990b48c644bb7e7", "grace": 3600, "limit": 10, "messages": [{"id": "1"}, {"id": "2"}], "ttl": 3600, "queue_name": "queue1", } FAKE2 = { "age": 1632, "id": "576b54963990b48c644bb7e7", "grace": 3600, "limit": 10, "messages": [{"id": "1"}, {"id": "2"}], "ttl": 3600, "queue_name": "queue1", "client_id": "OLD_CLIENT_ID", "project_id": "OLD_PROJECT_ID", } class TestClaim(base.TestCase): def test_basic(self): sot = claim.Claim() self.assertEqual("claims", sot.resources_key) self.assertEqual("/queues/%(queue_name)s/claims", sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_commit) def test_make_it(self): sot = claim.Claim.new(**FAKE2) self.assertEqual(FAKE2["age"], sot.age) self.assertEqual(FAKE2["id"], sot.id) self.assertEqual(FAKE2["grace"], sot.grace) self.assertEqual(FAKE2["limit"], sot.limit) self.assertEqual(FAKE2["messages"], sot.messages) self.assertEqual(FAKE2["ttl"], sot.ttl) self.assertEqual(FAKE2["queue_name"], sot.queue_name) self.assertEqual(FAKE2["client_id"], sot.client_id) self.assertEqual(FAKE2["project_id"], sot.project_id) @mock.patch.object(uuid, "uuid4") def test_create_204_resp(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.post.return_value = resp resp.status_code = 204 sess.get_project_id.return_value = "NEW_PROJECT_ID" mock_uuid.return_value = "NEW_CLIENT_ID" FAKE = copy.deepcopy(FAKE1) sot = claim.Claim(**FAKE1) res = sot.create(sess) url = "/queues/{queue}/claims".format(queue=FAKE.pop("queue_name")) headers = { "Client-ID": "NEW_CLIENT_ID", "X-PROJECT-ID": "NEW_PROJECT_ID", } sess.post.assert_called_once_with(url, headers=headers, json=FAKE) 
sess.get_project_id.assert_called_once_with() self.assertEqual(sot, res) @mock.patch.object(uuid, "uuid4") def test_create_non_204_resp(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.post.return_value = resp resp.status_code = 200 sess.get_project_id.return_value = "NEW_PROJECT_ID" mock_uuid.return_value = "NEW_CLIENT_ID" FAKE = copy.deepcopy(FAKE1) sot = claim.Claim(**FAKE1) sot._translate_response = mock.Mock() res = sot.create(sess) url = "/queues/{queue}/claims".format(queue=FAKE.pop("queue_name")) headers = { "Client-ID": "NEW_CLIENT_ID", "X-PROJECT-ID": "NEW_PROJECT_ID", } sess.post.assert_called_once_with(url, headers=headers, json=FAKE) sess.get_project_id.assert_called_once_with() self.assertEqual(sot, res) sot._translate_response.assert_called_once_with(resp) def test_create_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.post.return_value = resp resp.status_code = 200 FAKE = copy.deepcopy(FAKE2) sot = claim.Claim(**FAKE2) sot._translate_response = mock.Mock() res = sot.create(sess) url = "/queues/{queue}/claims".format(queue=FAKE.pop("queue_name")) headers = { "Client-ID": FAKE.pop("client_id"), "X-PROJECT-ID": FAKE.pop("project_id"), } sess.post.assert_called_once_with(url, headers=headers, json=FAKE) self.assertEqual(sot, res) @mock.patch.object(uuid, "uuid4") def test_get(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp sess.get_project_id.return_value = "NEW_PROJECT_ID" mock_uuid.return_value = "NEW_CLIENT_ID" sot = claim.Claim(**FAKE1) sot._translate_response = mock.Mock() res = sot.fetch(sess) url = "queues/{queue}/claims/{claim}".format( queue=FAKE1["queue_name"], claim=FAKE1["id"], ) headers = { "Client-ID": "NEW_CLIENT_ID", "X-PROJECT-ID": "NEW_PROJECT_ID", } sess.get.assert_called_with(url, headers=headers, skip_cache=False) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) def 
test_get_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp sot = claim.Claim(**FAKE2) sot._translate_response = mock.Mock() res = sot.fetch(sess) url = "queues/{queue}/claims/{claim}".format( queue=FAKE2["queue_name"], claim=FAKE2["id"], ) headers = { "Client-ID": "OLD_CLIENT_ID", "X-PROJECT-ID": "OLD_PROJECT_ID", } sess.get.assert_called_with(url, headers=headers, skip_cache=False) sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @mock.patch.object(uuid, "uuid4") def test_update(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.update.return_value = resp sess.get_project_id.return_value = "NEW_PROJECT_ID" mock_uuid.return_value = "NEW_CLIENT_ID" FAKE = copy.deepcopy(FAKE1) sot = claim.Claim(**FAKE1) res = sot.commit(sess) url = "queues/{queue}/claims/{claim}".format( queue=FAKE.pop("queue_name"), claim=FAKE["id"], ) headers = { "Client-ID": "NEW_CLIENT_ID", "X-PROJECT-ID": "NEW_PROJECT_ID", } sess.patch.assert_called_with(url, headers=headers, json=FAKE) sess.get_project_id.assert_called_once_with() self.assertEqual(sot, res) def test_update_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp FAKE = copy.deepcopy(FAKE2) sot = claim.Claim(**FAKE2) res = sot.commit(sess) url = "queues/{queue}/claims/{claim}".format( queue=FAKE.pop("queue_name"), claim=FAKE["id"], ) headers = { "Client-ID": FAKE.pop("client_id"), "X-PROJECT-ID": FAKE.pop("project_id"), } sess.patch.assert_called_with(url, headers=headers, json=FAKE) self.assertEqual(sot, res) @mock.patch.object(uuid, "uuid4") def test_delete(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sess.get_project_id.return_value = "NEW_PROJECT_ID" mock_uuid.return_value = "NEW_CLIENT_ID" sot = claim.Claim(**FAKE1) sot._translate_response = mock.Mock() sot.delete(sess) url = "queues/{queue}/claims/{claim}".format( queue=FAKE1["queue_name"], 
claim=FAKE1["id"], ) headers = { "Client-ID": "NEW_CLIENT_ID", "X-PROJECT-ID": "NEW_PROJECT_ID", } sess.delete.assert_called_with(url, headers=headers) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) def test_delete_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sot = claim.Claim(**FAKE2) sot._translate_response = mock.Mock() sot.delete(sess) url = "queues/{queue}/claims/{claim}".format( queue=FAKE2["queue_name"], claim=FAKE2["id"], ) headers = { "Client-ID": "OLD_CLIENT_ID", "X-PROJECT-ID": "OLD_PROJECT_ID", } sess.delete.assert_called_with(url, headers=headers) sot._translate_response.assert_called_once_with(resp, has_body=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/message/v2/test_message.py0000664000175000017500000002053700000000000025356 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import uuid from openstack.message.v2 import message from openstack.tests.unit import base FAKE1 = { 'age': 456, 'body': { 'current_bytes': '0', 'event': 'BackupProgress', 'total_bytes': '99614720', }, 'id': '578ee000508f153f256f717d', 'href': '/v2/queues/queue1/messages/578ee000508f153f256f717d', 'ttl': 3600, 'queue_name': 'queue1', } FAKE2 = { 'age': 456, 'body': { 'current_bytes': '0', 'event': 'BackupProgress', 'total_bytes': '99614720', }, 'id': '578ee000508f153f256f717d', 'href': '/v2/queues/queue1/messages/578ee000508f153f256f717d', 'ttl': 3600, 'queue_name': 'queue1', 'client_id': 'OLD_CLIENT_ID', 'project_id': 'OLD_PROJECT_ID', } class TestMessage(base.TestCase): def test_basic(self): sot = message.Message() self.assertEqual('messages', sot.resources_key) self.assertEqual('/queues/%(queue_name)s/messages', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = message.Message.new(**FAKE2) self.assertEqual(FAKE2['age'], sot.age) self.assertEqual(FAKE2['body'], sot.body) self.assertEqual(FAKE2['id'], sot.id) self.assertEqual(FAKE2['href'], sot.href) self.assertEqual(FAKE2['ttl'], sot.ttl) self.assertEqual(FAKE2['queue_name'], sot.queue_name) self.assertEqual(FAKE2['client_id'], sot.client_id) self.assertEqual(FAKE2['project_id'], sot.project_id) @mock.patch.object(uuid, 'uuid4') def test_post(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.post.return_value = resp resources = [ '/v2/queues/queue1/messages/578ee000508f153f256f717d' '/v2/queues/queue1/messages/579edd6c368cb61de9a7e233' ] resp.json.return_value = {'resources': resources} sess.get_project_id.return_value = 'NEW_PROJECT_ID' mock_uuid.return_value = 'NEW_CLIENT_ID' messages = [ {'body': {'key': 'value1'}, 'ttl': 3600}, {'body': {'key': 'value2'}, 'ttl': 1800}, ] sot = message.Message(**FAKE1) res = sot.post(sess, messages) url = 
'/queues/{queue}/messages'.format(queue=FAKE1['queue_name']) headers = { 'Client-ID': 'NEW_CLIENT_ID', 'X-PROJECT-ID': 'NEW_PROJECT_ID', } sess.post.assert_called_once_with( url, headers=headers, json={'messages': messages} ) sess.get_project_id.assert_called_once_with() resp.json.assert_called_once_with() self.assertEqual(resources, res) def test_post_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.post.return_value = resp resources = [ '/v2/queues/queue1/messages/578ee000508f153f256f717d' '/v2/queues/queue1/messages/579edd6c368cb61de9a7e233' ] resp.json.return_value = {'resources': resources} messages = [ {'body': {'key': 'value1'}, 'ttl': 3600}, {'body': {'key': 'value2'}, 'ttl': 1800}, ] sot = message.Message(**FAKE2) res = sot.post(sess, messages) url = '/queues/{queue}/messages'.format(queue=FAKE2['queue_name']) headers = { 'Client-ID': 'OLD_CLIENT_ID', 'X-PROJECT-ID': 'OLD_PROJECT_ID', } sess.post.assert_called_once_with( url, headers=headers, json={'messages': messages} ) resp.json.assert_called_once_with() self.assertEqual(resources, res) @mock.patch.object(uuid, 'uuid4') def test_get(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp sess.get_project_id.return_value = 'NEW_PROJECT_ID' mock_uuid.return_value = 'NEW_CLIENT_ID' sot = message.Message(**FAKE1) sot._translate_response = mock.Mock() res = sot.fetch(sess) url = 'queues/{queue}/messages/{message}'.format( queue=FAKE1['queue_name'], message=FAKE1['id'], ) headers = { 'Client-ID': 'NEW_CLIENT_ID', 'X-PROJECT-ID': 'NEW_PROJECT_ID', } sess.get.assert_called_with(url, headers=headers, skip_cache=False) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) def test_get_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp sot = message.Message(**FAKE1) sot._translate_response = mock.Mock() res = sot.fetch(sess) url = 
'queues/{queue}/messages/{message}'.format( queue=FAKE2['queue_name'], message=FAKE2['id'], ) sot = message.Message(**FAKE2) sot._translate_response = mock.Mock() res = sot.fetch(sess) headers = { 'Client-ID': 'OLD_CLIENT_ID', 'X-PROJECT-ID': 'OLD_PROJECT_ID', } sess.get.assert_called_with(url, headers=headers, skip_cache=False) sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @mock.patch.object(uuid, 'uuid4') def test_delete_unclaimed(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sess.get_project_id.return_value = 'NEW_PROJECT_ID' mock_uuid.return_value = 'NEW_CLIENT_ID' sot = message.Message(**FAKE1) sot.claim_id = None sot._translate_response = mock.Mock() sot.delete(sess) url = 'queues/{queue}/messages/{message}'.format( queue=FAKE1['queue_name'], message=FAKE1['id'], ) headers = { 'Client-ID': 'NEW_CLIENT_ID', 'X-PROJECT-ID': 'NEW_PROJECT_ID', } sess.delete.assert_called_with(url, headers=headers) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) @mock.patch.object(uuid, 'uuid4') def test_delete_claimed(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sess.get_project_id.return_value = 'NEW_PROJECT_ID' mock_uuid.return_value = 'NEW_CLIENT_ID' sot = message.Message(**FAKE1) sot.claim_id = 'CLAIM_ID' sot._translate_response = mock.Mock() sot.delete(sess) url = 'queues/{queue}/messages/{message}?claim_id={cid}'.format( queue=FAKE1['queue_name'], message=FAKE1['id'], cid='CLAIM_ID', ) headers = { 'Client-ID': 'NEW_CLIENT_ID', 'X-PROJECT-ID': 'NEW_PROJECT_ID', } sess.delete.assert_called_with(url, headers=headers) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) def test_delete_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sot = message.Message(**FAKE2) sot.claim_id 
= None sot._translate_response = mock.Mock() sot.delete(sess) url = 'queues/{queue}/messages/{message}'.format( queue=FAKE2['queue_name'], message=FAKE2['id'], ) headers = { 'Client-ID': 'OLD_CLIENT_ID', 'X-PROJECT-ID': 'OLD_PROJECT_ID', } sess.delete.assert_called_with(url, headers=headers) sot._translate_response.assert_called_once_with(resp, has_body=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/message/v2/test_proxy.py0000664000175000017500000002335300000000000025112 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.message.v2 import _proxy from openstack.message.v2 import claim from openstack.message.v2 import message from openstack.message.v2 import queue from openstack.message.v2 import subscription from openstack import proxy as proxy_base from openstack.tests.unit import test_proxy_base QUEUE_NAME = 'test_queue' class TestMessageProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestMessageQueue(TestMessageProxy): def test_queue_create(self): self.verify_create(self.proxy.create_queue, queue.Queue) def test_queue_get(self): self.verify_get(self.proxy.get_queue, queue.Queue) self.verify_get_overrided( self.proxy, queue.Queue, 'openstack.message.v2.queue.Queue' ) def test_queues(self): self.verify_list(self.proxy.queues, queue.Queue) def test_queue_delete(self): self.verify_delete(self.proxy.delete_queue, queue.Queue, False) def test_queue_delete_ignore(self): self.verify_delete(self.proxy.delete_queue, queue.Queue, True) class TestMessageMessage(TestMessageProxy): @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_message_post(self, mock_get_resource): message_obj = message.Message(queue_name="test_queue") mock_get_resource.return_value = message_obj self._verify( "openstack.message.v2.message.Message.post", self.proxy.post_message, method_args=["test_queue", ["msg1", "msg2"]], expected_args=[self.proxy, ["msg1", "msg2"]], ) mock_get_resource.assert_called_once_with( message.Message, None, queue_name="test_queue" ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_message_get(self, mock_get_resource): mock_get_resource.return_value = "resource_or_id" self._verify( "openstack.proxy.Proxy._get", self.proxy.get_message, method_args=["test_queue", "resource_or_id"], expected_args=[message.Message, "resource_or_id"], ) mock_get_resource.assert_called_once_with( message.Message, "resource_or_id", queue_name="test_queue" ) self.verify_get_overrided( 
self.proxy, message.Message, 'openstack.message.v2.message.Message' ) def test_messages(self): self.verify_list( self.proxy.messages, message.Message, method_kwargs={"queue_name": "test_queue"}, expected_kwargs={"queue_name": "test_queue"}, ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_message_delete(self, mock_get_resource): fake_message = mock.Mock() fake_message.id = "message_id" mock_get_resource.return_value = fake_message self._verify( "openstack.proxy.Proxy._delete", self.proxy.delete_message, method_args=["test_queue", "resource_or_id", None, False], expected_args=[message.Message, fake_message], expected_kwargs={"ignore_missing": False}, ) self.assertIsNone(fake_message.claim_id) mock_get_resource.assert_called_once_with( message.Message, "resource_or_id", queue_name="test_queue" ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_message_delete_claimed(self, mock_get_resource): fake_message = mock.Mock() fake_message.id = "message_id" mock_get_resource.return_value = fake_message self._verify( "openstack.proxy.Proxy._delete", self.proxy.delete_message, method_args=["test_queue", "resource_or_id", "claim_id", False], expected_args=[message.Message, fake_message], expected_kwargs={"ignore_missing": False}, ) self.assertEqual("claim_id", fake_message.claim_id) mock_get_resource.assert_called_once_with( message.Message, "resource_or_id", queue_name="test_queue" ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_message_delete_ignore(self, mock_get_resource): fake_message = mock.Mock() fake_message.id = "message_id" mock_get_resource.return_value = fake_message self._verify( "openstack.proxy.Proxy._delete", self.proxy.delete_message, method_args=["test_queue", "resource_or_id", None, True], expected_args=[message.Message, fake_message], expected_kwargs={"ignore_missing": True}, ) self.assertIsNone(fake_message.claim_id) mock_get_resource.assert_called_once_with( message.Message, "resource_or_id", 
queue_name="test_queue" ) class TestMessageSubscription(TestMessageProxy): def test_subscription_create(self): self._verify( "openstack.message.v2.subscription.Subscription.create", self.proxy.create_subscription, method_args=["test_queue"], expected_args=[self.proxy], expected_kwargs={"base_path": None}, ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_subscription_get(self, mock_get_resource): mock_get_resource.return_value = "resource_or_id" self._verify( "openstack.proxy.Proxy._get", self.proxy.get_subscription, method_args=["test_queue", "resource_or_id"], expected_args=[subscription.Subscription, "resource_or_id"], ) mock_get_resource.assert_called_once_with( subscription.Subscription, "resource_or_id", queue_name="test_queue", ) self.verify_get_overrided( self.proxy, subscription.Subscription, 'openstack.message.v2.subscription.Subscription', ) def test_subscriptions(self): self.verify_list( self.proxy.subscriptions, subscription.Subscription, method_kwargs={"queue_name": "test_queue"}, expected_kwargs={"queue_name": "test_queue"}, ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_subscription_delete(self, mock_get_resource): mock_get_resource.return_value = "test_subscription" self.verify_delete( self.proxy.delete_subscription, subscription.Subscription, ignore_missing=False, method_args=["test_queue", "resource_or_id"], expected_args=["test_subscription"], ) mock_get_resource.assert_called_once_with( subscription.Subscription, "resource_or_id", queue_name="test_queue", ) @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_subscription_delete_ignore(self, mock_get_resource): mock_get_resource.return_value = "test_subscription" self.verify_delete( self.proxy.delete_subscription, subscription.Subscription, ignore_missing=True, method_args=["test_queue", "resource_or_id"], expected_args=["test_subscription"], ) mock_get_resource.assert_called_once_with( subscription.Subscription, "resource_or_id", 
queue_name="test_queue", ) class TestMessageClaim(TestMessageProxy): def test_claim_create(self): self._verify( "openstack.message.v2.claim.Claim.create", self.proxy.create_claim, method_args=["test_queue"], expected_args=[self.proxy], expected_kwargs={"base_path": None}, ) def test_claim_get(self): self._verify( "openstack.proxy.Proxy._get", self.proxy.get_claim, method_args=["test_queue", "resource_or_id"], expected_args=[claim.Claim, "resource_or_id"], expected_kwargs={"queue_name": "test_queue"}, ) self.verify_get_overrided( self.proxy, claim.Claim, 'openstack.message.v2.claim.Claim' ) def test_claim_update(self): self._verify( "openstack.proxy.Proxy._update", self.proxy.update_claim, method_args=["test_queue", "resource_or_id"], method_kwargs={"k1": "v1"}, expected_args=[claim.Claim, "resource_or_id"], expected_kwargs={"queue_name": "test_queue", "k1": "v1"}, ) def test_claim_delete(self): self.verify_delete( self.proxy.delete_claim, claim.Claim, ignore_missing=False, method_args=["test_queue", "test_claim"], expected_args=["test_claim"], expected_kwargs={ "queue_name": "test_queue", "ignore_missing": False, }, ) def test_claim_delete_ignore(self): self.verify_delete( self.proxy.delete_claim, claim.Claim, ignore_missing=True, method_args=["test_queue", "test_claim"], expected_args=["test_claim"], expected_kwargs={ "queue_name": "test_queue", "ignore_missing": True, }, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/message/v2/test_queue.py0000664000175000017500000001360000000000000025047 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid from openstack.message.v2 import queue from openstack.tests.unit import base FAKE1 = { 'name': 'test_queue', 'description': 'Queue used for test.', '_default_message_ttl': 3600, '_max_messages_post_size': 262144, } FAKE2 = { 'name': 'test_queue', 'description': 'Queue used for test.', '_default_message_ttl': 3600, '_max_messages_post_size': 262144, 'client_id': 'OLD_CLIENT_ID', 'project_id': 'OLD_PROJECT_ID', } class TestQueue(base.TestCase): def test_basic(self): sot = queue.Queue() self.assertEqual('queues', sot.resources_key) self.assertEqual('/queues', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = queue.Queue.new(**FAKE2) self.assertEqual(FAKE1['description'], sot.description) self.assertEqual(FAKE1['name'], sot.name) self.assertEqual(FAKE1['name'], sot.id) self.assertEqual( FAKE1['_default_message_ttl'], sot.default_message_ttl ) self.assertEqual( FAKE1['_max_messages_post_size'], sot.max_messages_post_size ) self.assertEqual(FAKE2['client_id'], sot.client_id) self.assertEqual(FAKE2['project_id'], sot.project_id) @mock.patch.object(uuid, 'uuid4') def test_create(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.put.return_value = resp sess.get_project_id.return_value = 'NEW_PROJECT_ID' mock_uuid.return_value = 'NEW_CLIENT_ID' sot = queue.Queue(**FAKE1) sot._translate_response = mock.Mock() res = sot.create(sess) url = 'queues/%s' % FAKE1['name'] headers = { 'Client-ID': 
'NEW_CLIENT_ID', 'X-PROJECT-ID': 'NEW_PROJECT_ID', } sess.put.assert_called_with(url, headers=headers, json=FAKE1) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) self.assertEqual(sot, res) def test_create_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.put.return_value = resp sot = queue.Queue(**FAKE2) sot._translate_response = mock.Mock() res = sot.create(sess) url = 'queues/%s' % FAKE2['name'] headers = { 'Client-ID': 'OLD_CLIENT_ID', 'X-PROJECT-ID': 'OLD_PROJECT_ID', } sess.put.assert_called_with(url, headers=headers, json=FAKE1) sot._translate_response.assert_called_once_with(resp, has_body=False) self.assertEqual(sot, res) @mock.patch.object(uuid, 'uuid4') def test_get(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp sess.get_project_id.return_value = 'NEW_PROJECT_ID' mock_uuid.return_value = 'NEW_CLIENT_ID' sot = queue.Queue(**FAKE1) sot._translate_response = mock.Mock() res = sot.fetch(sess) url = 'queues/%s' % FAKE1['name'] headers = { 'Client-ID': 'NEW_CLIENT_ID', 'X-PROJECT-ID': 'NEW_PROJECT_ID', } sess.get.assert_called_with(url, headers=headers, skip_cache=False) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) def test_get_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp sot = queue.Queue(**FAKE2) sot._translate_response = mock.Mock() res = sot.fetch(sess) url = 'queues/%s' % FAKE2['name'] headers = { 'Client-ID': 'OLD_CLIENT_ID', 'X-PROJECT-ID': 'OLD_PROJECT_ID', } sess.get.assert_called_with(url, headers=headers, skip_cache=False) sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @mock.patch.object(uuid, 'uuid4') def test_delete(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sess.get_project_id.return_value = 
'NEW_PROJECT_ID' mock_uuid.return_value = 'NEW_CLIENT_ID' sot = queue.Queue(**FAKE1) sot._translate_response = mock.Mock() sot.delete(sess) url = 'queues/%s' % FAKE1['name'] headers = { 'Client-ID': 'NEW_CLIENT_ID', 'X-PROJECT-ID': 'NEW_PROJECT_ID', } sess.delete.assert_called_with(url, headers=headers) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) def test_delete_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sot = queue.Queue(**FAKE2) sot._translate_response = mock.Mock() sot.delete(sess) url = 'queues/%s' % FAKE2['name'] headers = { 'Client-ID': 'OLD_CLIENT_ID', 'X-PROJECT-ID': 'OLD_PROJECT_ID', } sess.delete.assert_called_with(url, headers=headers) sot._translate_response.assert_called_once_with(resp, has_body=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/message/v2/test_subscription.py0000664000175000017500000001553500000000000026460 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock import uuid from openstack.message.v2 import subscription from openstack.tests.unit import base FAKE1 = { "age": 1632, "id": "576b54963990b48c644bb7e7", "subscriber": "http://10.229.49.117:5679", "subscription_id": "576b54963990b48c644bb7e7", "source": "test", "ttl": 3600, "options": {"name": "test"}, "queue_name": "queue1", } FAKE2 = { "age": 1632, "id": "576b54963990b48c644bb7e7", "subscriber": "http://10.229.49.117:5679", "subscription_id": "576b54963990b48c644bb7e7", "source": "test", "ttl": 3600, "options": {"name": "test"}, "queue_name": "queue1", "client_id": "OLD_CLIENT_ID", "project_id": "OLD_PROJECT_ID", } class TestSubscription(base.TestCase): def test_basic(self): sot = subscription.Subscription() self.assertEqual("subscriptions", sot.resources_key) self.assertEqual("/queues/%(queue_name)s/subscriptions", sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = subscription.Subscription.new(**FAKE2) self.assertEqual(FAKE2["age"], sot.age) self.assertEqual(FAKE2["id"], sot.id) self.assertEqual(FAKE2["options"], sot.options) self.assertEqual(FAKE2["source"], sot.source) self.assertEqual(FAKE2["subscriber"], sot.subscriber) self.assertEqual(FAKE2["subscription_id"], sot.subscription_id) self.assertEqual(FAKE2["ttl"], sot.ttl) self.assertEqual(FAKE2["queue_name"], sot.queue_name) self.assertEqual(FAKE2["client_id"], sot.client_id) self.assertEqual(FAKE2["project_id"], sot.project_id) @mock.patch.object(uuid, "uuid4") def test_create(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.post.return_value = resp sess.get_project_id.return_value = "NEW_PROJECT_ID" mock_uuid.return_value = "NEW_CLIENT_ID" FAKE = copy.deepcopy(FAKE1) sot = subscription.Subscription(**FAKE1) sot._translate_response = mock.Mock() res = sot.create(sess) url = "/queues/{queue}/subscriptions".format( 
queue=FAKE.pop("queue_name") ) headers = { "Client-ID": "NEW_CLIENT_ID", "X-PROJECT-ID": "NEW_PROJECT_ID", } sess.post.assert_called_once_with(url, headers=headers, json=FAKE) sess.get_project_id.assert_called_once_with() self.assertEqual(sot, res) def test_create_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.post.return_value = resp FAKE = copy.deepcopy(FAKE2) sot = subscription.Subscription(**FAKE2) sot._translate_response = mock.Mock() res = sot.create(sess) url = "/queues/{queue}/subscriptions".format( queue=FAKE.pop("queue_name") ) headers = { "Client-ID": FAKE.pop("client_id"), "X-PROJECT-ID": FAKE.pop("project_id"), } sess.post.assert_called_once_with(url, headers=headers, json=FAKE) self.assertEqual(sot, res) @mock.patch.object(uuid, "uuid4") def test_get(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp sess.get_project_id.return_value = "NEW_PROJECT_ID" mock_uuid.return_value = "NEW_CLIENT_ID" sot = subscription.Subscription(**FAKE1) sot._translate_response = mock.Mock() res = sot.fetch(sess) url = "queues/{queue}/subscriptions/{subscription}".format( queue=FAKE1["queue_name"], subscription=FAKE1["id"], ) headers = { "Client-ID": "NEW_CLIENT_ID", "X-PROJECT-ID": "NEW_PROJECT_ID", } sess.get.assert_called_with(url, headers=headers, skip_cache=False) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) def test_get_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp sot = subscription.Subscription(**FAKE2) sot._translate_response = mock.Mock() res = sot.fetch(sess) url = "queues/{queue}/subscriptions/{subscription}".format( queue=FAKE2["queue_name"], subscription=FAKE2["id"], ) headers = { "Client-ID": "OLD_CLIENT_ID", "X-PROJECT-ID": "OLD_PROJECT_ID", } sess.get.assert_called_with(url, headers=headers, skip_cache=False) 
sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @mock.patch.object(uuid, "uuid4") def test_delete(self, mock_uuid): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sess.get_project_id.return_value = "NEW_PROJECT_ID" mock_uuid.return_value = "NEW_CLIENT_ID" sot = subscription.Subscription(**FAKE1) sot._translate_response = mock.Mock() sot.delete(sess) url = "queues/{queue}/subscriptions/{subscription}".format( queue=FAKE1["queue_name"], subscription=FAKE1["id"], ) headers = { "Client-ID": "NEW_CLIENT_ID", "X-PROJECT-ID": "NEW_PROJECT_ID", } sess.delete.assert_called_with(url, headers=headers) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) def test_delete_client_id_project_id_exist(self): sess = mock.Mock() resp = mock.Mock() sess.delete.return_value = resp sot = subscription.Subscription(**FAKE2) sot._translate_response = mock.Mock() sot.delete(sess) url = "queues/{queue}/subscriptions/{subscription}".format( queue=FAKE2["queue_name"], subscription=FAKE2["id"], ) headers = { "Client-ID": "OLD_CLIENT_ID", "X-PROJECT-ID": "OLD_PROJECT_ID", } sess.delete.assert_called_with(url, headers=headers) sot._translate_response.assert_called_once_with(resp, has_body=False) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.477425 openstacksdk-4.0.0/openstack/tests/unit/network/0000775000175000017500000000000000000000000022030 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/__init__.py0000664000175000017500000000000000000000000024127 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/network/test_version.py0000664000175000017500000000257500000000000025137 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network import version from openstack.tests.unit import base IDENTIFIER = 'v2.0' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'status': '3', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.493433 openstacksdk-4.0.0/openstack/tests/unit/network/v2/0000775000175000017500000000000000000000000022357 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/__init__.py0000664000175000017500000000000000000000000024456 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_address_group.py0000664000175000017500000000400400000000000026627 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import address_group from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'name': '1', 'description': '2', 'project_id': '3', 'addresses': ['10.0.0.1/32'], } class TestAddressGroup(base.TestCase): def test_basic(self): sot = address_group.AddressGroup() self.assertEqual('address_group', sot.resource_key) self.assertEqual('address_groups', sot.resources_key) self.assertEqual('/address-groups', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { "name": "name", "description": "description", "project_id": "project_id", "sort_key": "sort_key", "sort_dir": "sort_dir", "limit": "limit", "marker": "marker", }, sot._query_mapping._mapping, ) def test_make_it(self): sot = address_group.AddressGroup(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertCountEqual(EXAMPLE['addresses'], sot.addresses) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_address_scope.py0000664000175000017500000000312600000000000026610 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import address_scope from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'ip_version': 4, 'name': '1', 'shared': True, 'project_id': '2', } class TestAddressScope(base.TestCase): def test_basic(self): sot = address_scope.AddressScope() self.assertEqual('address_scope', sot.resource_key) self.assertEqual('address_scopes', sot.resources_key) self.assertEqual('/address-scopes', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = address_scope.AddressScope(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['ip_version'], sot.ip_version) self.assertEqual(EXAMPLE['name'], sot.name) self.assertTrue(sot.is_shared) self.assertEqual(EXAMPLE['project_id'], sot.project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_agent.py0000664000175000017500000001420400000000000025067 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.network.v2 import agent from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'agent_type': 'Test Agent', 'alive': True, 'availability_zone': 'az1', 'binary': 'test-binary', 'configurations': {'attr1': 'value1', 'attr2': 'value2'}, 'created_at': '2016-03-09T12:14:57.233772', 'description': 'test description', 'heartbeat_timestamp': '2016-08-09T12:14:57.233772', 'host': 'test-host', 'id': IDENTIFIER, 'resources_synced': False, 'started_at': '2016-07-09T12:14:57.233772', 'topic': 'test-topic', 'ha_state': 'active', } class TestAgent(base.TestCase): def test_basic(self): sot = agent.Agent() self.assertEqual('agent', sot.resource_key) self.assertEqual('agents', sot.resources_key) self.assertEqual('/agents', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = agent.Agent(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['agent_type'], sot.agent_type) self.assertTrue(sot.is_alive) self.assertEqual(EXAMPLE['availability_zone'], sot.availability_zone) self.assertEqual(EXAMPLE['binary'], sot.binary) self.assertEqual(EXAMPLE['configurations'], sot.configuration) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['heartbeat_timestamp'], sot.last_heartbeat_at) self.assertEqual(EXAMPLE['host'], 
sot.host) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['resources_synced'], sot.resources_synced) self.assertEqual(EXAMPLE['started_at'], sot.started_at) self.assertEqual(EXAMPLE['topic'], sot.topic) self.assertEqual(EXAMPLE['ha_state'], sot.ha_state) def test_add_agent_to_network(self): # Add agent to network net = agent.Agent(**EXAMPLE) response = mock.Mock() response.body = {'network_id': '1'} response.json = mock.Mock(return_value=response.body) sess = mock.Mock() sess.post = mock.Mock(return_value=response) body = {'network_id': '1'} self.assertEqual(response.body, net.add_agent_to_network(sess, **body)) url = 'agents/IDENTIFIER/dhcp-networks' sess.post.assert_called_with(url, json=body) def test_remove_agent_from_network(self): # Remove agent from agent net = agent.Agent(**EXAMPLE) sess = mock.Mock() network_id = {} self.assertIsNone(net.remove_agent_from_network(sess, network_id)) body = {'network_id': {}} sess.delete.assert_called_with( 'agents/IDENTIFIER/dhcp-networks/', json=body ) def test_add_router_to_agent(self): # Add router to agent sot = agent.Agent(**EXAMPLE) response = mock.Mock() response.body = {'router_id': '1'} response.json = mock.Mock(return_value=response.body) sess = mock.Mock() sess.post = mock.Mock(return_value=response) router_id = '1' self.assertEqual( response.body, sot.add_router_to_agent(sess, router_id) ) body = {'router_id': router_id} url = 'agents/IDENTIFIER/l3-routers' sess.post.assert_called_with(url, json=body) def test_remove_router_from_agent(self): # Remove router from agent sot = agent.Agent(**EXAMPLE) sess = mock.Mock() router_id = {} self.assertIsNone(sot.remove_router_from_agent(sess, router_id)) body = {'router_id': {}} sess.delete.assert_called_with( 'agents/IDENTIFIER/l3-routers/', json=body ) def test_get_bgp_speakers_hosted_by_dragent(self): sot = agent.Agent(**EXAMPLE) sess = mock.Mock() response = mock.Mock() response.body = { 'bgp_speakers': [{'name': 'bgp_speaker_1', 'ip_version': 4}] } 
response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess.get = mock.Mock(return_value=response) resp = sot.get_bgp_speakers_hosted_by_dragent(sess) self.assertEqual(resp, response.body) sess.get.assert_called_with('agents/IDENTIFIER/bgp-drinstances') class TestNetworkHostingDHCPAgent(base.TestCase): def test_basic(self): net = agent.NetworkHostingDHCPAgent() self.assertEqual('agent', net.resource_key) self.assertEqual('agents', net.resources_key) self.assertEqual('/networks/%(network_id)s/dhcp-agents', net.base_path) self.assertEqual('dhcp-agent', net.resource_name) self.assertFalse(net.allow_create) self.assertTrue(net.allow_fetch) self.assertFalse(net.allow_commit) self.assertFalse(net.allow_delete) self.assertTrue(net.allow_list) class TestRouterL3Agent(base.TestCase): def test_basic(self): sot = agent.RouterL3Agent() self.assertEqual('agent', sot.resource_key) self.assertEqual('agents', sot.resources_key) self.assertEqual('/routers/%(router_id)s/l3-agents', sot.base_path) self.assertEqual('l3-agent', sot.resource_name) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_retrieve) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_auto_allocated_topology.py0000664000175000017500000000252000000000000030703 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import auto_allocated_topology from openstack.tests.unit import base EXAMPLE = { 'project_id': '1', 'dry_run': False, } class TestAutoAllocatedTopology(base.TestCase): def test_basic(self): topo = auto_allocated_topology.AutoAllocatedTopology self.assertEqual('auto_allocated_topology', topo.resource_key) self.assertEqual('/auto-allocated-topology', topo.base_path) self.assertFalse(topo.allow_create) self.assertTrue(topo.allow_fetch) self.assertFalse(topo.allow_commit) self.assertTrue(topo.allow_delete) self.assertFalse(topo.allow_list) def test_make_it(self): topo = auto_allocated_topology.AutoAllocatedTopology(**EXAMPLE) self.assertEqual(EXAMPLE['project_id'], topo.project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_availability_zone.py0000664000175000017500000000306000000000000027474 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import availability_zone from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'name': '1', 'resource': '2', 'state': '3', } class TestAvailabilityZone(base.TestCase): def test_basic(self): sot = availability_zone.AvailabilityZone() self.assertEqual('availability_zone', sot.resource_key) self.assertEqual('availability_zones', sot.resources_key) self.assertEqual('/availability_zones', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = availability_zone.AvailabilityZone(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['resource'], sot.resource) self.assertEqual(EXAMPLE['state'], sot.state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_bgp_peer.py0000664000175000017500000000353100000000000025555 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import bgp_peer from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'auth_type': 'none', 'remote_as': '1001', 'name': 'bgp-peer', 'peer_ip': '10.0.0.3', 'id': IDENTIFIER, 'project_id': '42', } class TestBgpPeer(base.TestCase): def test_basic(self): sot = bgp_peer.BgpPeer() self.assertEqual('bgp_peer', sot.resource_key) self.assertEqual('bgp_peers', sot.resources_key) self.assertEqual('/bgp-peers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = bgp_peer.BgpPeer(**EXAMPLE) self.assertEqual(EXAMPLE['auth_type'], sot.auth_type) self.assertEqual(EXAMPLE['remote_as'], sot.remote_as) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['peer_ip'], sot.peer_ip) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_bgp_speaker.py0000664000175000017500000001521200000000000026253 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.network.v2 import bgp_speaker from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'name': 'bgp-speaker', 'peers': [], 'ip_version': 4, 'advertise_floating_ip_host_routes': 'true', 'advertise_tenant_networks': 'true', 'local_as': 1000, 'networks': [], 'project_id': '42', } class TestBgpSpeaker(base.TestCase): def test_basic(self): sot = bgp_speaker.BgpSpeaker() self.assertEqual('bgp_speaker', sot.resource_key) self.assertEqual('bgp_speakers', sot.resources_key) self.assertEqual('/bgp-speakers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = bgp_speaker.BgpSpeaker(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['ip_version'], sot.ip_version) self.assertEqual( EXAMPLE['advertise_floating_ip_host_routes'], sot.advertise_floating_ip_host_routes, ) self.assertEqual(EXAMPLE['local_as'], sot.local_as) self.assertEqual(EXAMPLE['networks'], sot.networks) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_add_bgp_peer(self): sot = bgp_speaker.BgpSpeaker(**EXAMPLE) response = mock.Mock() response.body = {'bgp_peer_id': '101'} response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) ret = sot.add_bgp_peer(sess, '101') self.assertIsInstance(ret, dict) self.assertEqual(ret, {'bgp_peer_id': '101'}) body = {'bgp_peer_id': '101'} url = 'bgp-speakers/IDENTIFIER/add_bgp_peer' sess.put.assert_called_with(url, json=body) def test_remove_bgp_peer(self): sot = bgp_speaker.BgpSpeaker(**EXAMPLE) response = mock.Mock() response.body = {'bgp_peer_id': '102'} response.json = 
mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) ret = sot.remove_bgp_peer(sess, '102') self.assertIsNone(ret) body = {'bgp_peer_id': '102'} url = 'bgp-speakers/IDENTIFIER/remove_bgp_peer' sess.put.assert_called_with(url, json=body) def test_add_gateway_network(self): sot = bgp_speaker.BgpSpeaker(**EXAMPLE) response = mock.Mock() response.body = {'network_id': 'net_id'} response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) ret = sot.add_gateway_network(sess, 'net_id') self.assertIsInstance(ret, dict) self.assertEqual(ret, {'network_id': 'net_id'}) body = {'network_id': 'net_id'} url = 'bgp-speakers/IDENTIFIER/add_gateway_network' sess.put.assert_called_with(url, json=body) def test_remove_gateway_network(self): sot = bgp_speaker.BgpSpeaker(**EXAMPLE) response = mock.Mock() response.body = {'network_id': 'net_id42'} response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) ret = sot.remove_gateway_network(sess, 'net_id42') self.assertIsNone(ret) body = {'network_id': 'net_id42'} url = 'bgp-speakers/IDENTIFIER/remove_gateway_network' sess.put.assert_called_with(url, json=body) def test_get_advertised_routes(self): sot = bgp_speaker.BgpSpeaker(**EXAMPLE) response = mock.Mock() response.body = { 'advertised_routes': [ {'cidr': '192.168.10.0/24', 'nexthop': '10.0.0.1'} ] } response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.get = mock.Mock(return_value=response) ret = sot.get_advertised_routes(sess) url = 'bgp-speakers/IDENTIFIER/get_advertised_routes' sess.get.assert_called_with(url) self.assertEqual(ret, response.body) def test_get_bgp_dragents(self): sot = bgp_speaker.BgpSpeaker(**EXAMPLE) response = mock.Mock() response.body = { 'agents': [{'binary': 
'neutron-bgp-dragent', 'alive': True}] } response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.get = mock.Mock(return_value=response) ret = sot.get_bgp_dragents(sess) url = 'bgp-speakers/IDENTIFIER/bgp-dragents' sess.get.assert_called_with(url) self.assertEqual(ret, response.body) def test_add_bgp_speaker_to_dragent(self): sot = bgp_speaker.BgpSpeaker(**EXAMPLE) agent_id = '123-42' response = mock.Mock() response.status_code = 201 sess = mock.Mock() sess.post = mock.Mock(return_value=response) self.assertIsNone(sot.add_bgp_speaker_to_dragent(sess, agent_id)) body = {'bgp_speaker_id': sot.id} url = 'agents/%s/bgp-drinstances' % agent_id sess.post.assert_called_with(url, json=body) def test_remove_bgp_speaker_from_dragent(self): sot = bgp_speaker.BgpSpeaker(**EXAMPLE) agent_id = '123-42' response = mock.Mock() response.status_code = 204 sess = mock.Mock() sess.delete = mock.Mock(return_value=response) self.assertIsNone(sot.remove_bgp_speaker_from_dragent(sess, agent_id)) url = f'agents/{agent_id}/bgp-drinstances/{IDENTIFIER}' sess.delete.assert_called_with(url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_bgpvpn.py0000664000175000017500000000770600000000000025276 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import bgpvpn from openstack.network.v2 import bgpvpn_network_association from openstack.network.v2 import bgpvpn_port_association from openstack.network.v2 import bgpvpn_router_association from openstack.network.v2 import network from openstack.network.v2 import port from openstack.network.v2 import router from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' NET_ID = 'NET_ID' PORT_ID = 'PORT_ID' ROUTER_ID = 'ROUTER_ID' EXAMPLE = { 'id': IDENTIFIER, 'name': 'bgpvpn', 'project_id': '42', 'route_distinguishers': ['64512:1777', '64512:1888', '64512:1999'], 'route_targets': '64512:1444', 'import_targets': '64512:1555', 'export_targets': '64512:1666', } class TestBgpVpn(base.TestCase): def test_basic(self): sot = bgpvpn.BgpVpn() self.assertEqual('bgpvpn', sot.resource_key) self.assertEqual('bgpvpns', sot.resources_key) self.assertEqual('/bgpvpn/bgpvpns', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = bgpvpn.BgpVpn(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual( EXAMPLE['route_distinguishers'], sot.route_distinguishers ) self.assertEqual(EXAMPLE['route_targets'], sot.route_targets) self.assertEqual(EXAMPLE['import_targets'], sot.import_targets) self.assertEqual(EXAMPLE['export_targets'], sot.export_targets) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'local_pref': 'local_pref', 'name': 'name', 'networks': 'networks', 'routers': 'routers', 'ports': 'ports', 'project_id': 'project_id', 'type': 'type', 'vni': 'vni', }, sot._query_mapping._mapping, ) def test_create_bgpvpn_network_association(self): test_bpgvpn = bgpvpn.BgpVpn(**EXAMPLE) test_net = network.Network(**{'name': 'foo_net', 'id': NET_ID}) sot = 
bgpvpn_network_association.BgpVpnNetworkAssociation( bgpvn_id=test_bpgvpn.id, network_id=test_net.id ) self.assertEqual(test_net.id, sot.network_id) self.assertEqual(test_bpgvpn.id, sot.bgpvn_id) def test_create_bgpvpn_port_association(self): test_bpgvpn = bgpvpn.BgpVpn(**EXAMPLE) test_port = port.Port( **{'name': 'foo_port', 'id': PORT_ID, 'network_id': NET_ID} ) sot = bgpvpn_port_association.BgpVpnPortAssociation( bgpvn_id=test_bpgvpn.id, port_id=test_port.id ) self.assertEqual(test_port.id, sot.port_id) self.assertEqual(test_bpgvpn.id, sot.bgpvn_id) def test_create_bgpvpn_router_association(self): test_bpgvpn = bgpvpn.BgpVpn(**EXAMPLE) test_router = router.Router(**{'name': 'foo_port'}) sot = bgpvpn_router_association.BgpVpnRouterAssociation( bgpvn_id=test_bpgvpn.id, router_id=test_router.id ) self.assertEqual(test_router.id, sot.router_id) self.assertEqual(test_bpgvpn.id, sot.bgpvn_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_default_security_group_rule.py0000664000175000017500000000660700000000000031617 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import default_security_group_rule from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'direction': '2', 'ethertype': '3', 'id': IDENTIFIER, 'port_range_max': 4, 'port_range_min': 5, 'protocol': '6', 'remote_group_id': '7', 'remote_ip_prefix': '8', 'remote_address_group_id': '13', 'used_in_default_sg': True, 'used_in_non_default_sg': True, } class TestDefaultSecurityGroupRule(base.TestCase): def test_basic(self): sot = default_security_group_rule.DefaultSecurityGroupRule() self.assertEqual('default_security_group_rule', sot.resource_key) self.assertEqual('default_security_group_rules', sot.resources_key) self.assertEqual('/default-security-group-rules', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'description': 'description', 'direction': 'direction', 'id': 'id', 'ether_type': 'ethertype', 'limit': 'limit', 'marker': 'marker', 'port_range_max': 'port_range_max', 'port_range_min': 'port_range_min', 'protocol': 'protocol', 'remote_group_id': 'remote_group_id', 'remote_address_group_id': 'remote_address_group_id', 'remote_ip_prefix': 'remote_ip_prefix', 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', 'used_in_default_sg': 'used_in_default_sg', 'used_in_non_default_sg': 'used_in_non_default_sg', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = default_security_group_rule.DefaultSecurityGroupRule(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['direction'], sot.direction) self.assertEqual(EXAMPLE['ethertype'], sot.ether_type) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['port_range_max'], sot.port_range_max) self.assertEqual(EXAMPLE['port_range_min'], sot.port_range_min) self.assertEqual(EXAMPLE['protocol'], sot.protocol) 
self.assertEqual(EXAMPLE['remote_group_id'], sot.remote_group_id) self.assertEqual( EXAMPLE['remote_address_group_id'], sot.remote_address_group_id ) self.assertEqual(EXAMPLE['remote_ip_prefix'], sot.remote_ip_prefix) self.assertEqual(EXAMPLE['used_in_default_sg'], sot.used_in_default_sg) self.assertEqual( EXAMPLE['used_in_non_default_sg'], sot.used_in_non_default_sg ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_extension.py0000664000175000017500000000322100000000000026002 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import extension from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'alias': '1', 'description': '2', 'links': [], 'name': '4', 'updated': '2016-03-09T12:14:57.233772', } class TestExtension(base.TestCase): def test_basic(self): sot = extension.Extension() self.assertEqual('extension', sot.resource_key) self.assertEqual('extensions', sot.resources_key) self.assertEqual('/extensions', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = extension.Extension(**EXAMPLE) self.assertEqual(EXAMPLE['alias'], sot.id) self.assertEqual(EXAMPLE['alias'], sot.alias) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['updated'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_firewall_group.py0000664000175000017500000000420100000000000027006 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import testtools from openstack.network.v2 import firewall_group IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'name': '2', 'egress_firewall_policy_id': '3', 'ingress_firewall_policy_id': '4', 'shared': True, 'status': 'ACTIVE', 'ports': ['5', '6'], 'project_id': '7', } class TestFirewallGroup(testtools.TestCase): def test_basic(self): sot = firewall_group.FirewallGroup() self.assertEqual('firewall_group', sot.resource_key) self.assertEqual('firewall_groups', sot.resources_key) self.assertEqual('/fwaas/firewall_groups', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = firewall_group.FirewallGroup(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual( EXAMPLE['egress_firewall_policy_id'], sot.egress_firewall_policy_id ) self.assertEqual( EXAMPLE['ingress_firewall_policy_id'], sot.ingress_firewall_policy_id, ) self.assertEqual(EXAMPLE['shared'], sot.shared) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(list, type(sot.ports)) self.assertEqual(EXAMPLE['ports'], sot.ports) self.assertEqual(EXAMPLE['project_id'], sot.project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_firewall_policy.py0000664000175000017500000000353700000000000027164 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools from openstack.network.v2 import firewall_policy EXAMPLE = { 'description': '1', 'name': '2', 'firewall_rules': [ 'a30b0ec2-a468-4b1c-8dbf-928ded2a57a8', '8d562e98-24f3-46e1-bbf3-d9347c0a67ee', ], 'shared': True, 'project_id': '4', } class TestFirewallPolicy(testtools.TestCase): def test_basic(self): sot = firewall_policy.FirewallPolicy() self.assertEqual('firewall_policy', sot.resource_key) self.assertEqual('firewall_policies', sot.resources_key) self.assertEqual('/fwaas/firewall_policies', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = firewall_policy.FirewallPolicy(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['firewall_rules'], sot.firewall_rules) self.assertEqual(EXAMPLE['shared'], sot.shared) self.assertEqual(list, type(sot.firewall_rules)) self.assertEqual(EXAMPLE['project_id'], sot.project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_firewall_rule.py0000664000175000017500000000454400000000000026633 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools from openstack.network.v2 import firewall_rule EXAMPLE = { 'action': 'allow', 'description': '1', 'destination_ip_address': '10.0.0.2/24', 'destination_port': '2', 'name': '3', 'enabled': True, 'ip_version': 4, 'protocol': 'tcp', 'shared': True, 'source_ip_address': '10.0.1.2/24', 'source_port': '5', 'project_id': '6', } class TestFirewallRule(testtools.TestCase): def test_basic(self): sot = firewall_rule.FirewallRule() self.assertEqual('firewall_rule', sot.resource_key) self.assertEqual('firewall_rules', sot.resources_key) self.assertEqual('/fwaas/firewall_rules', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = firewall_rule.FirewallRule(**EXAMPLE) self.assertEqual(EXAMPLE['action'], sot.action) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual( EXAMPLE['destination_ip_address'], sot.destination_ip_address ) self.assertEqual(EXAMPLE['destination_port'], sot.destination_port) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['enabled'], sot.enabled) self.assertEqual(EXAMPLE['ip_version'], sot.ip_version) self.assertEqual(EXAMPLE['protocol'], sot.protocol) self.assertEqual(EXAMPLE['shared'], sot.shared) self.assertEqual(EXAMPLE['source_ip_address'], sot.source_ip_address) self.assertEqual(EXAMPLE['source_port'], sot.source_port) self.assertEqual(EXAMPLE['project_id'], sot.project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_flavor.py0000664000175000017500000000652700000000000025273 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.network.v2 import flavor from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE_WITH_OPTIONAL = { 'id': IDENTIFIER, 'name': 'test-flavor', 'service_type': 'VPN', 'description': 'VPN flavor', 'enabled': True, 'service_profiles': ['1', '2'], } EXAMPLE = { 'id': IDENTIFIER, 'name': 'test-flavor', 'service_type': 'VPN', } class TestFlavor(base.TestCase): def test_basic(self): flavors = flavor.Flavor() self.assertEqual('flavor', flavors.resource_key) self.assertEqual('flavors', flavors.resources_key) self.assertEqual('/flavors', flavors.base_path) self.assertTrue(flavors.allow_create) self.assertTrue(flavors.allow_fetch) self.assertTrue(flavors.allow_commit) self.assertTrue(flavors.allow_delete) self.assertTrue(flavors.allow_list) def test_make_it(self): flavors = flavor.Flavor(**EXAMPLE) self.assertEqual(EXAMPLE['name'], flavors.name) self.assertEqual(EXAMPLE['service_type'], flavors.service_type) def test_make_it_with_optional(self): flavors = flavor.Flavor(**EXAMPLE_WITH_OPTIONAL) self.assertEqual(EXAMPLE_WITH_OPTIONAL['name'], flavors.name) self.assertEqual( EXAMPLE_WITH_OPTIONAL['service_type'], flavors.service_type ) self.assertEqual( EXAMPLE_WITH_OPTIONAL['description'], flavors.description 
) self.assertEqual(EXAMPLE_WITH_OPTIONAL['enabled'], flavors.is_enabled) self.assertEqual( EXAMPLE_WITH_OPTIONAL['service_profiles'], flavors.service_profile_ids, ) def test_associate_flavor_with_service_profile(self): flav = flavor.Flavor(EXAMPLE) response = mock.Mock() response.body = { 'service_profile': {'id': '1'}, } response.json = mock.Mock(return_value=response.body) sess = mock.Mock() sess.post = mock.Mock(return_value=response) flav.id = 'IDENTIFIER' self.assertEqual( response.body, flav.associate_flavor_with_service_profile(sess, '1'), ) url = 'flavors/IDENTIFIER/service_profiles' sess.post.assert_called_with(url, json=response.body) def test_disassociate_flavor_from_service_profile(self): flav = flavor.Flavor(EXAMPLE) response = mock.Mock() response.json = mock.Mock(return_value=response.body) sess = mock.Mock() sess.post = mock.Mock(return_value=response) flav.id = 'IDENTIFIER' self.assertEqual( None, flav.disassociate_flavor_from_service_profile(sess, '1') ) url = 'flavors/IDENTIFIER/service_profiles/1' sess.delete.assert_called_with( url, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_floating_ip.py0000664000175000017500000001170100000000000026263 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.network.v2 import floating_ip from openstack import proxy from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'created_at': '0', 'fixed_ip_address': '1', 'floating_ip_address': '127.0.0.1', 'floating_network_id': '3', 'id': IDENTIFIER, 'port_id': '5', 'qos_policy_id': '51', 'project_id': '6', 'router_id': '7', 'description': '8', 'dns_domain': '9', 'dns_name': '10', 'status': 'ACTIVE', 'revision_number': 12, 'updated_at': '13', 'subnet_id': '14', 'tags': ['15', '16'], } class TestFloatingIP(base.TestCase): def test_basic(self): sot = floating_ip.FloatingIP() self.assertEqual('floatingip', sot.resource_key) self.assertEqual('floatingips', sot.resources_key) self.assertEqual('/floatingips', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = floating_ip.FloatingIP(**EXAMPLE) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['fixed_ip_address'], sot.fixed_ip_address) self.assertEqual( EXAMPLE['floating_ip_address'], sot.floating_ip_address ) self.assertEqual( EXAMPLE['floating_network_id'], sot.floating_network_id ) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['port_id'], sot.port_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['router_id'], sot.router_id) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['dns_domain'], sot.dns_domain) self.assertEqual(EXAMPLE['dns_name'], sot.dns_name) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE['subnet_id'], sot.subnet_id) self.assertEqual(EXAMPLE['tags'], sot.tags) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'description': 
'description', 'project_id': 'project_id', 'tenant_id': 'project_id', 'status': 'status', 'port_id': 'port_id', 'subnet_id': 'subnet_id', 'router_id': 'router_id', 'fixed_ip_address': 'fixed_ip_address', 'floating_ip_address': 'floating_ip_address', 'floating_network_id': 'floating_network_id', 'tags': 'tags', 'any_tags': 'tags-any', 'not_tags': 'not-tags', 'not_any_tags': 'not-tags-any', 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', }, sot._query_mapping._mapping, ) def test_find_available(self): mock_session = mock.Mock(spec=proxy.Proxy) mock_session.get_filter = mock.Mock(return_value={}) mock_session.default_microversion = None mock_session.session = self.cloud.session data = {'id': 'one', 'floating_ip_address': '10.0.0.1'} fake_response = mock.Mock() body = {floating_ip.FloatingIP.resources_key: [data]} fake_response.json = mock.Mock(return_value=body) fake_response.status_code = 200 mock_session.get = mock.Mock(return_value=fake_response) result = floating_ip.FloatingIP.find_available(mock_session) self.assertEqual('one', result.id) mock_session.get.assert_called_with( floating_ip.FloatingIP.base_path, headers={'Accept': 'application/json'}, params={}, microversion=None, ) def test_find_available_nada(self): mock_session = mock.Mock(spec=proxy.Proxy) mock_session.default_microversion = None fake_response = mock.Mock() body = {floating_ip.FloatingIP.resources_key: []} fake_response.json = mock.Mock(return_value=body) fake_response.status_code = 200 mock_session.get = mock.Mock(return_value=fake_response) self.assertIsNone(floating_ip.FloatingIP.find_available(mock_session)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_health_monitor.py0000664000175000017500000000440600000000000027010 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import health_monitor from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'delay': '2', 'expected_codes': '3', 'http_method': '4', 'id': IDENTIFIER, 'max_retries': '6', 'pools': [{'id': '7'}], 'pool_id': '7', 'project_id': '8', 'timeout': '9', 'type': '10', 'url_path': '11', 'name': '12', } class TestHealthMonitor(base.TestCase): def test_basic(self): sot = health_monitor.HealthMonitor() self.assertEqual('healthmonitor', sot.resource_key) self.assertEqual('healthmonitors', sot.resources_key) self.assertEqual('/lbaas/healthmonitors', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = health_monitor.HealthMonitor(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['delay'], sot.delay) self.assertEqual(EXAMPLE['expected_codes'], sot.expected_codes) self.assertEqual(EXAMPLE['http_method'], sot.http_method) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['max_retries'], sot.max_retries) self.assertEqual(EXAMPLE['pools'], sot.pool_ids) self.assertEqual(EXAMPLE['pool_id'], sot.pool_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['timeout'], sot.timeout) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['url_path'], sot.url_path) self.assertEqual(EXAMPLE['name'], sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_l3_conntrack_helper.py0000664000175000017500000000311300000000000027705 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import l3_conntrack_helper from openstack.tests.unit import base EXAMPLE = { 'id': 'ct_helper_id', 'protocol': 'udp', 'port': 69, 'helper': 'tftp', } class TestL3ConntrackHelper(base.TestCase): def test_basic(self): sot = l3_conntrack_helper.ConntrackHelper() self.assertEqual('conntrack_helper', sot.resource_key) self.assertEqual('conntrack_helpers', sot.resources_key) self.assertEqual( '/routers/%(router_id)s/conntrack_helpers', sot.base_path ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = l3_conntrack_helper.ConntrackHelper(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['protocol'], sot.protocol) self.assertEqual(EXAMPLE['port'], sot.port) self.assertEqual(EXAMPLE['helper'], sot.helper) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_listener.py0000664000175000017500000000464600000000000025627 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file 
except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import listener from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'connection_limit': '2', 'default_pool_id': '3', 'description': '4', 'id': IDENTIFIER, 'loadbalancers': [{'id': '6'}], 'loadbalancer_id': '6', 'name': '7', 'project_id': '8', 'protocol': '9', 'protocol_port': '10', 'default_tls_container_ref': '11', 'sni_container_refs': [], } class TestListener(base.TestCase): def test_basic(self): sot = listener.Listener() self.assertEqual('listener', sot.resource_key) self.assertEqual('listeners', sot.resources_key) self.assertEqual('/lbaas/listeners', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = listener.Listener(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['connection_limit'], sot.connection_limit) self.assertEqual(EXAMPLE['default_pool_id'], sot.default_pool_id) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['loadbalancers'], sot.load_balancer_ids) self.assertEqual(EXAMPLE['loadbalancer_id'], sot.load_balancer_id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['protocol'], sot.protocol) self.assertEqual(EXAMPLE['protocol_port'], sot.protocol_port) self.assertEqual( EXAMPLE['default_tls_container_ref'], 
sot.default_tls_container_ref ) self.assertEqual(EXAMPLE['sni_container_refs'], sot.sni_container_refs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_load_balancer.py0000664000175000017500000000457600000000000026552 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import load_balancer from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'description': '2', 'id': IDENTIFIER, 'listeners': [{'id', '4'}], 'name': '5', 'operating_status': '6', 'provisioning_status': '7', 'project_id': '8', 'vip_address': '9', 'vip_subnet_id': '10', 'vip_port_id': '11', 'provider': '12', 'pools': [{'id', '13'}], } class TestLoadBalancer(base.TestCase): def test_basic(self): sot = load_balancer.LoadBalancer() self.assertEqual('loadbalancer', sot.resource_key) self.assertEqual('loadbalancers', sot.resources_key) self.assertEqual('/lbaas/loadbalancers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = load_balancer.LoadBalancer(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['listeners'], sot.listener_ids) 
self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['operating_status'], sot.operating_status) self.assertEqual( EXAMPLE['provisioning_status'], sot.provisioning_status ) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['vip_address'], sot.vip_address) self.assertEqual(EXAMPLE['vip_subnet_id'], sot.vip_subnet_id) self.assertEqual(EXAMPLE['vip_port_id'], sot.vip_port_id) self.assertEqual(EXAMPLE['provider'], sot.provider) self.assertEqual(EXAMPLE['pools'], sot.pool_ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_local_ip.py0000664000175000017500000000541200000000000025554 0ustar00zuulzuul00000000000000# Copyright 2021 Huawei, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from openstack.network.v2 import local_ip from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'created_at': '0', 'id': IDENTIFIER, 'name': '1', 'description': '2', 'project_id': '3', 'local_port_id': '4', 'network_id': '5', 'local_ip_address': '127.0.0.1', 'ip_mode': 'translate', 'revision_number': '6', 'updated_at': '7', } class TestLocalIP(base.TestCase): def test_basic(self): sot = local_ip.LocalIP() self.assertEqual('local_ip', sot.resource_key) self.assertEqual('local_ips', sot.resources_key) self.assertEqual('/local_ips', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { "name": "name", "description": "description", "project_id": "project_id", "network_id": "network_id", "local_port_id": "local_port_id", "local_ip_address": "local_ip_address", "ip_mode": "ip_mode", "sort_key": "sort_key", "sort_dir": "sort_dir", "limit": "limit", "marker": "marker", }, sot._query_mapping._mapping, ) def test_make_it(self): sot = local_ip.LocalIP(**EXAMPLE) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['local_port_id'], sot.local_port_id) self.assertEqual(EXAMPLE['network_id'], sot.network_id) self.assertEqual(EXAMPLE['local_ip_address'], sot.local_ip_address) self.assertEqual(EXAMPLE['ip_mode'], sot.ip_mode) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_local_ip_association.py0000664000175000017500000000426100000000000030151 
0ustar00zuulzuul00000000000000# Copyright 2021 Huawei, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from openstack.network.v2 import local_ip_association from openstack.tests.unit import base EXAMPLE = { 'local_ip_id': '0', 'local_ip_address': '127.0.0.1', 'fixed_port_id': '1', 'fixed_ip': '127.0.0.2', 'host': '2', } class TestLocalIP(base.TestCase): def test_basic(self): sot = local_ip_association.LocalIPAssociation() self.assertEqual('port_association', sot.resource_key) self.assertEqual('port_associations', sot.resources_key) self.assertEqual( '/local_ips/%(local_ip_id)s/port_associations', sot.base_path ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'fixed_port_id': 'fixed_port_id', 'fixed_ip': 'fixed_ip', 'host': 'host', 'limit': 'limit', 'marker': 'marker', 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = local_ip_association.LocalIPAssociation(**EXAMPLE) self.assertEqual(EXAMPLE['local_ip_id'], sot.local_ip_id) self.assertEqual(EXAMPLE['local_ip_address'], sot.local_ip_address) self.assertEqual(EXAMPLE['fixed_port_id'], sot.fixed_port_id) self.assertEqual(EXAMPLE['fixed_ip'], sot.fixed_ip) self.assertEqual(EXAMPLE['host'], sot.host) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_metering_label.py0000664000175000017500000000320200000000000026736 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import metering_label from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'id': IDENTIFIER, 'name': '3', 'project_id': '4', 'shared': False, } class TestMeteringLabel(base.TestCase): def test_basic(self): sot = metering_label.MeteringLabel() self.assertEqual('metering_label', sot.resource_key) self.assertEqual('metering_labels', sot.resources_key) self.assertEqual('/metering/metering-labels', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = metering_label.MeteringLabel(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['shared'], sot.is_shared) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_metering_label_rule.py0000664000175000017500000000534500000000000027777 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may 
# not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import metering_label_rule from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'direction': '1', 'excluded': False, 'id': IDENTIFIER, 'metering_label_id': '4', 'project_id': '5', 'remote_ip_prefix': '6', } class TestMeteringLabelRule(base.TestCase): def test_basic(self): sot = metering_label_rule.MeteringLabelRule() self.assertEqual('metering_label_rule', sot.resource_key) self.assertEqual('metering_label_rules', sot.resources_key) self.assertEqual('/metering/metering-label-rules', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = metering_label_rule.MeteringLabelRule(**EXAMPLE) self.assertEqual(EXAMPLE['direction'], sot.direction) self.assertFalse(sot.is_excluded) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['metering_label_id'], sot.metering_label_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['remote_ip_prefix'], sot.remote_ip_prefix) def test_make_it_source_and_destination(self): custom_example = EXAMPLE.copy() custom_example["source_ip_prefix"] = "192.168.0.11/32" custom_example["destination_ip_prefix"] = "0.0.0.0/0" sot = metering_label_rule.MeteringLabelRule(**custom_example) self.assertEqual(custom_example['direction'], sot.direction) self.assertFalse(sot.is_excluded) self.assertEqual(custom_example['id'], sot.id) self.assertEqual( 
custom_example['metering_label_id'], sot.metering_label_id ) self.assertEqual(custom_example['project_id'], sot.project_id) self.assertEqual( custom_example['remote_ip_prefix'], sot.remote_ip_prefix ) self.assertEqual( custom_example['source_ip_prefix'], sot.source_ip_prefix ) self.assertEqual( custom_example['destination_ip_prefix'], sot.destination_ip_prefix ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_ndp_proxy.py0000664000175000017500000000361500000000000026017 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import ndp_proxy from openstack.tests.unit import base EXAMPLE = { 'id': 'np_id', 'name': 'np_name', 'router_id': 'router-uuid', 'port_id': 'port-uuid', 'project_id': 'project-uuid', 'description': 'fake-desc', 'created_at': '2021-12-21T19:14:57.233772', 'updated_at': '2021-12-21T19:14:57.233772', } class TestNDPProxy(base.TestCase): def test_basic(self): sot = ndp_proxy.NDPProxy() self.assertEqual('ndp_proxy', sot.resource_key) self.assertEqual('ndp_proxies', sot.resources_key) self.assertEqual('/ndp_proxies', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = ndp_proxy.NDPProxy(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['router_id'], sot.router_id) self.assertEqual(EXAMPLE['port_id'], sot.port_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_network.py0000664000175000017500000001262600000000000025470 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'availability_zone_hints': ['1', '2'], 'availability_zones': ['3'], 'created_at': '2016-03-09T12:14:57.233772', 'description': '4', 'dns_domain': '5', 'id': IDENTIFIER, 'ipv4_address_scope': '6', 'ipv6_address_scope': '7', 'is_default': False, 'mtu': 8, 'name': '9', 'port_security_enabled': True, 'project_id': '10', 'provider:network_type': '11', 'provider:physical_network': '12', 'provider:segmentation_id': '13', 'qos_policy_id': '14', 'revision_number': 15, 'router:external': True, 'segments': '16', 'shared': True, 'status': '17', 'subnets': ['18', '19'], 'updated_at': '2016-07-09T12:14:57.233772', 'vlan_transparent': False, } class TestNetwork(base.TestCase): def test_basic(self): sot = network.Network() self.assertEqual('network', sot.resource_key) self.assertEqual('networks', sot.resources_key) self.assertEqual('/networks', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = network.Network(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual( EXAMPLE['availability_zone_hints'], sot.availability_zone_hints ) self.assertEqual(EXAMPLE['availability_zones'], sot.availability_zones) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['dns_domain'], sot.dns_domain) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual( EXAMPLE['ipv4_address_scope'], sot.ipv4_address_scope_id ) self.assertEqual( EXAMPLE['ipv6_address_scope'], sot.ipv6_address_scope_id ) self.assertFalse(sot.is_default) self.assertEqual(EXAMPLE['mtu'], sot.mtu) self.assertEqual(EXAMPLE['name'], sot.name) self.assertTrue(sot.is_port_security_enabled) self.assertEqual(EXAMPLE['project_id'], 
sot.project_id) self.assertEqual( EXAMPLE['provider:network_type'], sot.provider_network_type ) self.assertEqual( EXAMPLE['provider:physical_network'], sot.provider_physical_network ) self.assertEqual( EXAMPLE['provider:segmentation_id'], sot.provider_segmentation_id ) self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertTrue(sot.is_router_external) self.assertEqual(EXAMPLE['segments'], sot.segments) self.assertTrue(sot.is_shared) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['subnets'], sot.subnet_ids) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE['vlan_transparent'], sot.is_vlan_transparent) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'description': 'description', 'name': 'name', 'project_id': 'project_id', 'status': 'status', 'ipv4_address_scope_id': 'ipv4_address_scope', 'ipv6_address_scope_id': 'ipv6_address_scope', 'is_admin_state_up': 'admin_state_up', 'is_port_security_enabled': 'port_security_enabled', 'is_router_external': 'router:external', 'is_shared': 'shared', 'provider_network_type': 'provider:network_type', 'provider_physical_network': 'provider:physical_network', 'provider_segmentation_id': 'provider:segmentation_id', 'tags': 'tags', 'any_tags': 'tags-any', 'not_tags': 'not-tags', 'not_any_tags': 'not-tags-any', 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', }, sot._query_mapping._mapping, ) class TestDHCPAgentHostingNetwork(base.TestCase): def test_basic(self): net = network.DHCPAgentHostingNetwork() self.assertEqual('network', net.resource_key) self.assertEqual('networks', net.resources_key) self.assertEqual('/agents/%(agent_id)s/dhcp-networks', net.base_path) self.assertEqual('dhcp-network', net.resource_name) self.assertFalse(net.allow_create) self.assertTrue(net.allow_fetch) self.assertFalse(net.allow_commit) self.assertFalse(net.allow_delete) self.assertTrue(net.allow_list) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_network_ip_availability.py0000664000175000017500000000612600000000000030710 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import network_ip_availability from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'network_id': IDENTIFIER, 'network_name': 'private', 'subnet_ip_availability': [], 'project_id': '5', 'total_ips': 6, 'used_ips': 10, } EXAMPLE_WITH_OPTIONAL = { 'network_id': IDENTIFIER, 'network_name': 'private', 'subnet_ip_availability': [ { "used_ips": 3, "subnet_id": "2e4db1d6-ab2d-4bb1-93bb-a003fdbc9b39", "subnet_name": "private-subnet", "ip_version": 6, "cidr": "fd91:c3ba:e818::/64", "total_ips": 18446744073709551614, } ], 'project_id': '2', 'total_ips': 1844, 'used_ips': 6, } class TestNetworkIPAvailability(base.TestCase): def test_basic(self): sot = network_ip_availability.NetworkIPAvailability() self.assertEqual('network_ip_availability', sot.resource_key) self.assertEqual('network_ip_availabilities', sot.resources_key) self.assertEqual('/network-ip-availabilities', sot.base_path) self.assertEqual('network_name', sot.name_attribute) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = 
network_ip_availability.NetworkIPAvailability(**EXAMPLE) self.assertEqual(EXAMPLE['network_id'], sot.network_id) self.assertEqual(EXAMPLE['network_name'], sot.network_name) self.assertEqual( EXAMPLE['subnet_ip_availability'], sot.subnet_ip_availability ) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['total_ips'], sot.total_ips) self.assertEqual(EXAMPLE['used_ips'], sot.used_ips) def test_make_it_with_optional(self): sot = network_ip_availability.NetworkIPAvailability( **EXAMPLE_WITH_OPTIONAL ) self.assertEqual(EXAMPLE_WITH_OPTIONAL['network_id'], sot.network_id) self.assertEqual( EXAMPLE_WITH_OPTIONAL['network_name'], sot.network_name ) self.assertEqual( EXAMPLE_WITH_OPTIONAL['subnet_ip_availability'], sot.subnet_ip_availability, ) self.assertEqual(EXAMPLE_WITH_OPTIONAL['project_id'], sot.project_id) self.assertEqual(EXAMPLE_WITH_OPTIONAL['total_ips'], sot.total_ips) self.assertEqual(EXAMPLE_WITH_OPTIONAL['used_ips'], sot.used_ips) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_network_segment_range.py0000664000175000017500000000476300000000000030371 0ustar00zuulzuul00000000000000# Copyright (c) 2018, Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import network_segment_range from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'name': '1', 'default': False, 'shared': False, 'project_id': '2', 'network_type': '3', 'physical_network': '4', 'minimum': 5, 'maximum': 6, 'used': {}, 'available': [], } class TestNetworkSegmentRange(base.TestCase): def test_basic(self): test_seg_range = network_segment_range.NetworkSegmentRange() self.assertEqual('network_segment_range', test_seg_range.resource_key) self.assertEqual( 'network_segment_ranges', test_seg_range.resources_key ) self.assertEqual('/network_segment_ranges', test_seg_range.base_path) self.assertTrue(test_seg_range.allow_create) self.assertTrue(test_seg_range.allow_fetch) self.assertTrue(test_seg_range.allow_commit) self.assertTrue(test_seg_range.allow_delete) self.assertTrue(test_seg_range.allow_list) def test_make_it(self): test_seg_range = network_segment_range.NetworkSegmentRange(**EXAMPLE) self.assertEqual(EXAMPLE['id'], test_seg_range.id) self.assertEqual(EXAMPLE['name'], test_seg_range.name) self.assertEqual(EXAMPLE['default'], test_seg_range.default) self.assertEqual(EXAMPLE['shared'], test_seg_range.shared) self.assertEqual(EXAMPLE['project_id'], test_seg_range.project_id) self.assertEqual(EXAMPLE['network_type'], test_seg_range.network_type) self.assertEqual( EXAMPLE['physical_network'], test_seg_range.physical_network ) self.assertEqual(EXAMPLE['minimum'], test_seg_range.minimum) self.assertEqual(EXAMPLE['maximum'], test_seg_range.maximum) self.assertEqual(EXAMPLE['used'], test_seg_range.used) self.assertEqual(EXAMPLE['available'], test_seg_range.available) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_pool.py0000664000175000017500000000614500000000000024747 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not 
use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import pool from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'description': '2', 'healthmonitor_id': '3-1', 'health_monitors': ['3'], 'health_monitor_status': ['4'], 'id': IDENTIFIER, 'lb_algorithm': '5', 'listeners': [{'id': '6'}], 'listener_id': '6', 'members': [{'id': '7'}], 'name': '8', 'project_id': '9', 'protocol': '10', 'provider': '11', 'session_persistence': '12', 'status': '13', 'status_description': '14', 'subnet_id': '15', 'loadbalancers': [{'id': '16'}], 'loadbalancer_id': '16', 'vip_id': '17', } class TestPool(base.TestCase): def test_basic(self): sot = pool.Pool() self.assertEqual('pool', sot.resource_key) self.assertEqual('pools', sot.resources_key) self.assertEqual('/lbaas/pools', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = pool.Pool(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['healthmonitor_id'], sot.health_monitor_id) self.assertEqual(EXAMPLE['health_monitors'], sot.health_monitor_ids) self.assertEqual( EXAMPLE['health_monitor_status'], sot.health_monitor_status ) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['lb_algorithm'], sot.lb_algorithm) self.assertEqual(EXAMPLE['listeners'], sot.listener_ids) self.assertEqual(EXAMPLE['listener_id'], 
sot.listener_id) self.assertEqual(EXAMPLE['members'], sot.member_ids) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['protocol'], sot.protocol) self.assertEqual(EXAMPLE['provider'], sot.provider) self.assertEqual( EXAMPLE['session_persistence'], sot.session_persistence ) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['status_description'], sot.status_description) self.assertEqual(EXAMPLE['subnet_id'], sot.subnet_id) self.assertEqual(EXAMPLE['loadbalancers'], sot.load_balancer_ids) self.assertEqual(EXAMPLE['loadbalancer_id'], sot.load_balancer_id) self.assertEqual(EXAMPLE['vip_id'], sot.virtual_ip_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_pool_member.py0000664000175000017500000000364700000000000026302 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import pool_member from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'address': '1', 'admin_state_up': True, 'id': IDENTIFIER, 'project_id': '4', 'protocol_port': 5, 'subnet_id': '6', 'weight': 7, 'name': '8', 'pool_id': 'FAKE_POOL', } class TestPoolMember(base.TestCase): def test_basic(self): sot = pool_member.PoolMember() self.assertEqual('member', sot.resource_key) self.assertEqual('members', sot.resources_key) self.assertEqual('/lbaas/pools/%(pool_id)s/members', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = pool_member.PoolMember(**EXAMPLE) self.assertEqual(EXAMPLE['address'], sot.address) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['protocol_port'], sot.protocol_port) self.assertEqual(EXAMPLE['subnet_id'], sot.subnet_id) self.assertEqual(EXAMPLE['weight'], sot.weight) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['pool_id'], sot.pool_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_port.py0000664000175000017500000001512000000000000024753 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import port from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'allowed_address_pairs': [{'2': 2}], 'binding:host_id': '3', 'binding:profile': {'4': 4}, 'binding:vif_details': {'5': 5}, 'binding:vif_type': '6', 'binding:vnic_type': '7', 'created_at': '2016-03-09T12:14:57.233772', 'data_plane_status': '32', 'description': '8', 'device_id': '9', 'device_owner': '10', 'device_profile': 'cyborg_device_profile_1', 'dns_assignment': [{'11': 11}], 'dns_domain': 'a11', 'dns_name': '12', 'extra_dhcp_opts': [{'13': 13}], 'fixed_ips': [{'14': '14'}], 'hardware_offload_type': None, 'id': IDENTIFIER, 'ip_allocation': 'immediate', 'mac_address': '16', 'name': '17', 'network_id': '18', 'numa_affinity_policy': False, 'port_security_enabled': True, 'qos_network_policy_id': '32', 'qos_policy_id': '21', 'propagate_uplink_status': False, 'resource_request': { 'required': ['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'], 'resources': { 'NET_BW_EGR_KILOBIT_PER_SEC': 1, 'NET_BW_IGR_KILOBIT_PER_SEC': 2, }, }, 'revision_number': 22, 'security_groups': ['23'], 'status': '25', 'project_id': '26', 'trunk_details': { 'trunk_id': '27', 'sub_ports': [ { 'port_id': '28', 'segmentation_id': 29, 'segmentation_type': '30', 'mac_address': '31', } ], }, 'updated_at': '2016-07-09T12:14:57.233772', } class TestPort(base.TestCase): def test_basic(self): sot = port.Port() self.assertEqual('port', sot.resource_key) self.assertEqual('ports', sot.resources_key) self.assertEqual('/ports', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { "binding:host_id": "binding:host_id", "binding:profile": "binding:profile", "binding:vif_details": "binding:vif_details", "binding:vif_type": 
"binding:vif_type", "binding:vnic_type": "binding:vnic_type", "description": "description", "device_id": "device_id", "device_owner": "device_owner", "fields": "fields", "fixed_ips": "fixed_ips", "id": "id", "ip_address": "ip_address", "mac_address": "mac_address", "name": "name", "network_id": "network_id", "security_groups": "security_groups", "status": "status", "subnet_id": "subnet_id", "is_admin_state_up": "admin_state_up", "is_port_security_enabled": "port_security_enabled", "project_id": "project_id", "security_group_ids": "security_groups", "limit": "limit", "marker": "marker", "any_tags": "tags-any", "not_any_tags": "not-tags-any", "not_tags": "not-tags", "tags": "tags", 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = port.Port(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual( EXAMPLE['allowed_address_pairs'], sot.allowed_address_pairs ) self.assertEqual(EXAMPLE['binding:host_id'], sot.binding_host_id) self.assertEqual(EXAMPLE['binding:profile'], sot.binding_profile) self.assertEqual( EXAMPLE['binding:vif_details'], sot.binding_vif_details ) self.assertEqual(EXAMPLE['binding:vif_type'], sot.binding_vif_type) self.assertEqual(EXAMPLE['binding:vnic_type'], sot.binding_vnic_type) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['data_plane_status'], sot.data_plane_status) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['device_id'], sot.device_id) self.assertEqual(EXAMPLE['device_owner'], sot.device_owner) self.assertEqual(EXAMPLE['device_profile'], sot.device_profile) self.assertEqual(EXAMPLE['dns_assignment'], sot.dns_assignment) self.assertEqual(EXAMPLE['dns_domain'], sot.dns_domain) self.assertEqual(EXAMPLE['dns_name'], sot.dns_name) self.assertEqual(EXAMPLE['extra_dhcp_opts'], sot.extra_dhcp_opts) self.assertEqual(EXAMPLE['fixed_ips'], sot.fixed_ips) self.assertEqual(EXAMPLE['id'], sot.id) 
self.assertEqual(EXAMPLE['ip_allocation'], sot.ip_allocation) self.assertEqual(EXAMPLE['mac_address'], sot.mac_address) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['network_id'], sot.network_id) self.assertEqual( EXAMPLE['numa_affinity_policy'], sot.numa_affinity_policy ) self.assertTrue(sot.is_port_security_enabled) self.assertEqual( EXAMPLE['qos_network_policy_id'], sot.qos_network_policy_id ) self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) self.assertEqual( EXAMPLE['propagate_uplink_status'], sot.propagate_uplink_status ) self.assertEqual(EXAMPLE['resource_request'], sot.resource_request) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertEqual(EXAMPLE['security_groups'], sot.security_group_ids) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['trunk_details'], sot.trunk_details) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_port_forwarding.py0000664000175000017500000000472400000000000027205 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import port_forwarding from openstack.tests.unit import base EXAMPLE = { 'id': 'pf_id', 'protocol': 'tcp', 'internal_ip_address': '1.2.3.4', 'floatingip_id': 'floating-ip-uuid', 'internal_port': 80, 'internal_port_id': 'internal-port-uuid', 'external_port': 8080, 'description': 'description', } class TestFloatingIP(base.TestCase): def test_basic(self): sot = port_forwarding.PortForwarding() self.assertEqual('port_forwarding', sot.resource_key) self.assertEqual('port_forwardings', sot.resources_key) self.assertEqual( '/floatingips/%(floatingip_id)s/port_forwardings', sot.base_path ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'internal_port_id': 'internal_port_id', 'external_port': 'external_port', 'limit': 'limit', 'marker': 'marker', 'protocol': 'protocol', 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = port_forwarding.PortForwarding(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['floatingip_id'], sot.floatingip_id) self.assertEqual(EXAMPLE['protocol'], sot.protocol) self.assertEqual( EXAMPLE['internal_ip_address'], sot.internal_ip_address ) self.assertEqual(EXAMPLE['internal_port'], sot.internal_port) self.assertEqual(EXAMPLE['internal_port_id'], sot.internal_port_id) self.assertEqual(EXAMPLE['external_port'], sot.external_port) self.assertEqual(EXAMPLE['description'], sot.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_proxy.py0000664000175000017500000026262600000000000025167 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid from openstack import exceptions from openstack.network.v2 import _proxy from openstack.network.v2 import address_group from openstack.network.v2 import address_scope from openstack.network.v2 import agent from openstack.network.v2 import auto_allocated_topology from openstack.network.v2 import availability_zone from openstack.network.v2 import bgp_peer from openstack.network.v2 import bgp_speaker from openstack.network.v2 import bgpvpn from openstack.network.v2 import bgpvpn_network_association from openstack.network.v2 import bgpvpn_port_association from openstack.network.v2 import bgpvpn_router_association from openstack.network.v2 import extension from openstack.network.v2 import firewall_group from openstack.network.v2 import firewall_policy from openstack.network.v2 import firewall_rule from openstack.network.v2 import flavor from openstack.network.v2 import floating_ip from openstack.network.v2 import health_monitor from openstack.network.v2 import l3_conntrack_helper from openstack.network.v2 import listener from openstack.network.v2 import load_balancer from openstack.network.v2 import local_ip from openstack.network.v2 import local_ip_association from openstack.network.v2 import metering_label from openstack.network.v2 import metering_label_rule from openstack.network.v2 import ndp_proxy from openstack.network.v2 import network from openstack.network.v2 import network_ip_availability from openstack.network.v2 import network_segment_range from openstack.network.v2 import pool from openstack.network.v2 import pool_member 
from openstack.network.v2 import port from openstack.network.v2 import port_forwarding from openstack.network.v2 import qos_bandwidth_limit_rule from openstack.network.v2 import qos_dscp_marking_rule from openstack.network.v2 import qos_minimum_bandwidth_rule from openstack.network.v2 import qos_minimum_packet_rate_rule from openstack.network.v2 import qos_policy from openstack.network.v2 import qos_rule_type from openstack.network.v2 import quota from openstack.network.v2 import rbac_policy from openstack.network.v2 import router from openstack.network.v2 import security_group from openstack.network.v2 import security_group_rule from openstack.network.v2 import segment from openstack.network.v2 import service_profile from openstack.network.v2 import service_provider from openstack.network.v2 import subnet from openstack.network.v2 import subnet_pool from openstack.network.v2 import tap_mirror from openstack.network.v2 import vpn_endpoint_group from openstack.network.v2 import vpn_ike_policy from openstack.network.v2 import vpn_ipsec_policy from openstack.network.v2 import vpn_ipsec_site_connection from openstack.network.v2 import vpn_service from openstack import proxy as proxy_base from openstack.tests.unit import test_proxy_base QOS_POLICY_ID = 'qos-policy-id-' + uuid.uuid4().hex QOS_RULE_ID = 'qos-rule-id-' + uuid.uuid4().hex NETWORK_ID = 'network-id-' + uuid.uuid4().hex AGENT_ID = 'agent-id-' + uuid.uuid4().hex ROUTER_ID = 'router-id-' + uuid.uuid4().hex FIP_ID = 'fip-id-' + uuid.uuid4().hex CT_HELPER_ID = 'ct-helper-id-' + uuid.uuid4().hex LOCAL_IP_ID = 'lip-id-' + uuid.uuid4().hex BGPVPN_ID = 'bgpvpn-id-' + uuid.uuid4().hex class TestNetworkProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def verify_update( self, test_method, resource_type, base_path=None, *, method_args=None, method_kwargs=None, expected_args=None, expected_kwargs=None, expected_result="result", 
mock_method="openstack.network.v2._proxy.Proxy._update", ): super().verify_update( test_method, resource_type, base_path=base_path, method_args=method_args, method_kwargs=method_kwargs, expected_args=expected_args, expected_kwargs=expected_kwargs, expected_result=expected_result, mock_method=mock_method, ) def verify_delete( self, test_method, resource_type, ignore_missing=True, *, method_args=None, method_kwargs=None, expected_args=None, expected_kwargs=None, mock_method="openstack.network.v2._proxy.Proxy._delete", ): super().verify_delete( test_method, resource_type, ignore_missing=ignore_missing, method_args=method_args, method_kwargs=method_kwargs, expected_args=expected_args, expected_kwargs=expected_kwargs, mock_method=mock_method, ) class TestNetworkAddressGroup(TestNetworkProxy): def test_address_group_create_attrs(self): self.verify_create( self.proxy.create_address_group, address_group.AddressGroup ) def test_address_group_delete(self): self.verify_delete( self.proxy.delete_address_group, address_group.AddressGroup, False ) def test_address_group_delete_ignore(self): self.verify_delete( self.proxy.delete_address_group, address_group.AddressGroup, True ) def test_address_group_find(self): self.verify_find( self.proxy.find_address_group, address_group.AddressGroup ) def test_address_group_get(self): self.verify_get( self.proxy.get_address_group, address_group.AddressGroup ) def test_address_groups(self): self.verify_list(self.proxy.address_groups, address_group.AddressGroup) def test_address_group_update(self): self.verify_update( self.proxy.update_address_group, address_group.AddressGroup ) @mock.patch( 'openstack.network.v2._proxy.Proxy.add_addresses_to_address_group' ) def test_add_addresses_to_address_group(self, add_addresses): data = mock.sentinel self.proxy.add_addresses_to_address_group( address_group.AddressGroup, data ) add_addresses.assert_called_once_with(address_group.AddressGroup, data) @mock.patch( 'openstack.network.v2._proxy.Proxy.' 
'remove_addresses_from_address_group'
    )
    def test_remove_addresses_from_address_group(self, remove_addresses):
        # Sentinel stands in for the addresses payload; only pass-through
        # to the patched proxy helper is asserted.
        data = mock.sentinel
        self.proxy.remove_addresses_from_address_group(
            address_group.AddressGroup, data
        )
        remove_addresses.assert_called_once_with(
            address_group.AddressGroup, data
        )


class TestNetworkAddressScope(TestNetworkProxy):
    """CRUD proxy-method tests for address scopes."""

    def test_address_scope_create_attrs(self):
        self.verify_create(
            self.proxy.create_address_scope, address_scope.AddressScope
        )

    def test_address_scope_delete(self):
        self.verify_delete(
            self.proxy.delete_address_scope, address_scope.AddressScope, False
        )

    def test_address_scope_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_address_scope, address_scope.AddressScope, True
        )

    def test_address_scope_find(self):
        self.verify_find(
            self.proxy.find_address_scope, address_scope.AddressScope
        )

    def test_address_scope_get(self):
        self.verify_get(
            self.proxy.get_address_scope, address_scope.AddressScope
        )

    def test_address_scopes(self):
        self.verify_list(self.proxy.address_scopes, address_scope.AddressScope)

    def test_address_scope_update(self):
        self.verify_update(
            self.proxy.update_address_scope, address_scope.AddressScope
        )


class TestNetworkAgent(TestNetworkProxy):
    """Proxy-method tests for network agents (no create/find)."""

    def test_agent_delete(self):
        self.verify_delete(self.proxy.delete_agent, agent.Agent, True)

    def test_agent_get(self):
        self.verify_get(self.proxy.get_agent, agent.Agent)

    def test_agents(self):
        self.verify_list(self.proxy.agents, agent.Agent)

    def test_agent_update(self):
        self.verify_update(self.proxy.update_agent, agent.Agent)


class TestNetworkAvailability(TestNetworkProxy):
    """List tests for availability zones and DHCP-agent associations."""

    def test_availability_zones(self):
        self.verify_list(
            self.proxy.availability_zones, availability_zone.AvailabilityZone
        )

    def test_dhcp_agent_hosting_networks(self):
        # 'agent' keyword is translated to the resource's agent_id URI param.
        self.verify_list(
            self.proxy.dhcp_agent_hosting_networks,
            network.DHCPAgentHostingNetwork,
            method_kwargs={'agent': AGENT_ID},
            expected_kwargs={'agent_id': AGENT_ID},
        )

    def test_network_hosting_dhcp_agents(self):
        self.verify_list(
            self.proxy.network_hosting_dhcp_agents,
            agent.NetworkHostingDHCPAgent,
            method_kwargs={'network': NETWORK_ID},
            expected_kwargs={'network_id': NETWORK_ID},
        )


class TestNetworkExtension(TestNetworkProxy):
    """Tests for extensions plus the floating-IP proxy methods."""

    def test_extension_find(self):
        self.verify_find(self.proxy.find_extension, extension.Extension)

    def test_extensions(self):
        self.verify_list(self.proxy.extensions, extension.Extension)

    def test_floating_ip_create_attrs(self):
        self.verify_create(self.proxy.create_ip, floating_ip.FloatingIP)

    def test_floating_ip_delete(self):
        # if_revision defaults to None when the caller does not pass one.
        self.verify_delete(
            self.proxy.delete_ip,
            floating_ip.FloatingIP,
            False,
            expected_kwargs={'if_revision': None},
        )

    def test_floating_ip_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_ip,
            floating_ip.FloatingIP,
            True,
            expected_kwargs={'if_revision': None},
        )

    def test_floating_ip_delete_if_revision(self):
        self.verify_delete(
            self.proxy.delete_ip,
            floating_ip.FloatingIP,
            True,
            method_kwargs={'if_revision': 42},
            expected_kwargs={'if_revision': 42},
        )

    def test_floating_ip_find(self):
        self.verify_find(self.proxy.find_ip, floating_ip.FloatingIP)

    def test_floating_ip_get(self):
        self.verify_get(self.proxy.get_ip, floating_ip.FloatingIP)

    def test_ips(self):
        self.verify_list(self.proxy.ips, floating_ip.FloatingIP)

    def test_floating_ip_update(self):
        self.verify_update(
            self.proxy.update_ip,
            floating_ip.FloatingIP,
            expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None},
        )

    def test_floating_ip_update_if_revision(self):
        self.verify_update(
            self.proxy.update_ip,
            floating_ip.FloatingIP,
            method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42},
            expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42},
        )


class TestNetworkHealthMonitor(TestNetworkProxy):
    """CRUD proxy-method tests for LBaaS health monitors."""

    def test_health_monitor_create_attrs(self):
        self.verify_create(
            self.proxy.create_health_monitor, health_monitor.HealthMonitor
        )

    def test_health_monitor_delete(self):
        self.verify_delete(
            self.proxy.delete_health_monitor,
            health_monitor.HealthMonitor,
            False,
        )

    def test_health_monitor_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_health_monitor,
            health_monitor.HealthMonitor,
            True,
        )

    def test_health_monitor_find(self):
        self.verify_find(
            self.proxy.find_health_monitor, health_monitor.HealthMonitor
        )

    def test_health_monitor_get(self):
        self.verify_get(
            self.proxy.get_health_monitor, health_monitor.HealthMonitor
        )

    def test_health_monitors(self):
        self.verify_list(
            self.proxy.health_monitors, health_monitor.HealthMonitor
        )

    def test_health_monitor_update(self):
        self.verify_update(
            self.proxy.update_health_monitor, health_monitor.HealthMonitor
        )


class TestNetworkListener(TestNetworkProxy):
    """CRUD proxy-method tests for LBaaS listeners."""

    def test_listener_create_attrs(self):
        self.verify_create(self.proxy.create_listener, listener.Listener)

    def test_listener_delete(self):
        self.verify_delete(
            self.proxy.delete_listener, listener.Listener, False
        )

    def test_listener_delete_ignore(self):
        self.verify_delete(self.proxy.delete_listener, listener.Listener, True)

    def test_listener_find(self):
        self.verify_find(self.proxy.find_listener, listener.Listener)

    def test_listener_get(self):
        self.verify_get(self.proxy.get_listener, listener.Listener)

    def test_listeners(self):
        self.verify_list(self.proxy.listeners, listener.Listener)

    def test_listener_update(self):
        self.verify_update(self.proxy.update_listener, listener.Listener)


class TestNetworkLoadBalancer(TestNetworkProxy):
    """CRUD proxy-method tests for LBaaS load balancers."""

    def test_load_balancer_create_attrs(self):
        self.verify_create(
            self.proxy.create_load_balancer, load_balancer.LoadBalancer
        )

    def test_load_balancer_delete(self):
        self.verify_delete(
            self.proxy.delete_load_balancer, load_balancer.LoadBalancer, False
        )

    def test_load_balancer_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_load_balancer, load_balancer.LoadBalancer, True
        )

    def test_load_balancer_find(self):
        self.verify_find(
            self.proxy.find_load_balancer, load_balancer.LoadBalancer
        )

    def test_load_balancer_get(self):
        self.verify_get(
            self.proxy.get_load_balancer, load_balancer.LoadBalancer
        )

    def test_load_balancers(self):
        self.verify_list(self.proxy.load_balancers, load_balancer.LoadBalancer)

    def test_load_balancer_update(self):
        self.verify_update(
            self.proxy.update_load_balancer, load_balancer.LoadBalancer
        )


class TestNetworkMeteringLabel(TestNetworkProxy):
    """CRUD proxy-method tests for metering labels and their rules."""

    def test_metering_label_create_attrs(self):
        self.verify_create(
            self.proxy.create_metering_label, metering_label.MeteringLabel
        )

    def test_metering_label_delete(self):
        self.verify_delete(
            self.proxy.delete_metering_label,
            metering_label.MeteringLabel,
            False,
        )

    def test_metering_label_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_metering_label,
            metering_label.MeteringLabel,
            True,
        )

    def test_metering_label_find(self):
        self.verify_find(
            self.proxy.find_metering_label, metering_label.MeteringLabel
        )

    def test_metering_label_get(self):
        self.verify_get(
            self.proxy.get_metering_label, metering_label.MeteringLabel
        )

    def test_metering_labels(self):
        self.verify_list(
            self.proxy.metering_labels, metering_label.MeteringLabel
        )

    def test_metering_label_update(self):
        self.verify_update(
            self.proxy.update_metering_label, metering_label.MeteringLabel
        )

    def test_metering_label_rule_create_attrs(self):
        self.verify_create(
            self.proxy.create_metering_label_rule,
            metering_label_rule.MeteringLabelRule,
        )

    def test_metering_label_rule_delete(self):
        self.verify_delete(
            self.proxy.delete_metering_label_rule,
            metering_label_rule.MeteringLabelRule,
            False,
        )

    def test_metering_label_rule_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_metering_label_rule,
            metering_label_rule.MeteringLabelRule,
            True,
        )

    def test_metering_label_rule_find(self):
        self.verify_find(
            self.proxy.find_metering_label_rule,
            metering_label_rule.MeteringLabelRule,
        )

    def test_metering_label_rule_get(self):
        self.verify_get(
            self.proxy.get_metering_label_rule,
            metering_label_rule.MeteringLabelRule,
        )

    def test_metering_label_rules(self):
        self.verify_list(
            self.proxy.metering_label_rules,
            metering_label_rule.MeteringLabelRule,
        )

    def test_metering_label_rule_update(self):
        self.verify_update(
            self.proxy.update_metering_label_rule,
            metering_label_rule.MeteringLabelRule,
        )


class TestNetworkNetwork(TestNetworkProxy):
    """CRUD proxy-method tests for networks, incl. if_revision handling."""

    def test_network_create_attrs(self):
        self.verify_create(self.proxy.create_network, network.Network)

    def test_network_delete(self):
        self.verify_delete(
            self.proxy.delete_network,
            network.Network,
            False,
            expected_kwargs={'if_revision': None},
        )

    def test_network_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_network,
            network.Network,
            True,
            expected_kwargs={'if_revision': None},
        )

    def test_network_delete_if_revision(self):
        self.verify_delete(
            self.proxy.delete_network,
            network.Network,
            True,
            method_kwargs={'if_revision': 42},
            expected_kwargs={'if_revision': 42},
        )

    def test_network_find(self):
        self.verify_find(self.proxy.find_network, network.Network)

    def test_network_find_with_filter(self):
        # Extra query filters must be forwarded verbatim to Proxy._find.
        self._verify(
            'openstack.proxy.Proxy._find',
            self.proxy.find_network,
            method_args=["net1"],
            method_kwargs={"project_id": "1"},
            expected_args=[network.Network, "net1"],
            expected_kwargs={"project_id": "1", "ignore_missing": True},
        )

    def test_network_get(self):
        self.verify_get(self.proxy.get_network, network.Network)

    def test_networks(self):
        self.verify_list(self.proxy.networks, network.Network)

    def test_network_update(self):
        self.verify_update(
            self.proxy.update_network,
            network.Network,
            expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None},
        )

    def test_network_update_if_revision(self):
        self.verify_update(
            self.proxy.update_network,
            network.Network,
            method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42},
            expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42},
        )


class TestNetworkFlavor(TestNetworkProxy):
    """CRUD proxy-method tests for network flavors."""

    def test_flavor_create_attrs(self):
        self.verify_create(self.proxy.create_flavor, flavor.Flavor)

    def test_flavor_delete(self):
        self.verify_delete(self.proxy.delete_flavor, flavor.Flavor, True)

    def test_flavor_find(self):
        self.verify_find(self.proxy.find_flavor, flavor.Flavor)

    def test_flavor_get(self):
        self.verify_get(self.proxy.get_flavor, flavor.Flavor)

    def test_flavor_update(self):
        self.verify_update(self.proxy.update_flavor, flavor.Flavor)

    def test_flavors(self):
        self.verify_list(self.proxy.flavors, flavor.Flavor)


class TestNetworkLocalIp(TestNetworkProxy):
    """CRUD proxy-method tests for local IPs, incl. if_revision handling."""

    def test_local_ip_create_attrs(self):
        self.verify_create(self.proxy.create_local_ip, local_ip.LocalIP)

    def test_local_ip_delete(self):
        self.verify_delete(
            self.proxy.delete_local_ip,
            local_ip.LocalIP,
            False,
            expected_kwargs={'if_revision': None},
        )

    def test_local_ip_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_local_ip,
            local_ip.LocalIP,
            True,
            expected_kwargs={'if_revision': None},
        )

    def test_local_ip_delete_if_revision(self):
        self.verify_delete(
            self.proxy.delete_local_ip,
            local_ip.LocalIP,
            True,
            method_kwargs={'if_revision': 42},
            expected_kwargs={'if_revision': 42},
        )

    def test_local_ip_find(self):
        self.verify_find(self.proxy.find_local_ip, local_ip.LocalIP)

    def test_local_ip_get(self):
        self.verify_get(self.proxy.get_local_ip, local_ip.LocalIP)

    def test_local_ips(self):
        self.verify_list(self.proxy.local_ips, local_ip.LocalIP)

    def test_local_ip_update(self):
        self.verify_update(
            self.proxy.update_local_ip,
            local_ip.LocalIP,
            expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None},
        )

    def test_local_ip_update_if_revision(self):
        self.verify_update(
            self.proxy.update_local_ip,
            local_ip.LocalIP,
            method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42},
            expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42},
        )


class TestNetworkLocalIpAssociation(TestNetworkProxy):
    """Tests for the local-IP association sub-resource (parented by
    local_ip_id)."""

    def test_local_ip_association_create_attrs(self):
        self.verify_create(
            self.proxy.create_local_ip_association,
            local_ip_association.LocalIPAssociation,
            method_kwargs={'local_ip': LOCAL_IP_ID},
            expected_kwargs={'local_ip_id': LOCAL_IP_ID},
        )

    def test_local_ip_association_delete(self):
        self.verify_delete(
            self.proxy.delete_local_ip_association,
            local_ip_association.LocalIPAssociation,
            ignore_missing=False,
            method_args=[LOCAL_IP_ID, "resource_or_id"],
            expected_args=["resource_or_id"],
            expected_kwargs={'if_revision': None, 'local_ip_id': LOCAL_IP_ID},
        )

    def test_local_ip_association_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_local_ip_association,
            local_ip_association.LocalIPAssociation,
            ignore_missing=True,
            method_args=[LOCAL_IP_ID, "resource_or_id"],
            expected_args=["resource_or_id"],
            expected_kwargs={'if_revision': None, 'local_ip_id': LOCAL_IP_ID},
        )

    def test_local_ip_association_find(self):
        # A parent LocalIP resource (not just an id) must be resolved to
        # its id when building the _find call.
        lip = local_ip.LocalIP.new(id=LOCAL_IP_ID)
        self._verify(
            'openstack.proxy.Proxy._find',
            self.proxy.find_local_ip_association,
            method_args=['local_ip_association_id', lip],
            expected_args=[
                local_ip_association.LocalIPAssociation,
                'local_ip_association_id',
            ],
            expected_kwargs={
                'ignore_missing': True,
                'local_ip_id': LOCAL_IP_ID,
            },
        )

    def test_local_ip_association_get(self):
        lip = local_ip.LocalIP.new(id=LOCAL_IP_ID)
        self._verify(
            'openstack.proxy.Proxy._get',
            self.proxy.get_local_ip_association,
            method_args=['local_ip_association_id', lip],
            expected_args=[
                local_ip_association.LocalIPAssociation,
                'local_ip_association_id',
            ],
            expected_kwargs={'local_ip_id': LOCAL_IP_ID},
        )

    def test_local_ip_associations(self):
        self.verify_list(
            self.proxy.local_ip_associations,
            local_ip_association.LocalIPAssociation,
            method_kwargs={'local_ip': LOCAL_IP_ID},
            expected_kwargs={'local_ip_id': LOCAL_IP_ID},
        )


class TestNetworkServiceProfile(TestNetworkProxy):
    """CRUD proxy-method tests for service profiles."""

    def test_service_profile_create_attrs(self):
        self.verify_create(
            self.proxy.create_service_profile, service_profile.ServiceProfile
        )

    def test_service_profile_delete(self):
        self.verify_delete(
            self.proxy.delete_service_profile,
            service_profile.ServiceProfile,
            True,
        )

    def test_service_profile_find(self):
        self.verify_find(
            self.proxy.find_service_profile, service_profile.ServiceProfile
        )

    def test_service_profile_get(self):
        self.verify_get(
            self.proxy.get_service_profile, service_profile.ServiceProfile
        )

    def test_service_profiles(self):
        self.verify_list(
            self.proxy.service_profiles, service_profile.ServiceProfile
        )

    def test_service_profile_update(self):
        self.verify_update(
            self.proxy.update_service_profile, service_profile.ServiceProfile
        )


class TestNetworkIpAvailability(TestNetworkProxy):
    """Read-only tests for network IP availability, plus pool-member
    create."""

    def test_network_ip_availability_find(self):
        self.verify_find(
            self.proxy.find_network_ip_availability,
            network_ip_availability.NetworkIPAvailability,
        )

    def test_network_ip_availability_get(self):
        self.verify_get(
            self.proxy.get_network_ip_availability,
            network_ip_availability.NetworkIPAvailability,
        )

    def test_network_ip_availabilities(self):
        self.verify_list(
            self.proxy.network_ip_availabilities,
            network_ip_availability.NetworkIPAvailability,
        )

    def test_pool_member_create_attrs(self):
        self.verify_create(
            self.proxy.create_pool_member,
            pool_member.PoolMember,
            method_kwargs={"pool": "test_id"},
            expected_kwargs={"pool_id": "test_id"},
        )


class TestNetworkPoolMember(TestNetworkProxy):
    """Tests for pool members (a sub-resource parented by pool_id)."""

    def test_pool_member_delete(self):
        self.verify_delete(
            self.proxy.delete_pool_member,
            pool_member.PoolMember,
            ignore_missing=False,
            method_kwargs={"pool": "test_id"},
            expected_kwargs={"pool_id": "test_id"},
        )

    def test_pool_member_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_pool_member,
            pool_member.PoolMember,
            ignore_missing=True,
            method_kwargs={"pool": "test_id"},
            expected_kwargs={"pool_id": "test_id"},
        )

    def test_pool_member_find(self):
        self._verify(
            'openstack.proxy.Proxy._find',
            self.proxy.find_pool_member,
            method_args=["MEMBER", "POOL"],
            expected_args=[pool_member.PoolMember, "MEMBER"],
            expected_kwargs={"pool_id": "POOL", "ignore_missing": True},
        )

    def test_pool_member_get(self):
        self._verify(
            'openstack.proxy.Proxy._get',
            self.proxy.get_pool_member,
            method_args=["MEMBER", "POOL"],
            expected_args=[pool_member.PoolMember, "MEMBER"],
            expected_kwargs={"pool_id": "POOL"},
        )

    def test_pool_members(self):
        self.verify_list(
            self.proxy.pool_members,
            pool_member.PoolMember,
            method_args=["test_id"],
            expected_args=[],
            expected_kwargs={"pool_id": "test_id"},
        )

    def test_pool_member_update(self):
        self._verify(
            "openstack.network.v2._proxy.Proxy._update",
            self.proxy.update_pool_member,
            method_args=["MEMBER", "POOL"],
            expected_args=[pool_member.PoolMember, "MEMBER"],
            expected_kwargs={"pool_id": "POOL"},
        )


class TestNetworkPool(TestNetworkProxy):
    """CRUD proxy-method tests for pools, ports and bulk port create."""

    def test_pool_create_attrs(self):
        self.verify_create(self.proxy.create_pool, pool.Pool)

    def test_pool_delete(self):
        self.verify_delete(self.proxy.delete_pool, pool.Pool, False)

    def test_pool_delete_ignore(self):
        self.verify_delete(self.proxy.delete_pool, pool.Pool, True)

    def test_pool_find(self):
        self.verify_find(self.proxy.find_pool, pool.Pool)

    def test_pool_get(self):
        self.verify_get(self.proxy.get_pool, pool.Pool)

    def test_pools(self):
        self.verify_list(self.proxy.pools, pool.Pool)

    def test_pool_update(self):
        self.verify_update(self.proxy.update_pool, pool.Pool)

    def test_port_create_attrs(self):
        self.verify_create(self.proxy.create_port, port.Port)

    def test_port_delete(self):
        self.verify_delete(
            self.proxy.delete_port,
            port.Port,
            False,
            expected_kwargs={'if_revision': None},
        )

    def test_port_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_port,
            port.Port,
            True,
            expected_kwargs={'if_revision': None},
        )

    def test_port_delete_if_revision(self):
        self.verify_delete(
            self.proxy.delete_port,
            port.Port,
            True,
            method_kwargs={'if_revision': 42},
            expected_kwargs={'if_revision': 42},
        )

    def test_port_find(self):
        self.verify_find(self.proxy.find_port, port.Port)

    def test_port_get(self):
        self.verify_get(self.proxy.get_port, port.Port)

    def test_ports(self):
        self.verify_list(self.proxy.ports, port.Port)

    def test_port_update(self):
        self.verify_update(
            self.proxy.update_port,
            port.Port,
            expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None},
        )

    def test_port_update_if_revision(self):
        self.verify_update(
            self.proxy.update_port,
            port.Port,
            method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42},
            expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42},
        )

    @mock.patch('openstack.network.v2._proxy.Proxy._bulk_create')
    def test_ports_create(self, bc):
        # Bulk port creation must delegate straight to Proxy._bulk_create.
        data = mock.sentinel
        self.proxy.create_ports(data)
        bc.assert_called_once_with(port.Port, data)


class TestNetworkQosBandwidth(TestNetworkProxy):
    """Tests for QoS bandwidth-limit rules (parented by qos_policy_id)."""

    def test_qos_bandwidth_limit_rule_create_attrs(self):
        self.verify_create(
            self.proxy.create_qos_bandwidth_limit_rule,
            qos_bandwidth_limit_rule.QoSBandwidthLimitRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_bandwidth_limit_rule_delete(self):
        self.verify_delete(
            self.proxy.delete_qos_bandwidth_limit_rule,
            qos_bandwidth_limit_rule.QoSBandwidthLimitRule,
            ignore_missing=False,
            method_args=["resource_or_id", QOS_POLICY_ID],
            expected_args=["resource_or_id"],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_bandwidth_limit_rule_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_qos_bandwidth_limit_rule,
            qos_bandwidth_limit_rule.QoSBandwidthLimitRule,
            ignore_missing=True,
            method_args=["resource_or_id", QOS_POLICY_ID],
            expected_args=["resource_or_id"],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_bandwidth_limit_rule_find(self):
        # Passing the parent policy as a resource should resolve to its id.
        policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID)
        self._verify(
            'openstack.proxy.Proxy._find',
            self.proxy.find_qos_bandwidth_limit_rule,
            method_args=['rule_id', policy],
            expected_args=[
                qos_bandwidth_limit_rule.QoSBandwidthLimitRule,
                'rule_id',
            ],
            expected_kwargs={
                'ignore_missing': True,
                'qos_policy_id': QOS_POLICY_ID,
            },
        )

    def test_qos_bandwidth_limit_rule_get(self):
        self.verify_get(
            self.proxy.get_qos_bandwidth_limit_rule,
            qos_bandwidth_limit_rule.QoSBandwidthLimitRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_bandwidth_limit_rules(self):
        self.verify_list(
            self.proxy.qos_bandwidth_limit_rules,
            qos_bandwidth_limit_rule.QoSBandwidthLimitRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_bandwidth_limit_rule_update(self):
        policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID)
        self._verify(
            'openstack.network.v2._proxy.Proxy._update',
            self.proxy.update_qos_bandwidth_limit_rule,
            method_args=['rule_id', policy],
            method_kwargs={'foo': 'bar'},
            expected_args=[
                qos_bandwidth_limit_rule.QoSBandwidthLimitRule,
                'rule_id',
            ],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID, 'foo': 'bar'},
        )


class TestNetworkQosDscpMarking(TestNetworkProxy):
    """Tests for QoS DSCP-marking rules (parented by qos_policy_id)."""

    def test_qos_dscp_marking_rule_create_attrs(self):
        self.verify_create(
            self.proxy.create_qos_dscp_marking_rule,
            qos_dscp_marking_rule.QoSDSCPMarkingRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_dscp_marking_rule_delete(self):
        self.verify_delete(
            self.proxy.delete_qos_dscp_marking_rule,
            qos_dscp_marking_rule.QoSDSCPMarkingRule,
            ignore_missing=False,
            method_args=["resource_or_id", QOS_POLICY_ID],
            expected_args=["resource_or_id"],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_dscp_marking_rule_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_qos_dscp_marking_rule,
            qos_dscp_marking_rule.QoSDSCPMarkingRule,
            ignore_missing=True,
            method_args=["resource_or_id", QOS_POLICY_ID],
            expected_args=["resource_or_id"],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_dscp_marking_rule_find(self):
        policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID)
        self._verify(
            'openstack.proxy.Proxy._find',
            self.proxy.find_qos_dscp_marking_rule,
            method_args=['rule_id', policy],
            expected_args=[
                qos_dscp_marking_rule.QoSDSCPMarkingRule,
                'rule_id',
            ],
            expected_kwargs={
                'ignore_missing': True,
                'qos_policy_id': QOS_POLICY_ID,
            },
        )

    def test_qos_dscp_marking_rule_get(self):
        self.verify_get(
            self.proxy.get_qos_dscp_marking_rule,
            qos_dscp_marking_rule.QoSDSCPMarkingRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_dscp_marking_rules(self):
        self.verify_list(
            self.proxy.qos_dscp_marking_rules,
            qos_dscp_marking_rule.QoSDSCPMarkingRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_dscp_marking_rule_update(self):
        policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID)
        self._verify(
            'openstack.network.v2._proxy.Proxy._update',
            self.proxy.update_qos_dscp_marking_rule,
            method_args=['rule_id', policy],
            method_kwargs={'foo': 'bar'},
            expected_args=[
                qos_dscp_marking_rule.QoSDSCPMarkingRule,
                'rule_id',
            ],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID, 'foo': 'bar'},
        )


class TestNetworkQosMinimumBandwidth(TestNetworkProxy):
    """Tests for QoS minimum-bandwidth rules (parented by qos_policy_id)."""

    def test_qos_minimum_bandwidth_rule_create_attrs(self):
        self.verify_create(
            self.proxy.create_qos_minimum_bandwidth_rule,
            qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_minimum_bandwidth_rule_delete(self):
        self.verify_delete(
            self.proxy.delete_qos_minimum_bandwidth_rule,
            qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule,
            ignore_missing=False,
            method_args=["resource_or_id", QOS_POLICY_ID],
            expected_args=["resource_or_id"],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_minimum_bandwidth_rule_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_qos_minimum_bandwidth_rule,
            qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule,
            ignore_missing=True,
            method_args=["resource_or_id", QOS_POLICY_ID],
            expected_args=["resource_or_id"],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_minimum_bandwidth_rule_find(self):
        policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID)
        self._verify(
            'openstack.proxy.Proxy._find',
            self.proxy.find_qos_minimum_bandwidth_rule,
            method_args=['rule_id', policy],
            expected_args=[
                qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule,
                'rule_id',
            ],
            expected_kwargs={
                'ignore_missing': True,
                'qos_policy_id': QOS_POLICY_ID,
            },
        )

    def test_qos_minimum_bandwidth_rule_get(self):
        self.verify_get(
            self.proxy.get_qos_minimum_bandwidth_rule,
            qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_minimum_bandwidth_rules(self):
        self.verify_list(
            self.proxy.qos_minimum_bandwidth_rules,
            qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_minimum_bandwidth_rule_update(self):
        policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID)
        self._verify(
            'openstack.network.v2._proxy.Proxy._update',
            self.proxy.update_qos_minimum_bandwidth_rule,
            method_args=['rule_id', policy],
            method_kwargs={'foo': 'bar'},
            expected_args=[
                qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule,
                'rule_id',
            ],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID, 'foo': 'bar'},
        )


class TestNetworkQosMinimumPacketRate(TestNetworkProxy):
    """Tests for QoS minimum-packet-rate rules (parented by
    qos_policy_id)."""

    def test_qos_minimum_packet_rate_rule_create_attrs(self):
        self.verify_create(
            self.proxy.create_qos_minimum_packet_rate_rule,
            qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_minimum_packet_rate_rule_delete(self):
        self.verify_delete(
            self.proxy.delete_qos_minimum_packet_rate_rule,
            qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule,
            ignore_missing=False,
            method_args=["resource_or_id", QOS_POLICY_ID],
            expected_args=["resource_or_id"],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_minimum_packet_rate_rule_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_qos_minimum_packet_rate_rule,
            qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule,
            ignore_missing=True,
            method_args=["resource_or_id", QOS_POLICY_ID],
            expected_args=["resource_or_id"],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_minimum_packet_rate_rule_find(self):
        policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID)
        self._verify(
            'openstack.proxy.Proxy._find',
            self.proxy.find_qos_minimum_packet_rate_rule,
            method_args=['rule_id', policy],
            expected_args=[
                qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule,
                'rule_id',
            ],
            expected_kwargs={
                'ignore_missing': True,
                'qos_policy_id': QOS_POLICY_ID,
            },
        )

    def test_qos_minimum_packet_rate_rule_get(self):
        self.verify_get(
            self.proxy.get_qos_minimum_packet_rate_rule,
            qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_minimum_packet_rate_rules(self):
        self.verify_list(
            self.proxy.qos_minimum_packet_rate_rules,
            qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule,
            method_kwargs={'qos_policy': QOS_POLICY_ID},
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID},
        )

    def test_qos_minimum_packet_rate_rule_update(self):
        policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID)
        self._verify(
            'openstack.network.v2._proxy.Proxy._update',
            self.proxy.update_qos_minimum_packet_rate_rule,
            method_args=['rule_id', policy],
            method_kwargs={'foo': 'bar'},
            expected_args=[
                qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule,
                'rule_id',
            ],
            expected_kwargs={'qos_policy_id': QOS_POLICY_ID, 'foo': 'bar'},
        )


class TestNetworkQosRuleType(TestNetworkProxy):
    """Read-only tests for QoS rule types."""

    def test_qos_rule_type_find(self):
        self.verify_find(
            self.proxy.find_qos_rule_type, qos_rule_type.QoSRuleType
        )

    def test_qos_rule_type_get(self):
        self.verify_get(
            self.proxy.get_qos_rule_type, qos_rule_type.QoSRuleType
        )

    def test_qos_rule_types(self):
        self.verify_list(self.proxy.qos_rule_types, qos_rule_type.QoSRuleType)


class TestNetworkQuota(TestNetworkProxy):
    """Tests for quotas, including the details/default variants."""

    def test_quota_delete(self):
        self.verify_delete(self.proxy.delete_quota, quota.Quota, False)

    def test_quota_delete_ignore(self):
        self.verify_delete(self.proxy.delete_quota, quota.Quota, True)

    def test_quota_get(self):
        self.verify_get(self.proxy.get_quota, quota.Quota)

    @mock.patch.object(proxy_base.Proxy, "_get_resource")
    def test_quota_get_details(self, mock_get):
        # details=True must route through QuotaDetails with requires_id=False.
        # NOTE(review): 'project' is asserted against fake_quota.id (an
        # auto-created Mock attribute), not the project_id set above —
        # self-consistent here because _get is mocked; confirm intended.
        fake_quota = mock.Mock(project_id='PROJECT')
        mock_get.return_value = fake_quota
        self._verify(
            "openstack.proxy.Proxy._get",
            self.proxy.get_quota,
            method_args=['QUOTA_ID'],
            method_kwargs={'details': True},
            expected_args=[quota.QuotaDetails],
            expected_kwargs={'project': fake_quota.id, 'requires_id': False},
        )
        mock_get.assert_called_once_with(quota.Quota, 'QUOTA_ID')

    @mock.patch.object(proxy_base.Proxy, "_get_resource")
    def test_quota_default_get(self, mock_get):
        # NOTE(review): same fake_quota.id-vs-project_id observation as in
        # test_quota_get_details above.
        fake_quota = mock.Mock(project_id='PROJECT')
        mock_get.return_value = fake_quota
        self._verify(
            "openstack.proxy.Proxy._get",
            self.proxy.get_quota_default,
            method_args=['QUOTA_ID'],
            expected_args=[quota.QuotaDefault],
            expected_kwargs={'project': fake_quota.id, 'requires_id': False},
        )
        mock_get.assert_called_once_with(quota.Quota, 'QUOTA_ID')

    def test_quotas(self):
        self.verify_list(self.proxy.quotas, quota.Quota)

    def test_quota_update(self):
        self.verify_update(self.proxy.update_quota, quota.Quota)


class TestNetworkRbacPolicy(TestNetworkProxy):
    """CRUD proxy-method tests for RBAC policies."""

    def test_rbac_policy_create_attrs(self):
        self.verify_create(
            self.proxy.create_rbac_policy, rbac_policy.RBACPolicy
        )

    def test_rbac_policy_delete(self):
        self.verify_delete(
            self.proxy.delete_rbac_policy, rbac_policy.RBACPolicy, False
        )

    def test_rbac_policy_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_rbac_policy, rbac_policy.RBACPolicy, True
        )

    def test_rbac_policy_find(self):
        self.verify_find(self.proxy.find_rbac_policy, rbac_policy.RBACPolicy)

    def test_rbac_policy_get(self):
        self.verify_get(self.proxy.get_rbac_policy, rbac_policy.RBACPolicy)

    def test_rbac_policies(self):
        self.verify_list(self.proxy.rbac_policies, rbac_policy.RBACPolicy)

    def test_rbac_policy_update(self):
        self.verify_update(
            self.proxy.update_rbac_policy, rbac_policy.RBACPolicy
        )


class TestNetworkRouter(TestNetworkProxy):
    """CRUD tests for routers plus interface/gateway helper delegation."""

    def test_router_create_attrs(self):
        self.verify_create(self.proxy.create_router, router.Router)

    def test_router_delete(self):
        self.verify_delete(
            self.proxy.delete_router,
            router.Router,
            False,
            expected_kwargs={'if_revision': None},
        )

    def test_router_delete_ignore(self):
        self.verify_delete(
            self.proxy.delete_router,
            router.Router,
            True,
            expected_kwargs={'if_revision': None},
        )

    def test_router_delete_if_revision(self):
        self.verify_delete(
            self.proxy.delete_router,
            router.Router,
            True,
            method_kwargs={'if_revision': 42},
            expected_kwargs={'if_revision': 42},
        )

    def test_router_find(self):
        self.verify_find(self.proxy.find_router, router.Router)

    def test_router_get(self):
        self.verify_get(self.proxy.get_router, router.Router)

    def test_routers(self):
        self.verify_list(self.proxy.routers, router.Router)

    def test_router_update(self):
        self.verify_update(
            self.proxy.update_router,
            router.Router,
            expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None},
        )

    def test_router_update_if_revision(self):
        self.verify_update(
            self.proxy.update_router,
            router.Router,
            method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42},
            expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42},
        )

    @mock.patch.object(proxy_base.Proxy, '_get_resource')
    @mock.patch.object(router.Router, 'add_interface')
    def test_add_interface_to_router_with_port(
        self, mock_add_interface, mock_get
    ):
        # The proxy must resolve the router then delegate to
        # Router.add_interface with the session (self.proxy) and kwargs.
        x_router = router.Router.new(id="ROUTER_ID")
        mock_get.return_value = x_router
        self._verify(
            "openstack.network.v2.router.Router.add_interface",
            self.proxy.add_interface_to_router,
            method_args=["FAKE_ROUTER"],
            method_kwargs={"port_id": "PORT"},
            expected_args=[self.proxy],
            expected_kwargs={"port_id": "PORT"},
        )
        mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER")

    @mock.patch.object(proxy_base.Proxy, '_get_resource')
    @mock.patch.object(router.Router, 'add_interface')
    def test_add_interface_to_router_with_subnet(
        self, mock_add_interface, mock_get
    ):
        x_router = router.Router.new(id="ROUTER_ID")
        mock_get.return_value = x_router
        self._verify(
            "openstack.network.v2.router.Router.add_interface",
            self.proxy.add_interface_to_router,
            method_args=["FAKE_ROUTER"],
            method_kwargs={"subnet_id": "SUBNET"},
            expected_args=[self.proxy],
            expected_kwargs={"subnet_id": "SUBNET"},
        )
        mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER")

    @mock.patch.object(proxy_base.Proxy, '_get_resource')
    @mock.patch.object(router.Router, 'remove_interface')
    def test_remove_interface_from_router_with_port(
        self, mock_remove, mock_get
    ):
        x_router = router.Router.new(id="ROUTER_ID")
        mock_get.return_value = x_router
        self._verify(
            "openstack.network.v2.router.Router.remove_interface",
            self.proxy.remove_interface_from_router,
            method_args=["FAKE_ROUTER"],
            method_kwargs={"port_id": "PORT"},
            expected_args=[self.proxy],
            expected_kwargs={"port_id": "PORT"},
        )
        mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER")

    @mock.patch.object(proxy_base.Proxy, '_get_resource')
    @mock.patch.object(router.Router, 'remove_interface')
    def test_remove_interface_from_router_with_subnet(
        self, mock_remove, mock_get
    ):
        x_router = router.Router.new(id="ROUTER_ID")
        mock_get.return_value = x_router
        self._verify(
            "openstack.network.v2.router.Router.remove_interface",
            self.proxy.remove_interface_from_router,
            method_args=["FAKE_ROUTER"],
            method_kwargs={"subnet_id": "SUBNET"},
            expected_args=[self.proxy],
            expected_kwargs={"subnet_id": "SUBNET"},
        )
        mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER")

    @mock.patch.object(proxy_base.Proxy, '_get_resource')
    @mock.patch.object(router.Router, 'add_extra_routes')
    def test_add_extra_routes_to_router(self, mock_add_extra_routes, mock_get):
        x_router = router.Router.new(id="ROUTER_ID")
        mock_get.return_value = x_router
        self._verify(
            "openstack.network.v2.router.Router.add_extra_routes",
            self.proxy.add_extra_routes_to_router,
            method_args=["FAKE_ROUTER"],
            method_kwargs={"body": {"router": {"routes": []}}},
            expected_args=[self.proxy],
            expected_kwargs={"body": {"router": {"routes": []}}},
        )
        mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER")
@mock.patch.object(proxy_base.Proxy, '_get_resource') @mock.patch.object(router.Router, 'remove_extra_routes') def test_remove_extra_routes_from_router( self, mock_remove_extra_routes, mock_get ): x_router = router.Router.new(id="ROUTER_ID") mock_get.return_value = x_router self._verify( "openstack.network.v2.router.Router.remove_extra_routes", self.proxy.remove_extra_routes_from_router, method_args=["FAKE_ROUTER"], method_kwargs={"body": {"router": {"routes": []}}}, expected_args=[self.proxy], expected_kwargs={"body": {"router": {"routes": []}}}, ) mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") @mock.patch.object(proxy_base.Proxy, '_get_resource') @mock.patch.object(router.Router, 'add_gateway') def test_add_gateway_to_router(self, mock_add, mock_get): x_router = router.Router.new(id="ROUTER_ID") mock_get.return_value = x_router self._verify( "openstack.network.v2.router.Router.add_gateway", self.proxy.add_gateway_to_router, method_args=["FAKE_ROUTER"], method_kwargs={"foo": "bar"}, expected_args=[self.proxy], expected_kwargs={"foo": "bar"}, ) mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") @mock.patch.object(proxy_base.Proxy, '_get_resource') @mock.patch.object(router.Router, 'remove_gateway') def test_remove_gateway_from_router(self, mock_remove, mock_get): x_router = router.Router.new(id="ROUTER_ID") mock_get.return_value = x_router self._verify( "openstack.network.v2.router.Router.remove_gateway", self.proxy.remove_gateway_from_router, method_args=["FAKE_ROUTER"], method_kwargs={"foo": "bar"}, expected_args=[self.proxy], expected_kwargs={"foo": "bar"}, ) mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") @mock.patch.object(proxy_base.Proxy, '_get_resource') @mock.patch.object(router.Router, 'add_external_gateways') def test_add_external_gateways(self, mock_add, mock_get): x_router = router.Router.new(id="ROUTER_ID") mock_get.return_value = x_router self._verify( 
"openstack.network.v2.router.Router.add_external_gateways", self.proxy.add_external_gateways, method_args=["FAKE_ROUTER", "bar"], expected_args=[self.proxy, "bar"], ) mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") @mock.patch.object(proxy_base.Proxy, '_get_resource') @mock.patch.object(router.Router, 'update_external_gateways') def test_update_external_gateways(self, mock_remove, mock_get): x_router = router.Router.new(id="ROUTER_ID") mock_get.return_value = x_router self._verify( "openstack.network.v2.router.Router.update_external_gateways", self.proxy.update_external_gateways, method_args=["FAKE_ROUTER", "bar"], expected_args=[self.proxy, "bar"], ) mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") @mock.patch.object(proxy_base.Proxy, '_get_resource') @mock.patch.object(router.Router, 'remove_external_gateways') def test_remove_external_gateways(self, mock_remove, mock_get): x_router = router.Router.new(id="ROUTER_ID") mock_get.return_value = x_router self._verify( "openstack.network.v2.router.Router.remove_external_gateways", self.proxy.remove_external_gateways, method_args=["FAKE_ROUTER", "bar"], expected_args=[self.proxy, "bar"], ) mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") def test_router_hosting_l3_agents_list(self): self.verify_list( self.proxy.routers_hosting_l3_agents, agent.RouterL3Agent, method_kwargs={'router': ROUTER_ID}, expected_kwargs={'router_id': ROUTER_ID}, ) def test_agent_hosted_routers_list(self): self.verify_list( self.proxy.agent_hosted_routers, router.L3AgentRouter, method_kwargs={'agent': AGENT_ID}, expected_kwargs={'agent_id': AGENT_ID}, ) class TestNetworkFirewallGroup(TestNetworkProxy): def test_firewall_group_create_attrs(self): self.verify_create( self.proxy.create_firewall_group, firewall_group.FirewallGroup ) def test_firewall_group_delete(self): self.verify_delete( self.proxy.delete_firewall_group, firewall_group.FirewallGroup, False, ) def test_firewall_group_delete_ignore(self): 
self.verify_delete( self.proxy.delete_firewall_group, firewall_group.FirewallGroup, True, ) def test_firewall_group_find(self): self.verify_find( self.proxy.find_firewall_group, firewall_group.FirewallGroup ) def test_firewall_group_get(self): self.verify_get( self.proxy.get_firewall_group, firewall_group.FirewallGroup ) def test_firewall_groups(self): self.verify_list( self.proxy.firewall_groups, firewall_group.FirewallGroup ) def test_firewall_group_update(self): self.verify_update( self.proxy.update_firewall_group, firewall_group.FirewallGroup ) class TestNetworkPolicy(TestNetworkProxy): def test_firewall_policy_create_attrs(self): self.verify_create( self.proxy.create_firewall_policy, firewall_policy.FirewallPolicy ) def test_firewall_policy_delete(self): self.verify_delete( self.proxy.delete_firewall_policy, firewall_policy.FirewallPolicy, False, ) def test_firewall_policy_delete_ignore(self): self.verify_delete( self.proxy.delete_firewall_policy, firewall_policy.FirewallPolicy, True, ) def test_firewall_policy_find(self): self.verify_find( self.proxy.find_firewall_policy, firewall_policy.FirewallPolicy ) def test_firewall_policy_get(self): self.verify_get( self.proxy.get_firewall_policy, firewall_policy.FirewallPolicy ) def test_firewall_policies(self): self.verify_list( self.proxy.firewall_policies, firewall_policy.FirewallPolicy ) def test_firewall_policy_update(self): self.verify_update( self.proxy.update_firewall_policy, firewall_policy.FirewallPolicy ) class TestNetworkRule(TestNetworkProxy): def test_firewall_rule_create_attrs(self): self.verify_create( self.proxy.create_firewall_rule, firewall_rule.FirewallRule ) def test_firewall_rule_delete(self): self.verify_delete( self.proxy.delete_firewall_rule, firewall_rule.FirewallRule, False ) def test_firewall_rule_delete_ignore(self): self.verify_delete( self.proxy.delete_firewall_rule, firewall_rule.FirewallRule, True ) def test_firewall_rule_find(self): self.verify_find( self.proxy.find_firewall_rule, 
firewall_rule.FirewallRule ) def test_firewall_rule_get(self): self.verify_get( self.proxy.get_firewall_rule, firewall_rule.FirewallRule ) def test_firewall_rules(self): self.verify_list(self.proxy.firewall_rules, firewall_rule.FirewallRule) def test_firewall_rule_update(self): self.verify_update( self.proxy.update_firewall_rule, firewall_rule.FirewallRule ) class TestNetworkNetworkSegment(TestNetworkProxy): def test_network_segment_range_create_attrs(self): self.verify_create( self.proxy.create_network_segment_range, network_segment_range.NetworkSegmentRange, ) def test_network_segment_range_delete(self): self.verify_delete( self.proxy.delete_network_segment_range, network_segment_range.NetworkSegmentRange, False, ) def test_network_segment_range_delete_ignore(self): self.verify_delete( self.proxy.delete_network_segment_range, network_segment_range.NetworkSegmentRange, True, ) def test_network_segment_range_find(self): self.verify_find( self.proxy.find_network_segment_range, network_segment_range.NetworkSegmentRange, ) def test_network_segment_range_get(self): self.verify_get( self.proxy.get_network_segment_range, network_segment_range.NetworkSegmentRange, ) def test_network_segment_ranges(self): self.verify_list( self.proxy.network_segment_ranges, network_segment_range.NetworkSegmentRange, ) def test_network_segment_range_update(self): self.verify_update( self.proxy.update_network_segment_range, network_segment_range.NetworkSegmentRange, ) class TestNetworkSecurityGroup(TestNetworkProxy): def test_security_group_create_attrs(self): self.verify_create( self.proxy.create_security_group, security_group.SecurityGroup ) def test_security_group_delete(self): self.verify_delete( self.proxy.delete_security_group, security_group.SecurityGroup, False, expected_kwargs={'if_revision': None}, ) def test_security_group_delete_ignore(self): self.verify_delete( self.proxy.delete_security_group, security_group.SecurityGroup, True, expected_kwargs={'if_revision': None}, ) def 
test_security_group_delete_if_revision(self): self.verify_delete( self.proxy.delete_security_group, security_group.SecurityGroup, True, method_kwargs={'if_revision': 42}, expected_kwargs={'if_revision': 42}, ) def test_security_group_find(self): self.verify_find( self.proxy.find_security_group, security_group.SecurityGroup ) def test_security_group_get(self): self.verify_get( self.proxy.get_security_group, security_group.SecurityGroup ) def test_security_groups(self): self.verify_list( self.proxy.security_groups, security_group.SecurityGroup ) def test_security_group_update(self): self.verify_update( self.proxy.update_security_group, security_group.SecurityGroup, expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None}, ) def test_security_group_update_if_revision(self): self.verify_update( self.proxy.update_security_group, security_group.SecurityGroup, method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, ) def test_security_group_rule_create_attrs(self): self.verify_create( self.proxy.create_security_group_rule, security_group_rule.SecurityGroupRule, ) def test_security_group_rule_delete(self): self.verify_delete( self.proxy.delete_security_group_rule, security_group_rule.SecurityGroupRule, False, expected_kwargs={'if_revision': None}, ) def test_security_group_rule_delete_ignore(self): self.verify_delete( self.proxy.delete_security_group_rule, security_group_rule.SecurityGroupRule, True, expected_kwargs={'if_revision': None}, ) def test_security_group_rule_delete_if_revision(self): self.verify_delete( self.proxy.delete_security_group_rule, security_group_rule.SecurityGroupRule, True, method_kwargs={'if_revision': 42}, expected_kwargs={'if_revision': 42}, ) def test_security_group_rule_find(self): self.verify_find( self.proxy.find_security_group_rule, security_group_rule.SecurityGroupRule, ) def test_security_group_rule_get(self): self.verify_get( self.proxy.get_security_group_rule, 
security_group_rule.SecurityGroupRule, ) def test_security_group_rules(self): self.verify_list( self.proxy.security_group_rules, security_group_rule.SecurityGroupRule, ) @mock.patch('openstack.network.v2._proxy.Proxy._bulk_create') def test_security_group_rules_create(self, bc): data = mock.sentinel self.proxy.create_security_group_rules(data) bc.assert_called_once_with(security_group_rule.SecurityGroupRule, data) class TestNetworkSegment(TestNetworkProxy): def test_segment_create_attrs(self): self.verify_create(self.proxy.create_segment, segment.Segment) def test_segment_delete(self): self.verify_delete(self.proxy.delete_segment, segment.Segment, False) def test_segment_delete_ignore(self): self.verify_delete(self.proxy.delete_segment, segment.Segment, True) def test_segment_find(self): self.verify_find(self.proxy.find_segment, segment.Segment) def test_segment_get(self): self.verify_get(self.proxy.get_segment, segment.Segment) def test_segments(self): self.verify_list(self.proxy.segments, segment.Segment) def test_segment_update(self): self.verify_update(self.proxy.update_segment, segment.Segment) class TestNetworkSubnet(TestNetworkProxy): def test_subnet_create_attrs(self): self.verify_create(self.proxy.create_subnet, subnet.Subnet) def test_subnet_delete(self): self.verify_delete( self.proxy.delete_subnet, subnet.Subnet, False, expected_kwargs={'if_revision': None}, ) def test_subnet_delete_ignore(self): self.verify_delete( self.proxy.delete_subnet, subnet.Subnet, True, expected_kwargs={'if_revision': None}, ) def test_subnet_delete_if_revision(self): self.verify_delete( self.proxy.delete_subnet, subnet.Subnet, True, method_kwargs={'if_revision': 42}, expected_kwargs={'if_revision': 42}, ) def test_subnet_find(self): self.verify_find(self.proxy.find_subnet, subnet.Subnet) def test_subnet_get(self): self.verify_get(self.proxy.get_subnet, subnet.Subnet) def test_subnets(self): self.verify_list(self.proxy.subnets, subnet.Subnet) def test_subnet_update(self): 
self.verify_update( self.proxy.update_subnet, subnet.Subnet, expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None}, ) def test_subnet_pool_create_attrs(self): self.verify_create( self.proxy.create_subnet_pool, subnet_pool.SubnetPool ) def test_subnet_pool_delete(self): self.verify_delete( self.proxy.delete_subnet_pool, subnet_pool.SubnetPool, False ) def test_subnet_pool_delete_ignore(self): self.verify_delete( self.proxy.delete_subnet_pool, subnet_pool.SubnetPool, True ) def test_subnet_pool_find(self): self.verify_find(self.proxy.find_subnet_pool, subnet_pool.SubnetPool) def test_subnet_pool_get(self): self.verify_get(self.proxy.get_subnet_pool, subnet_pool.SubnetPool) def test_subnet_pools(self): self.verify_list(self.proxy.subnet_pools, subnet_pool.SubnetPool) def test_subnet_pool_update(self): self.verify_update( self.proxy.update_subnet_pool, subnet_pool.SubnetPool ) class TestNetworkVpnEndpointGroup(TestNetworkProxy): def test_vpn_endpoint_group_create_attrs(self): self.verify_create( self.proxy.create_vpn_endpoint_group, vpn_endpoint_group.VpnEndpointGroup, ) def test_vpn_endpoint_group_delete(self): self.verify_delete( self.proxy.delete_vpn_endpoint_group, vpn_endpoint_group.VpnEndpointGroup, False, ) def test_vpn_endpoint_group_delete_ignore(self): self.verify_delete( self.proxy.delete_vpn_endpoint_group, vpn_endpoint_group.VpnEndpointGroup, True, ) def test_vpn_endpoint_group_find(self): self.verify_find( self.proxy.find_vpn_endpoint_group, vpn_endpoint_group.VpnEndpointGroup, ) def test_vpn_endpoint_group_get(self): self.verify_get( self.proxy.get_vpn_endpoint_group, vpn_endpoint_group.VpnEndpointGroup, ) def test_vpn_endpoint_groups(self): self.verify_list( self.proxy.vpn_endpoint_groups, vpn_endpoint_group.VpnEndpointGroup ) def test_vpn_endpoint_group_update(self): self.verify_update( self.proxy.update_vpn_endpoint_group, vpn_endpoint_group.VpnEndpointGroup, ) class TestNetworkVpnSiteConnection(TestNetworkProxy): def 
test_ipsec_site_connection_create_attrs(self): self.verify_create( self.proxy.create_vpn_ipsec_site_connection, vpn_ipsec_site_connection.VpnIPSecSiteConnection, ) def test_ipsec_site_connection_delete(self): self.verify_delete( self.proxy.delete_vpn_ipsec_site_connection, vpn_ipsec_site_connection.VpnIPSecSiteConnection, False, ) def test_ipsec_site_connection_delete_ignore(self): self.verify_delete( self.proxy.delete_vpn_ipsec_site_connection, vpn_ipsec_site_connection.VpnIPSecSiteConnection, True, ) def test_ipsec_site_connection_find(self): self.verify_find( self.proxy.find_vpn_ipsec_site_connection, vpn_ipsec_site_connection.VpnIPSecSiteConnection, ) def test_ipsec_site_connection_get(self): self.verify_get( self.proxy.get_vpn_ipsec_site_connection, vpn_ipsec_site_connection.VpnIPSecSiteConnection, ) def test_ipsec_site_connections(self): self.verify_list( self.proxy.vpn_ipsec_site_connections, vpn_ipsec_site_connection.VpnIPSecSiteConnection, ) def test_ipsec_site_connection_update(self): self.verify_update( self.proxy.update_vpn_ipsec_site_connection, vpn_ipsec_site_connection.VpnIPSecSiteConnection, ) class TestNetworkVpnIkePolicy(TestNetworkProxy): def test_ike_policy_create_attrs(self): self.verify_create( self.proxy.create_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy ) def test_ike_policy_delete(self): self.verify_delete( self.proxy.delete_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy, False, ) def test_ike_policy_delete_ignore(self): self.verify_delete( self.proxy.delete_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy, True ) def test_ike_policy_find(self): self.verify_find( self.proxy.find_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy ) def test_ike_policy_get(self): self.verify_get( self.proxy.get_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy ) def test_ike_policies(self): self.verify_list( self.proxy.vpn_ike_policies, vpn_ike_policy.VpnIkePolicy ) def test_ike_policy_update(self): self.verify_update( self.proxy.update_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy 
) class TestNetworkVpnIpsecPolicy(TestNetworkProxy): def test_ipsec_policy_create_attrs(self): self.verify_create( self.proxy.create_vpn_ipsec_policy, vpn_ipsec_policy.VpnIpsecPolicy ) def test_ipsec_policy_delete(self): self.verify_delete( self.proxy.delete_vpn_ipsec_policy, vpn_ipsec_policy.VpnIpsecPolicy, False, ) def test_ipsec_policy_delete_ignore(self): self.verify_delete( self.proxy.delete_vpn_ipsec_policy, vpn_ipsec_policy.VpnIpsecPolicy, True, ) def test_ipsec_policy_find(self): self.verify_find( self.proxy.find_vpn_ipsec_policy, vpn_ipsec_policy.VpnIpsecPolicy ) def test_ipsec_policy_get(self): self.verify_get( self.proxy.get_vpn_ipsec_policy, vpn_ipsec_policy.VpnIpsecPolicy ) def test_ipsec_policies(self): self.verify_list( self.proxy.vpn_ipsec_policies, vpn_ipsec_policy.VpnIpsecPolicy ) def test_ipsec_policy_update(self): self.verify_update( self.proxy.update_vpn_ipsec_policy, vpn_ipsec_policy.VpnIpsecPolicy ) class TestNetworkVpnService(TestNetworkProxy): def test_vpn_service_create_attrs(self): self.verify_create( self.proxy.create_vpn_service, vpn_service.VpnService ) def test_vpn_service_delete(self): self.verify_delete( self.proxy.delete_vpn_service, vpn_service.VpnService, False ) def test_vpn_service_delete_ignore(self): self.verify_delete( self.proxy.delete_vpn_service, vpn_service.VpnService, True ) def test_vpn_service_find(self): self.verify_find(self.proxy.find_vpn_service, vpn_service.VpnService) def test_vpn_service_get(self): self.verify_get(self.proxy.get_vpn_service, vpn_service.VpnService) def test_vpn_services(self): self.verify_list(self.proxy.vpn_services, vpn_service.VpnService) def test_vpn_service_update(self): self.verify_update( self.proxy.update_vpn_service, vpn_service.VpnService ) class TestNetworkServiceProvider(TestNetworkProxy): def test_service_provider(self): self.verify_list( self.proxy.service_providers, service_provider.ServiceProvider ) class TestNetworkAutoAllocatedTopology(TestNetworkProxy): def 
test_auto_allocated_topology_get(self): self.verify_get( self.proxy.get_auto_allocated_topology, auto_allocated_topology.AutoAllocatedTopology, ) def test_auto_allocated_topology_delete(self): self.verify_delete( self.proxy.delete_auto_allocated_topology, auto_allocated_topology.AutoAllocatedTopology, False, ) def test_auto_allocated_topology_delete_ignore(self): self.verify_delete( self.proxy.delete_auto_allocated_topology, auto_allocated_topology.AutoAllocatedTopology, True, ) def test_validate_topology(self): self.verify_get( self.proxy.validate_auto_allocated_topology, auto_allocated_topology.ValidateTopology, method_args=[mock.sentinel.project_id], expected_args=[], expected_kwargs={ "project": mock.sentinel.project_id, "requires_id": False, }, ) class TestNetworkTags(TestNetworkProxy): def test_set_tags(self): x_network = network.Network.new(id='NETWORK_ID') self._verify( 'openstack.network.v2.network.Network.set_tags', self.proxy.set_tags, method_args=[x_network, ['TAG1', 'TAG2']], expected_args=[self.proxy, ['TAG1', 'TAG2']], expected_result=mock.sentinel.result_set_tags, ) @mock.patch('openstack.network.v2.network.Network.set_tags') def test_set_tags_resource_without_tag_suport(self, mock_set_tags): no_tag_resource = object() self.assertRaises( exceptions.InvalidRequest, self.proxy.set_tags, no_tag_resource, ['TAG1', 'TAG2'], ) self.assertEqual(0, mock_set_tags.call_count) class TestNetworkFloatingIp(TestNetworkProxy): def test_create_floating_ip_port_forwarding(self): self.verify_create( self.proxy.create_floating_ip_port_forwarding, port_forwarding.PortForwarding, method_kwargs={'floating_ip': FIP_ID}, expected_kwargs={'floatingip_id': FIP_ID}, ) def test_delete_floating_ip_port_forwarding(self): self.verify_delete( self.proxy.delete_floating_ip_port_forwarding, port_forwarding.PortForwarding, ignore_missing=False, method_args=[FIP_ID, "resource_or_id"], expected_args=["resource_or_id"], expected_kwargs={'floatingip_id': FIP_ID}, ) def 
test_delete_floating_ip_port_forwarding_ignore(self): self.verify_delete( self.proxy.delete_floating_ip_port_forwarding, port_forwarding.PortForwarding, ignore_missing=True, method_args=[FIP_ID, "resource_or_id"], expected_args=["resource_or_id"], expected_kwargs={'floatingip_id': FIP_ID}, ) def test_find_floating_ip_port_forwarding(self): fip = floating_ip.FloatingIP.new(id=FIP_ID) self._verify( 'openstack.proxy.Proxy._find', self.proxy.find_floating_ip_port_forwarding, method_args=[fip, 'port_forwarding_id'], expected_args=[ port_forwarding.PortForwarding, 'port_forwarding_id', ], expected_kwargs={'ignore_missing': True, 'floatingip_id': FIP_ID}, ) def test_get_floating_ip_port_forwarding(self): fip = floating_ip.FloatingIP.new(id=FIP_ID) self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_floating_ip_port_forwarding, method_args=[fip, 'port_forwarding_id'], expected_args=[ port_forwarding.PortForwarding, 'port_forwarding_id', ], expected_kwargs={'floatingip_id': FIP_ID}, ) def test_floating_ip_port_forwardings(self): self.verify_list( self.proxy.floating_ip_port_forwardings, port_forwarding.PortForwarding, method_kwargs={'floating_ip': FIP_ID}, expected_kwargs={'floatingip_id': FIP_ID}, ) def test_update_floating_ip_port_forwarding(self): fip = floating_ip.FloatingIP.new(id=FIP_ID) self._verify( 'openstack.network.v2._proxy.Proxy._update', self.proxy.update_floating_ip_port_forwarding, method_args=[fip, 'port_forwarding_id'], method_kwargs={'foo': 'bar'}, expected_args=[ port_forwarding.PortForwarding, 'port_forwarding_id', ], expected_kwargs={'floatingip_id': FIP_ID, 'foo': 'bar'}, ) def test_create_l3_conntrack_helper(self): self.verify_create( self.proxy.create_conntrack_helper, l3_conntrack_helper.ConntrackHelper, method_kwargs={'router': ROUTER_ID}, expected_kwargs={'router_id': ROUTER_ID}, ) def test_delete_l3_conntrack_helper(self): r = router.Router.new(id=ROUTER_ID) self.verify_delete( self.proxy.delete_conntrack_helper, 
l3_conntrack_helper.ConntrackHelper, ignore_missing=False, method_args=['resource_or_id', r], expected_args=['resource_or_id'], expected_kwargs={'router_id': ROUTER_ID}, ) def test_delete_l3_conntrack_helper_ignore(self): r = router.Router.new(id=ROUTER_ID) self.verify_delete( self.proxy.delete_conntrack_helper, l3_conntrack_helper.ConntrackHelper, ignore_missing=True, method_args=['resource_or_id', r], expected_args=['resource_or_id'], expected_kwargs={'router_id': ROUTER_ID}, ) def test_get_l3_conntrack_helper(self): r = router.Router.new(id=ROUTER_ID) self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_conntrack_helper, method_args=['conntrack_helper_id', r], expected_args=[ l3_conntrack_helper.ConntrackHelper, 'conntrack_helper_id', ], expected_kwargs={'router_id': ROUTER_ID}, ) def test_l3_conntrack_helpers(self): self.verify_list( self.proxy.conntrack_helpers, l3_conntrack_helper.ConntrackHelper, method_args=[ROUTER_ID], expected_args=[], expected_kwargs={'router_id': ROUTER_ID}, ) def test_update_l3_conntrack_helper(self): r = router.Router.new(id=ROUTER_ID) self._verify( 'openstack.network.v2._proxy.Proxy._update', self.proxy.update_conntrack_helper, method_args=['conntrack_helper_id', r], method_kwargs={'foo': 'bar'}, expected_args=[ l3_conntrack_helper.ConntrackHelper, 'conntrack_helper_id', ], expected_kwargs={'router_id': ROUTER_ID, 'foo': 'bar'}, ) class TestNetworkNDPProxy(TestNetworkProxy): def test_ndp_proxy_create_attrs(self): self.verify_create(self.proxy.create_ndp_proxy, ndp_proxy.NDPProxy) def test_ndp_proxy_delete(self): self.verify_delete( self.proxy.delete_ndp_proxy, ndp_proxy.NDPProxy, False ) def test_ndp_proxy_delete_ignore(self): self.verify_delete( self.proxy.delete_ndp_proxy, ndp_proxy.NDPProxy, True ) def test_ndp_proxy_find(self): self.verify_find(self.proxy.find_ndp_proxy, ndp_proxy.NDPProxy) def test_ndp_proxy_get(self): self.verify_get(self.proxy.get_ndp_proxy, ndp_proxy.NDPProxy) def test_ndp_proxies(self): 
self.verify_list(self.proxy.ndp_proxies, ndp_proxy.NDPProxy) def test_ndp_proxy_update(self): self.verify_update(self.proxy.update_ndp_proxy, ndp_proxy.NDPProxy) class TestNetworkBGP(TestNetworkProxy): def test_bgp_speaker_create(self): self.verify_create( self.proxy.create_bgp_speaker, bgp_speaker.BgpSpeaker ) def test_bgp_speaker_delete(self): self.verify_delete( self.proxy.delete_bgp_speaker, bgp_speaker.BgpSpeaker, False ) def test_bgp_speaker_delete_ignore(self): self.verify_delete( self.proxy.delete_bgp_speaker, bgp_speaker.BgpSpeaker, True ) def test_bgp_speaker_find(self): self.verify_find(self.proxy.find_bgp_speaker, bgp_speaker.BgpSpeaker) def test_bgp_speaker_get(self): self.verify_get(self.proxy.get_bgp_speaker, bgp_speaker.BgpSpeaker) def test_bgp_speakers(self): self.verify_list(self.proxy.bgp_speakers, bgp_speaker.BgpSpeaker) def test_bgp_speaker_update(self): self.verify_update( self.proxy.update_bgp_speaker, bgp_speaker.BgpSpeaker ) def test_bgp_peer_create(self): self.verify_create(self.proxy.create_bgp_peer, bgp_peer.BgpPeer) def test_bgp_peer_delete(self): self.verify_delete(self.proxy.delete_bgp_peer, bgp_peer.BgpPeer, False) def test_bgp_peer_delete_ignore(self): self.verify_delete(self.proxy.delete_bgp_peer, bgp_peer.BgpPeer, True) def test_bgp_peer_find(self): self.verify_find(self.proxy.find_bgp_peer, bgp_peer.BgpPeer) def test_bgp_peer_get(self): self.verify_get(self.proxy.get_bgp_peer, bgp_peer.BgpPeer) def test_bgp_peers(self): self.verify_list(self.proxy.bgp_peers, bgp_peer.BgpPeer) def test_bgp_peer_update(self): self.verify_update(self.proxy.update_bgp_peer, bgp_peer.BgpPeer) class TestNetworkBGPVPN(TestNetworkProxy): NETWORK_ASSOCIATION = 'net-assoc-id' + uuid.uuid4().hex PORT_ASSOCIATION = 'port-assoc-id' + uuid.uuid4().hex ROUTER_ASSOCIATION = 'router-assoc-id' + uuid.uuid4().hex def test_bgpvpn_create(self): self.verify_create(self.proxy.create_bgpvpn, bgpvpn.BgpVpn) def test_bgpvpn_delete(self): 
self.verify_delete(self.proxy.delete_bgpvpn, bgpvpn.BgpVpn, False) def test_bgpvpn_delete_ignore(self): self.verify_delete(self.proxy.delete_bgpvpn, bgpvpn.BgpVpn, True) def test_bgpvpn_find(self): self.verify_find(self.proxy.find_bgpvpn, bgpvpn.BgpVpn) def test_bgpvpn_get(self): self.verify_get(self.proxy.get_bgpvpn, bgpvpn.BgpVpn) def test_bgpvpns(self): self.verify_list(self.proxy.bgpvpns, bgpvpn.BgpVpn) def test_bgpvpn_update(self): self.verify_update(self.proxy.update_bgpvpn, bgpvpn.BgpVpn) def test_bgpvpn_network_association_create(self): self.verify_create( self.proxy.create_bgpvpn_network_association, bgpvpn_network_association.BgpVpnNetworkAssociation, method_kwargs={'bgpvpn': BGPVPN_ID}, expected_kwargs={'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_network_association_delete(self): self.verify_delete( self.proxy.delete_bgpvpn_network_association, bgpvpn_network_association.BgpVpnNetworkAssociation, False, method_args=[BGPVPN_ID, self.NETWORK_ASSOCIATION], expected_args=[self.NETWORK_ASSOCIATION], expected_kwargs={'ignore_missing': False, 'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_network_association_delete_ignore(self): self.verify_delete( self.proxy.delete_bgpvpn_network_association, bgpvpn_network_association.BgpVpnNetworkAssociation, True, method_args=[BGPVPN_ID, self.NETWORK_ASSOCIATION], expected_args=[self.NETWORK_ASSOCIATION], expected_kwargs={'ignore_missing': True, 'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_network_association_get(self): self.verify_get( self.proxy.get_bgpvpn_network_association, bgpvpn_network_association.BgpVpnNetworkAssociation, method_args=[BGPVPN_ID, self.NETWORK_ASSOCIATION], expected_args=[self.NETWORK_ASSOCIATION], expected_kwargs={'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_network_associations(self): self.verify_list( self.proxy.bgpvpn_network_associations, bgpvpn_network_association.BgpVpnNetworkAssociation, method_args=[ BGPVPN_ID, ], expected_args=[], expected_kwargs={'bgpvpn_id': BGPVPN_ID}, ) def 
test_bgpvpn_port_association_create(self): self.verify_create( self.proxy.create_bgpvpn_port_association, bgpvpn_port_association.BgpVpnPortAssociation, method_kwargs={'bgpvpn': BGPVPN_ID}, expected_kwargs={'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_port_association_delete(self): self.verify_delete( self.proxy.delete_bgpvpn_port_association, bgpvpn_port_association.BgpVpnPortAssociation, False, method_args=[BGPVPN_ID, self.PORT_ASSOCIATION], expected_args=[self.PORT_ASSOCIATION], expected_kwargs={'ignore_missing': False, 'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_port_association_delete_ignore(self): self.verify_delete( self.proxy.delete_bgpvpn_port_association, bgpvpn_port_association.BgpVpnPortAssociation, True, method_args=[BGPVPN_ID, self.PORT_ASSOCIATION], expected_args=[self.PORT_ASSOCIATION], expected_kwargs={'ignore_missing': True, 'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_port_association_find(self): self.verify_find( self.proxy.find_bgpvpn_port_association, bgpvpn_port_association.BgpVpnPortAssociation, method_args=[BGPVPN_ID], expected_args=['resource_name'], method_kwargs={'ignore_missing': True}, expected_kwargs={'ignore_missing': True, 'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_port_association_get(self): self.verify_get( self.proxy.get_bgpvpn_port_association, bgpvpn_port_association.BgpVpnPortAssociation, method_args=[BGPVPN_ID, self.PORT_ASSOCIATION], expected_args=[self.PORT_ASSOCIATION], expected_kwargs={'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_port_associations(self): self.verify_list( self.proxy.bgpvpn_port_associations, bgpvpn_port_association.BgpVpnPortAssociation, method_args=[ BGPVPN_ID, ], expected_args=[], expected_kwargs={'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_port_association_update(self): self.verify_update( self.proxy.update_bgpvpn_port_association, bgpvpn_port_association.BgpVpnPortAssociation, method_args=[BGPVPN_ID, self.PORT_ASSOCIATION], method_kwargs={}, expected_args=[self.PORT_ASSOCIATION], expected_kwargs={'bgpvpn_id': 
BGPVPN_ID}, ) def test_bgpvpn_router_association_create(self): self.verify_create( self.proxy.create_bgpvpn_router_association, bgpvpn_router_association.BgpVpnRouterAssociation, method_kwargs={'bgpvpn': BGPVPN_ID}, expected_kwargs={'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_router_association_delete(self): self.verify_delete( self.proxy.delete_bgpvpn_router_association, bgpvpn_router_association.BgpVpnRouterAssociation, False, method_args=[BGPVPN_ID, self.ROUTER_ASSOCIATION], expected_args=[self.ROUTER_ASSOCIATION], expected_kwargs={'ignore_missing': False, 'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_router_association_delete_ignore(self): self.verify_delete( self.proxy.delete_bgpvpn_router_association, bgpvpn_router_association.BgpVpnRouterAssociation, True, method_args=[BGPVPN_ID, self.ROUTER_ASSOCIATION], expected_args=[self.ROUTER_ASSOCIATION], expected_kwargs={'ignore_missing': True, 'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_router_association_get(self): self.verify_get( self.proxy.get_bgpvpn_router_association, bgpvpn_router_association.BgpVpnRouterAssociation, method_args=[BGPVPN_ID, self.ROUTER_ASSOCIATION], expected_args=[self.ROUTER_ASSOCIATION], expected_kwargs={'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_router_associations(self): self.verify_list( self.proxy.bgpvpn_router_associations, bgpvpn_router_association.BgpVpnRouterAssociation, method_args=[ BGPVPN_ID, ], expected_args=[], expected_kwargs={'bgpvpn_id': BGPVPN_ID}, ) def test_bgpvpn_router_association_update(self): self.verify_update( self.proxy.update_bgpvpn_router_association, bgpvpn_router_association.BgpVpnRouterAssociation, method_args=[BGPVPN_ID, self.ROUTER_ASSOCIATION], method_kwargs={}, expected_args=[self.ROUTER_ASSOCIATION], expected_kwargs={'bgpvpn_id': BGPVPN_ID}, ) class TestNetworkTapMirror(TestNetworkProxy): def test_create_tap_mirror(self): self.verify_create(self.proxy.create_tap_mirror, tap_mirror.TapMirror) def test_delete_tap_mirror(self): self.verify_delete( 
self.proxy.delete_tap_mirror, tap_mirror.TapMirror, False ) def test_delete_tap_mirror_ignore(self): self.verify_delete( self.proxy.delete_tap_mirror, tap_mirror.TapMirror, True ) def test_find_tap_mirror(self): self.verify_find(self.proxy.find_tap_mirror, tap_mirror.TapMirror) def test_get_tap_mirror(self): self.verify_get(self.proxy.get_tap_mirror, tap_mirror.TapMirror) def test_tap_mirrors(self): self.verify_list(self.proxy.tap_mirrors, tap_mirror.TapMirror) def test_update_tap_mirror(self): self.verify_update(self.proxy.update_tap_mirror, tap_mirror.TapMirror) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_qos_bandwidth_limit_rule.py0000664000175000017500000000347600000000000031055 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.network.v2 import qos_bandwidth_limit_rule from openstack.tests.unit import base EXAMPLE = { 'id': 'IDENTIFIER', 'qos_policy_id': 'qos-policy-' + uuid.uuid4().hex, 'max_kbps': 1500, 'max_burst_kbps': 1200, 'direction': 'egress', } class TestQoSBandwidthLimitRule(base.TestCase): def test_basic(self): sot = qos_bandwidth_limit_rule.QoSBandwidthLimitRule() self.assertEqual('bandwidth_limit_rule', sot.resource_key) self.assertEqual('bandwidth_limit_rules', sot.resources_key) self.assertEqual( '/qos/policies/%(qos_policy_id)s/bandwidth_limit_rules', sot.base_path, ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = qos_bandwidth_limit_rule.QoSBandwidthLimitRule(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) self.assertEqual(EXAMPLE['max_kbps'], sot.max_kbps) self.assertEqual(EXAMPLE['max_burst_kbps'], sot.max_burst_kbps) self.assertEqual(EXAMPLE['direction'], sot.direction) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_qos_dscp_marking_rule.py0000664000175000017500000000313200000000000030341 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.network.v2 import qos_dscp_marking_rule from openstack.tests.unit import base EXAMPLE = { 'id': 'IDENTIFIER', 'qos_policy_id': 'qos-policy-' + uuid.uuid4().hex, 'dscp_mark': 40, } class TestQoSDSCPMarkingRule(base.TestCase): def test_basic(self): sot = qos_dscp_marking_rule.QoSDSCPMarkingRule() self.assertEqual('dscp_marking_rule', sot.resource_key) self.assertEqual('dscp_marking_rules', sot.resources_key) self.assertEqual( '/qos/policies/%(qos_policy_id)s/dscp_marking_rules', sot.base_path ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = qos_dscp_marking_rule.QoSDSCPMarkingRule(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) self.assertEqual(EXAMPLE['dscp_mark'], sot.dscp_mark) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_qos_minimum_bandwidth_rule.py0000664000175000017500000000335400000000000031405 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.network.v2 import qos_minimum_bandwidth_rule from openstack.tests.unit import base EXAMPLE = { 'id': 'IDENTIFIER', 'qos_policy_id': 'qos-policy-' + uuid.uuid4().hex, 'min_kbps': 1500, 'direction': 'egress', } class TestQoSMinimumBandwidthRule(base.TestCase): def test_basic(self): sot = qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule() self.assertEqual('minimum_bandwidth_rule', sot.resource_key) self.assertEqual('minimum_bandwidth_rules', sot.resources_key) self.assertEqual( '/qos/policies/%(qos_policy_id)s/minimum_bandwidth_rules', sot.base_path, ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) self.assertEqual(EXAMPLE['min_kbps'], sot.min_kbps) self.assertEqual(EXAMPLE['direction'], sot.direction) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_qos_minimum_packet_rate_rule.py0000664000175000017500000000336300000000000031723 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import uuid from openstack.network.v2 import qos_minimum_packet_rate_rule from openstack.tests.unit import base EXAMPLE = { 'id': 'IDENTIFIER', 'qos_policy_id': 'qos-policy-' + uuid.uuid4().hex, 'min_kpps': 1500, 'direction': 'any', } class TestQoSMinimumPacketRateRule(base.TestCase): def test_basic(self): sot = qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule() self.assertEqual('minimum_packet_rate_rule', sot.resource_key) self.assertEqual('minimum_packet_rate_rules', sot.resources_key) self.assertEqual( '/qos/policies/%(qos_policy_id)s/minimum_packet_rate_rules', sot.base_path, ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) self.assertEqual(EXAMPLE['min_kpps'], sot.min_kpps) self.assertEqual(EXAMPLE['direction'], sot.direction) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_qos_policy.py0000664000175000017500000000344200000000000026154 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from openstack.network.v2 import qos_policy from openstack.tests.unit import base EXAMPLE = { 'id': 'IDENTIFIER', 'description': 'QoS policy description', 'name': 'qos-policy-name', 'shared': True, 'project_id': '2', 'rules': [uuid.uuid4().hex], 'is_default': False, 'tags': ['3'], } class TestQoSPolicy(base.TestCase): def test_basic(self): sot = qos_policy.QoSPolicy() self.assertEqual('policy', sot.resource_key) self.assertEqual('policies', sot.resources_key) self.assertEqual('/qos/policies', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = qos_policy.QoSPolicy(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['rules'], sot.rules) self.assertEqual(EXAMPLE['is_default'], sot.is_default) self.assertEqual(EXAMPLE['tags'], sot.tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_qos_rule_type.py0000664000175000017500000000462100000000000026665 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import qos_rule_type from openstack.tests.unit import base EXAMPLE = { 'type': 'bandwidth_limit', 'drivers': [ { 'name': 'openvswitch', 'supported_parameters': [ { 'parameter_values': {'start': 0, 'end': 2147483647}, 'parameter_type': 'range', 'parameter_name': 'max_kbps', }, { 'parameter_values': ['ingress', 'egress'], 'parameter_type': 'choices', 'parameter_name': 'direction', }, { 'parameter_values': {'start': 0, 'end': 2147483647}, 'parameter_type': 'range', 'parameter_name': 'max_burst_kbps', }, ], } ], } class TestQoSRuleType(base.TestCase): def test_basic(self): sot = qos_rule_type.QoSRuleType() self.assertEqual('rule_type', sot.resource_key) self.assertEqual('rule_types', sot.resources_key) self.assertEqual('/qos/rule-types', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertEqual( { 'type': 'type', 'drivers': 'drivers', 'all_rules': 'all_rules', 'all_supported': 'all_supported', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = qos_rule_type.QoSRuleType(**EXAMPLE) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['drivers'], sot.drivers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_quota.py0000664000175000017500000001125000000000000025120 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import quota from openstack import resource from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'floatingip': 1, 'network': 2, 'port': 3, 'project_id': '4', 'router': 5, 'subnet': 6, 'subnetpool': 7, 'security_group_rule': 8, 'security_group': 9, 'rbac_policy': -1, 'healthmonitor': 11, 'listener': 12, 'loadbalancer': 13, 'l7policy': 14, 'pool': 15, 'check_limit': True, } class TestQuota(base.TestCase): def test_basic(self): sot = quota.Quota() self.assertEqual('quota', sot.resource_key) self.assertEqual('quotas', sot.resources_key) self.assertEqual('/quotas', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = quota.Quota(**EXAMPLE) self.assertEqual(EXAMPLE['floatingip'], sot.floating_ips) self.assertEqual(EXAMPLE['network'], sot.networks) self.assertEqual(EXAMPLE['port'], sot.ports) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['router'], sot.routers) self.assertEqual(EXAMPLE['subnet'], sot.subnets) self.assertEqual(EXAMPLE['subnetpool'], sot.subnet_pools) self.assertEqual( EXAMPLE['security_group_rule'], sot.security_group_rules ) self.assertEqual(EXAMPLE['security_group'], sot.security_groups) self.assertEqual(EXAMPLE['rbac_policy'], sot.rbac_policies) self.assertEqual(EXAMPLE['healthmonitor'], sot.health_monitors) self.assertEqual(EXAMPLE['listener'], sot.listeners) self.assertEqual(EXAMPLE['loadbalancer'], sot.load_balancers) self.assertEqual(EXAMPLE['l7policy'], sot.l7_policies) self.assertEqual(EXAMPLE['pool'], sot.pools) self.assertEqual(EXAMPLE['check_limit'], sot.check_limit) def test_prepare_request(self): body = {'id': 'ABCDEFGH', 'network': '12345'} quota_obj = quota.Quota(**body) response = 
quota_obj._prepare_request() self.assertNotIn('id', response) def test_alternate_id(self): my_project_id = 'my-tenant-id' body = {'project_id': my_project_id, 'network': 12345} quota_obj = quota.Quota(**body) self.assertEqual(my_project_id, resource.Resource._get_id(quota_obj)) class TestQuotaDefault(base.TestCase): def test_basic(self): sot = quota.QuotaDefault() self.assertEqual('quota', sot.resource_key) self.assertEqual('quotas', sot.resources_key) self.assertEqual('/quotas/%(project)s/default', sot.base_path) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = quota.QuotaDefault(project='FAKE_PROJECT', **EXAMPLE) self.assertEqual(EXAMPLE['floatingip'], sot.floating_ips) self.assertEqual(EXAMPLE['network'], sot.networks) self.assertEqual(EXAMPLE['port'], sot.ports) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['router'], sot.routers) self.assertEqual(EXAMPLE['subnet'], sot.subnets) self.assertEqual(EXAMPLE['subnetpool'], sot.subnet_pools) self.assertEqual( EXAMPLE['security_group_rule'], sot.security_group_rules ) self.assertEqual(EXAMPLE['security_group'], sot.security_groups) self.assertEqual(EXAMPLE['rbac_policy'], sot.rbac_policies) self.assertEqual(EXAMPLE['healthmonitor'], sot.health_monitors) self.assertEqual(EXAMPLE['listener'], sot.listeners) self.assertEqual(EXAMPLE['loadbalancer'], sot.load_balancers) self.assertEqual(EXAMPLE['l7policy'], sot.l7_policies) self.assertEqual(EXAMPLE['pool'], sot.pools) self.assertEqual(EXAMPLE['check_limit'], sot.check_limit) self.assertEqual('FAKE_PROJECT', sot.project) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_rbac_policy.py0000664000175000017500000000407000000000000026257 0ustar00zuulzuul00000000000000# Licensed 
under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import rbac_policy from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'action': 'access_as_shared', 'object_id': IDENTIFIER, 'object_type': 'network', 'target_tenant': '10', 'project_id': '5', } class TestRBACPolicy(base.TestCase): def test_basic(self): sot = rbac_policy.RBACPolicy() self.assertEqual('rbac_policy', sot.resource_key) self.assertEqual('rbac_policies', sot.resources_key) self.assertEqual('/rbac-policies', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'action': 'action', 'object_id': 'object_id', 'object_type': 'object_type', 'project_id': 'project_id', 'target_project_id': 'target_tenant', 'limit': 'limit', 'marker': 'marker', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = rbac_policy.RBACPolicy(**EXAMPLE) self.assertEqual(EXAMPLE['action'], sot.action) self.assertEqual(EXAMPLE['object_id'], sot.object_id) self.assertEqual(EXAMPLE['object_type'], sot.object_type) self.assertEqual(EXAMPLE['target_tenant'], sot.target_project_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_router.py0000664000175000017500000002574700000000000025327 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import testtools from openstack import exceptions from openstack.network.v2 import router from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'availability_zone_hints': ['1'], 'availability_zones': ['2'], 'created_at': 'timestamp1', 'description': '3', 'distributed': False, 'enable_ndp_proxy': True, 'external_gateway_info': {'4': 4}, 'flavor_id': '5', 'ha': False, 'id': IDENTIFIER, 'name': '6', 'revision': 7, 'routes': ['8'], 'status': '9', 'project_id': '10', 'updated_at': 'timestamp2', } EXAMPLE_WITH_OPTIONAL = { 'admin_state_up': False, 'availability_zone_hints': ['zone-1', 'zone-2'], 'availability_zones': ['zone-2'], 'description': 'description', 'distributed': True, 'external_gateway_info': { 'network_id': '1', 'enable_snat': True, 'external_fixed_ips': [], }, 'ha': True, 'id': IDENTIFIER, 'name': 'router1', 'routes': [{'nexthop': '172.24.4.20', 'destination': '10.0.3.1/24'}], 'status': 'ACTIVE', 'project_id': '2', } class TestRouter(base.TestCase): def test_basic(self): sot = router.Router() self.assertEqual('router', sot.resource_key) self.assertEqual('routers', sot.resources_key) self.assertEqual('/routers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = 
router.Router(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual( EXAMPLE['availability_zone_hints'], sot.availability_zone_hints ) self.assertEqual(EXAMPLE['availability_zones'], sot.availability_zones) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['description'], sot.description) self.assertTrue(sot.enable_ndp_proxy) self.assertFalse(sot.is_distributed) self.assertEqual( EXAMPLE['external_gateway_info'], sot.external_gateway_info ) self.assertEqual(EXAMPLE['flavor_id'], sot.flavor_id) self.assertFalse(sot.is_ha) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['revision'], sot.revision_number) self.assertEqual(EXAMPLE['routes'], sot.routes) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) def test_make_it_with_optional(self): sot = router.Router(**EXAMPLE_WITH_OPTIONAL) self.assertFalse(sot.is_admin_state_up) self.assertEqual( EXAMPLE_WITH_OPTIONAL['availability_zone_hints'], sot.availability_zone_hints, ) self.assertEqual( EXAMPLE_WITH_OPTIONAL['availability_zones'], sot.availability_zones ) self.assertEqual(EXAMPLE_WITH_OPTIONAL['description'], sot.description) self.assertTrue(sot.is_distributed) self.assertEqual( EXAMPLE_WITH_OPTIONAL['external_gateway_info'], sot.external_gateway_info, ) self.assertTrue(sot.is_ha) self.assertEqual(EXAMPLE_WITH_OPTIONAL['id'], sot.id) self.assertEqual(EXAMPLE_WITH_OPTIONAL['name'], sot.name) self.assertEqual(EXAMPLE_WITH_OPTIONAL['routes'], sot.routes) self.assertEqual(EXAMPLE_WITH_OPTIONAL['status'], sot.status) self.assertEqual(EXAMPLE_WITH_OPTIONAL['project_id'], sot.project_id) def test_add_interface_subnet(self): # Add subnet to a router sot = router.Router(**EXAMPLE) response = mock.Mock() response.body = {"subnet_id": "3", "port_id": "2"} response.json = mock.Mock(return_value=response.body) 
response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {"subnet_id": "3"} self.assertEqual(response.body, sot.add_interface(sess, **body)) url = 'routers/IDENTIFIER/add_router_interface' sess.put.assert_called_with(url, json=body) def test_add_interface_port(self): # Add port to a router sot = router.Router(**EXAMPLE) response = mock.Mock() response.body = {"subnet_id": "3", "port_id": "3"} response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {"port_id": "3"} self.assertEqual(response.body, sot.add_interface(sess, **body)) url = 'routers/IDENTIFIER/add_router_interface' sess.put.assert_called_with(url, json=body) def test_remove_interface_subnet(self): # Remove subnet from a router sot = router.Router(**EXAMPLE) response = mock.Mock() response.body = {"subnet_id": "3", "port_id": "2"} response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {"subnet_id": "3"} self.assertEqual(response.body, sot.remove_interface(sess, **body)) url = 'routers/IDENTIFIER/remove_router_interface' sess.put.assert_called_with(url, json=body) def test_remove_interface_port(self): # Remove port from a router sot = router.Router(**EXAMPLE) response = mock.Mock() response.body = {"subnet_id": "3", "port_id": "3"} response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {"network_id": 3, "enable_snat": True} self.assertEqual(response.body, sot.remove_interface(sess, **body)) url = 'routers/IDENTIFIER/remove_router_interface' sess.put.assert_called_with(url, json=body) def test_add_interface_4xx(self): # Neutron may return 4xx, we have to raise if that happens sot = router.Router(**EXAMPLE) response = mock.Mock() msg = '.*borked' response.body = {'NeutronError': 
{'message': msg}} response.json = mock.Mock(return_value=response.body) response.ok = False response.status_code = 409 response.headers = {'content-type': 'application/json'} sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {'subnet_id': '3'} with testtools.ExpectedException(exceptions.ConflictException, msg): sot.add_interface(sess, **body) def test_remove_interface_4xx(self): # Neutron may return 4xx for example if a router interface has # extra routes referring to it as a nexthop sot = router.Router(**EXAMPLE) response = mock.Mock() msg = '.*borked' response.body = {'NeutronError': {'message': msg}} response.json = mock.Mock(return_value=response.body) response.ok = False response.status_code = 409 response.headers = {'content-type': 'application/json'} sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {'subnet_id': '3'} with testtools.ExpectedException(exceptions.ConflictException, msg): sot.remove_interface(sess, **body) def test_add_extra_routes(self): r = router.Router(**EXAMPLE) response = mock.Mock() response.headers = {} json_body = {'router': {}} response.body = json_body response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) ret = r.add_extra_routes(sess, json_body) self.assertIsInstance(ret, router.Router) self.assertIsInstance(ret.routes, list) url = 'routers/IDENTIFIER/add_extraroutes' sess.put.assert_called_with(url, json=json_body) def test_remove_extra_routes(self): r = router.Router(**EXAMPLE) response = mock.Mock() response.headers = {} json_body = {'router': {}} response.body = json_body response.json = mock.Mock(return_value=response.body) response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) ret = r.remove_extra_routes(sess, json_body) self.assertIsInstance(ret, router.Router) self.assertIsInstance(ret.routes, list) url = 'routers/IDENTIFIER/remove_extraroutes' 
sess.put.assert_called_with(url, json=json_body) def test_add_router_gateway(self): # Add gateway to a router sot = router.Router(**EXAMPLE_WITH_OPTIONAL) response = mock.Mock() response.body = {"network_id": "3", "enable_snat": True} response.json = mock.Mock(return_value=response.body) sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {"network_id": 3, "enable_snat": True} self.assertEqual(response.body, sot.add_gateway(sess, **body)) url = 'routers/IDENTIFIER/add_gateway_router' sess.put.assert_called_with(url, json=body) def test_remove_router_gateway(self): # Remove gateway to a router sot = router.Router(**EXAMPLE_WITH_OPTIONAL) response = mock.Mock() response.body = {"network_id": "3", "enable_snat": True} response.json = mock.Mock(return_value=response.body) sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {"network_id": 3, "enable_snat": True} self.assertEqual(response.body, sot.remove_gateway(sess, **body)) url = 'routers/IDENTIFIER/remove_gateway_router' sess.put.assert_called_with(url, json=body) class TestL3AgentRouters(base.TestCase): def test_basic(self): sot = router.L3AgentRouter() self.assertEqual('router', sot.resource_key) self.assertEqual('routers', sot.resources_key) self.assertEqual('/agents/%(agent_id)s/l3-routers', sot.base_path) self.assertEqual('l3-router', sot.resource_name) self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_retrieve) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_security_group.py0000664000175000017500000000755200000000000027064 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import security_group from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' RULES = [ { "remote_group_id": None, "direction": "egress", "remote_ip_prefix": None, "protocol": None, "ethertype": "IPv6", "project_id": "4", "port_range_max": None, "port_range_min": None, "id": "5", "security_group_id": IDENTIFIER, "created_at": "2016-10-04T12:14:57.233772", "updated_at": "2016-10-12T12:15:34.233222", "revision_number": 6, }, { "remote_group_id": "9", "direction": "ingress", "remote_ip_prefix": None, "protocol": None, "ethertype": "IPv6", "project_id": "4", "port_range_max": None, "port_range_min": None, "id": "6", "security_group_id": IDENTIFIER, "created_at": "2016-10-04T12:14:57.233772", "updated_at": "2016-10-12T12:15:34.233222", "revision_number": 7, }, ] EXAMPLE = { 'created_at': '2016-10-04T12:14:57.233772', 'description': '1', 'id': IDENTIFIER, 'name': '2', 'stateful': True, 'revision_number': 3, 'security_group_rules': RULES, 'project_id': '4', 'project_id': '4', 'updated_at': '2016-10-14T12:16:57.233772', 'tags': ['5'], } class TestSecurityGroup(base.TestCase): def test_basic(self): sot = security_group.SecurityGroup() self.assertEqual('security_group', sot.resource_key) self.assertEqual('security_groups', sot.resources_key) self.assertEqual('/security-groups', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'any_tags': 'tags-any', 'description': 'description', 'fields': 
'fields', 'id': 'id', 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'not_any_tags': 'not-tags-any', 'not_tags': 'not-tags', 'tenant_id': 'tenant_id', 'revision_number': 'revision_number', 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', 'tags': 'tags', 'project_id': 'project_id', 'stateful': 'stateful', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = security_group.SecurityGroup(**EXAMPLE) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertEqual( EXAMPLE['security_group_rules'], sot.security_group_rules ) self.assertEqual(dict, type(sot.security_group_rules[0])) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertEqual(EXAMPLE['tags'], sot.tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_security_group_rule.py0000664000175000017500000000752600000000000030114 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import security_group_rule from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'created_at': '0', 'description': '1', 'direction': '2', 'ethertype': '3', 'id': IDENTIFIER, 'port_range_max': 4, 'port_range_min': 5, 'protocol': '6', 'remote_group_id': '7', 'remote_ip_prefix': '8', 'revision_number': 9, 'security_group_id': '10', 'project_id': '11', 'project_id': '11', 'updated_at': '12', 'remote_address_group_id': '13', } class TestSecurityGroupRule(base.TestCase): def test_basic(self): sot = security_group_rule.SecurityGroupRule() self.assertEqual('security_group_rule', sot.resource_key) self.assertEqual('security_group_rules', sot.resources_key) self.assertEqual('/security-group-rules', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'any_tags': 'tags-any', 'description': 'description', 'direction': 'direction', 'id': 'id', 'ether_type': 'ethertype', 'limit': 'limit', 'marker': 'marker', 'not_any_tags': 'not-tags-any', 'not_tags': 'not-tags', 'port_range_max': 'port_range_max', 'port_range_min': 'port_range_min', 'tenant_id': 'tenant_id', 'protocol': 'protocol', 'remote_group_id': 'remote_group_id', 'remote_address_group_id': 'remote_address_group_id', 'remote_ip_prefix': 'remote_ip_prefix', 'revision_number': 'revision_number', 'security_group_id': 'security_group_id', 'sort_dir': 'sort_dir', 'sort_key': 'sort_key', 'tags': 'tags', 'project_id': 'project_id', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = security_group_rule.SecurityGroupRule(**EXAMPLE) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['direction'], sot.direction) self.assertEqual(EXAMPLE['ethertype'], sot.ether_type) self.assertEqual(EXAMPLE['id'], sot.id) 
self.assertEqual(EXAMPLE['port_range_max'], sot.port_range_max) self.assertEqual(EXAMPLE['port_range_min'], sot.port_range_min) self.assertEqual(EXAMPLE['protocol'], sot.protocol) self.assertEqual(EXAMPLE['remote_group_id'], sot.remote_group_id) self.assertEqual( EXAMPLE['remote_address_group_id'], sot.remote_address_group_id ) self.assertEqual(EXAMPLE['remote_ip_prefix'], sot.remote_ip_prefix) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertEqual(EXAMPLE['security_group_id'], sot.security_group_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_segment.py0000664000175000017500000000343000000000000025432 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import segment from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'id': IDENTIFIER, 'name': '2', 'network_id': '3', 'network_type': '4', 'physical_network': '5', 'segmentation_id': 6, } class TestSegment(base.TestCase): def test_basic(self): sot = segment.Segment() self.assertEqual('segment', sot.resource_key) self.assertEqual('segments', sot.resources_key) self.assertEqual('/segments', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = segment.Segment(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['network_id'], sot.network_id) self.assertEqual(EXAMPLE['network_type'], sot.network_type) self.assertEqual(EXAMPLE['physical_network'], sot.physical_network) self.assertEqual(EXAMPLE['segmentation_id'], sot.segmentation_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_service_profile.py0000664000175000017500000000462500000000000027157 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import service_profile from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE_WITH_OPTIONAL = { 'description': 'test flavor profile', 'driver': 'neutron_lbaas.drivers.octavia.driver.OctaviaDriver', 'enabled': True, 'metainfo': {'foo': 'bar'}, 'project_id': '5', } EXAMPLE = { 'driver': 'neutron_lbaas.drivers.octavia.driver.OctaviaDriver', } class TestServiceProfile(base.TestCase): def test_basic(self): service_profiles = service_profile.ServiceProfile() self.assertEqual('service_profile', service_profiles.resource_key) self.assertEqual('service_profiles', service_profiles.resources_key) self.assertEqual('/service_profiles', service_profiles.base_path) self.assertTrue(service_profiles.allow_create) self.assertTrue(service_profiles.allow_fetch) self.assertTrue(service_profiles.allow_commit) self.assertTrue(service_profiles.allow_delete) self.assertTrue(service_profiles.allow_list) def test_make_it(self): service_profiles = service_profile.ServiceProfile(**EXAMPLE) self.assertEqual(EXAMPLE['driver'], service_profiles.driver) def test_make_it_with_optional(self): service_profiles = service_profile.ServiceProfile( **EXAMPLE_WITH_OPTIONAL ) self.assertEqual( EXAMPLE_WITH_OPTIONAL['description'], service_profiles.description ) self.assertEqual( EXAMPLE_WITH_OPTIONAL['driver'], service_profiles.driver ) self.assertEqual( EXAMPLE_WITH_OPTIONAL['enabled'], service_profiles.is_enabled ) self.assertEqual( EXAMPLE_WITH_OPTIONAL['metainfo'], service_profiles.meta_info ) self.assertEqual( EXAMPLE_WITH_OPTIONAL['project_id'], service_profiles.project_id ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_service_provider.py0000664000175000017500000000270600000000000027347 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import service_provider from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'service_type': 'L3_ROUTER_NAT', 'name': '4', 'default': False, } class TestServiceProvider(base.TestCase): def test_basic(self): sot = service_provider.ServiceProvider() self.assertEqual('service_providers', sot.resources_key) self.assertEqual('/service-providers', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = service_provider.ServiceProvider(**EXAMPLE) self.assertEqual(EXAMPLE['service_type'], sot.service_type) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['default'], sot.is_default) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_sfc_flow_classifier.py0000664000175000017500000000766000000000000030007 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import sfc_flow_classifier from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "description": "", "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", "ethertype": "IPv4", "protocol": 6, "source_port_range_min": 22, "source_port_range_max": 2000, "destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": None, "destination_ip_prefix": "22.12.34.45", "logical_source_port": "uuid1", "logical_destination_port": "uuid2", "l7_parameters": None, "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", "name": "flow_classifier", } class TestFlowClassifier(base.TestCase): def test_basic(self): sot = sfc_flow_classifier.SfcFlowClassifier() self.assertEqual('flow_classifier', sot.resource_key) self.assertEqual('flow_classifiers', sot.resources_key) self.assertEqual('/sfc/flow_classifiers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = sfc_flow_classifier.SfcFlowClassifier(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['protocol'], sot.protocol) self.assertEqual(EXAMPLE['ethertype'], sot.ethertype) self.assertEqual( EXAMPLE['source_port_range_min'], sot.source_port_range_min ) self.assertEqual( EXAMPLE['source_port_range_max'], sot.source_port_range_max ) self.assertEqual( EXAMPLE['destination_port_range_min'], sot.destination_port_range_min, ) self.assertEqual( EXAMPLE['destination_port_range_max'], sot.destination_port_range_max, ) self.assertEqual(EXAMPLE['source_ip_prefix'], sot.source_ip_prefix) self.assertEqual( EXAMPLE['destination_ip_prefix'], sot.destination_ip_prefix ) self.assertEqual( EXAMPLE['logical_source_port'], sot.logical_source_port ) 
self.assertEqual( EXAMPLE['logical_destination_port'], sot.logical_destination_port ) self.assertEqual(EXAMPLE['l7_parameters'], sot.l7_parameters) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { "limit": "limit", "marker": "marker", 'description': 'description', 'name': 'name', 'project_id': 'project_id', 'tenant_id': 'tenant_id', 'ethertype': 'ethertype', 'protocol': 'protocol', 'source_port_range_min': 'source_port_range_min', 'source_port_range_max': 'source_port_range_max', 'destination_port_range_min': 'destination_port_range_min', 'destination_port_range_max': 'destination_port_range_max', 'logical_source_port': 'logical_source_port', 'logical_destination_port': 'logical_destination_port', }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_sfc_port_chain.py0000664000175000017500000000457600000000000026765 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import sfc_port_chain from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "description": "", "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", "port_pair_groups": ["p_group1", "p_group2"], "flow_classifiers": ["f_classifier1", "f_classifier_2"], "chain_parameters": {"correlation": "mpls", "symmetric": True}, "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", "name": "peers", } class TestPortChain(base.TestCase): def test_basic(self): sot = sfc_port_chain.SfcPortChain() self.assertEqual('port_chain', sot.resource_key) self.assertEqual('port_chains', sot.resources_key) self.assertEqual('/sfc/port_chains', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = sfc_port_chain.SfcPortChain(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['port_pair_groups'], sot.port_pair_groups) self.assertEqual(EXAMPLE['flow_classifiers'], sot.flow_classifiers) self.assertEqual(EXAMPLE['chain_parameters'], sot.chain_parameters) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { "limit": "limit", "marker": "marker", 'description': 'description', 'name': 'name', 'project_id': 'project_id', 'tenant_id': 'tenant_id', }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_sfc_port_pair.py0000664000175000017500000000474000000000000026627 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import sfc_port_pair from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "description": "", "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", "egress": "d294f042-1736-11ee-821a-7f8301c71f83", "ingress": "d9908eba-1736-11ee-b77f-1fcc4c520068", "service_function_parameters": {"correlation": "mpls", "weigjt": 101}, "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", "name": "port_pair_1", } class TestSfcPortPair(base.TestCase): def test_basic(self): sot = sfc_port_pair.SfcPortPair() self.assertEqual('port_pair', sot.resource_key) self.assertEqual('port_pairs', sot.resources_key) self.assertEqual('/sfc/port_pairs', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = sfc_port_pair.SfcPortPair(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['egress'], sot.egress) self.assertEqual(EXAMPLE['ingress'], sot.ingress) self.assertEqual( EXAMPLE['service_function_parameters'], sot.service_function_parameters, ) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { "limit": "limit", "marker": "marker", 'description': 'description', 'name': 'name', 'project_id': 'project_id', 'tenant_id': 'tenant_id', 'ingress': 'ingress', 'egress': 'egress', }, sot._query_mapping._mapping, ) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_sfc_port_pair_group.py0000664000175000017500000000450000000000000030035 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import sfc_port_pair_group from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "description": "", "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", "port_pairs": ["8d57819a-174d-11ee-97b0-2f370d29c014"], "port_pair_group_parameters": {}, "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", "name": "port_pair_gr", } class TestSfcPortPairGroup(base.TestCase): def test_basic(self): sot = sfc_port_pair_group.SfcPortPairGroup() self.assertEqual('port_pair_group', sot.resource_key) self.assertEqual('port_pair_groups', sot.resources_key) self.assertEqual('/sfc/port_pair_groups', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = sfc_port_pair_group.SfcPortPairGroup(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['port_pairs'], sot.port_pairs) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual( EXAMPLE['port_pair_group_parameters'], sot.port_pair_group_parameters, ) 
self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { "limit": "limit", "marker": "marker", 'description': 'description', 'name': 'name', 'project_id': 'project_id', 'tenant_id': 'tenant_id', }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_sfc_service_graph.py0000664000175000017500000000451300000000000027447 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import sfc_service_graph from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "description": "", "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", "port_chains": { "0e6b9678-19aa-11ee-97ae-a3cec2c2ac72": [ "1e19c266-19aa-11ee-8e02-6fa0c9a9832d" ], "2a394dc8-19aa-11ee-b87e-7f24d71926f1": [ "3299fcf6-19aa-11ee-9398-3f8c68c11209" ], }, "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", "name": "service_graph", } class TestSfcServiceGraph(base.TestCase): def test_basic(self): sot = sfc_service_graph.SfcServiceGraph() self.assertEqual('service_graph', sot.resource_key) self.assertEqual('service_graphs', sot.resources_key) self.assertEqual('/sfc/service_graphs', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = sfc_service_graph.SfcServiceGraph(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['port_chains'], sot.port_chains) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { "limit": "limit", "marker": "marker", 'description': 'description', 'name': 'name', 'project_id': 'project_id', 'tenant_id': 'tenant_id', }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_subnet.py0000664000175000017500000000612100000000000025270 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import subnet from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'allocation_pools': [{'1': 1}], 'cidr': '2', 'created_at': '3', 'description': '4', 'dns_nameservers': ['5'], 'dns_publish_fixed_ip': True, 'enable_dhcp': True, 'gateway_ip': '6', 'host_routes': ['7'], 'id': IDENTIFIER, 'ip_version': 8, 'ipv6_address_mode': '9', 'ipv6_ra_mode': '10', 'name': '11', 'network_id': '12', 'revision_number': 13, 'segment_id': '14', 'service_types': ['15'], 'subnetpool_id': '16', 'project_id': '17', 'updated_at': '18', 'use_default_subnetpool': True, } class TestSubnet(base.TestCase): def test_basic(self): sot = subnet.Subnet() self.assertEqual('subnet', sot.resource_key) self.assertEqual('subnets', sot.resources_key) self.assertEqual('/subnets', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = subnet.Subnet(**EXAMPLE) self.assertEqual(EXAMPLE['allocation_pools'], sot.allocation_pools) self.assertEqual(EXAMPLE['cidr'], sot.cidr) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['dns_nameservers'], sot.dns_nameservers) self.assertTrue(sot.dns_publish_fixed_ip) self.assertTrue(sot.is_dhcp_enabled) self.assertEqual(EXAMPLE['gateway_ip'], sot.gateway_ip) self.assertEqual(EXAMPLE['host_routes'], sot.host_routes) self.assertEqual(EXAMPLE['id'], sot.id) 
self.assertEqual(EXAMPLE['ip_version'], sot.ip_version) self.assertEqual(EXAMPLE['ipv6_address_mode'], sot.ipv6_address_mode) self.assertEqual(EXAMPLE['ipv6_ra_mode'], sot.ipv6_ra_mode) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['network_id'], sot.network_id) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertEqual(EXAMPLE['segment_id'], sot.segment_id) self.assertEqual(EXAMPLE['service_types'], sot.service_types) self.assertEqual(EXAMPLE['subnetpool_id'], sot.subnet_pool_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) self.assertTrue(sot.use_default_subnet_pool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_subnet_pool.py0000664000175000017500000000514100000000000026322 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import subnet_pool from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'address_scope_id': '1', 'created_at': '2', 'default_prefixlen': 3, 'default_quota': 4, 'description': '5', 'id': IDENTIFIER, 'ip_version': 6, 'is_default': True, 'max_prefixlen': 7, 'min_prefixlen': 8, 'name': '9', 'prefixes': ['10', '11'], 'revision_number': 12, 'shared': True, 'project_id': '13', 'updated_at': '14', } class TestSubnetpool(base.TestCase): def test_basic(self): sot = subnet_pool.SubnetPool() self.assertEqual('subnetpool', sot.resource_key) self.assertEqual('subnetpools', sot.resources_key) self.assertEqual('/subnetpools', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = subnet_pool.SubnetPool(**EXAMPLE) self.assertEqual(EXAMPLE['address_scope_id'], sot.address_scope_id) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual( EXAMPLE['default_prefixlen'], sot.default_prefix_length ) self.assertEqual(EXAMPLE['default_quota'], sot.default_quota) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['ip_version'], sot.ip_version) self.assertTrue(sot.is_default) self.assertEqual(EXAMPLE['max_prefixlen'], sot.maximum_prefix_length) self.assertEqual(EXAMPLE['min_prefixlen'], sot.minimum_prefix_length) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['prefixes'], sot.prefixes) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertTrue(sot.is_shared) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_tap_flow.py0000664000175000017500000000367500000000000025616 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import tap_flow from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'name': 'my_tap_flow', 'source_port': '1234', 'tap_service_id': '4321', 'id': IDENTIFIER, 'project_id': '42', } class TestTapFlow(base.TestCase): def test_basic(self): sot = tap_flow.TapFlow() self.assertEqual('tap_flow', sot.resource_key) self.assertEqual('tap_flows', sot.resources_key) self.assertEqual('/taas/tap_flows', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = tap_flow.TapFlow(**EXAMPLE) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['source_port'], sot.source_port) self.assertEqual(EXAMPLE['tap_service_id'], sot.tap_service_id) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'project_id': 'project_id', 'sort_key': 'sort_key', 'sort_dir': 'sort_dir', }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_tap_mirror.py0000664000175000017500000000422100000000000026145 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import tap_mirror from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' PORT_ID = 'PORT_ID' EXAMPLE = { 'name': 'my_tap_mirror', 'port_id': PORT_ID, 'directions': {'IN': 99}, 'remote_ip': '193.10.10.1', 'mirror_type': 'erspanv1', 'id': IDENTIFIER, 'project_id': '42', } class TestTapMirror(base.TestCase): def test_basic(self): sot = tap_mirror.TapMirror() self.assertEqual('tap_mirror', sot.resource_key) self.assertEqual('tap_mirrors', sot.resources_key) self.assertEqual('/taas/tap_mirrors', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = tap_mirror.TapMirror(**EXAMPLE) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['port_id'], sot.port_id) self.assertEqual(EXAMPLE['directions'], sot.directions) self.assertEqual(EXAMPLE['remote_ip'], sot.remote_ip) self.assertEqual(EXAMPLE['mirror_type'], sot.mirror_type) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'project_id': 'project_id', 'sort_key': 'sort_key', 'sort_dir': 'sort_dir', }, 
sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_tap_service.py0000664000175000017500000000355100000000000026300 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import tap_service from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'name': 'my_tap_service', 'port_id': '1234', 'id': IDENTIFIER, 'project_id': '42', } class TestTapService(base.TestCase): def test_basic(self): sot = tap_service.TapService() self.assertEqual('tap_service', sot.resource_key) self.assertEqual('tap_services', sot.resources_key) self.assertEqual('/taas/tap_services', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = tap_service.TapService(**EXAMPLE) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['port_id'], sot.port_id) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'project_id': 'project_id', 'sort_key': 'sort_key', 'sort_dir': 'sort_dir', }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_trunk.py0000664000175000017500000000723700000000000025144 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import testtools from openstack import exceptions from openstack.network.v2 import trunk from openstack.tests.unit import base EXAMPLE = { 'id': 'IDENTIFIER', 'description': 'Trunk description', 'name': 'trunk-name', 'project_id': '2', 'admin_state_up': True, 'port_id': 'fake_port_id', 'status': 'ACTIVE', 'sub_ports': [ { 'port_id': 'subport_port_id', 'segmentation_id': 1234, 'segmentation_type': 'vlan', } ], } class TestTrunk(base.TestCase): def test_basic(self): sot = trunk.Trunk() self.assertEqual('trunk', sot.resource_key) self.assertEqual('trunks', sot.resources_key) self.assertEqual('/trunks', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = trunk.Trunk(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['admin_state_up'], sot.is_admin_state_up) self.assertEqual(EXAMPLE['port_id'], sot.port_id) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['sub_ports'], sot.sub_ports) def 
test_add_subports_4xx(self): # Neutron may return 4xx for example if a port does not exist sot = trunk.Trunk(**EXAMPLE) response = mock.Mock() msg = '.*borked' response.body = {'NeutronError': {'message': msg}} response.json = mock.Mock(return_value=response.body) response.ok = False response.status_code = 404 response.headers = {'content-type': 'application/json'} sess = mock.Mock() sess.put = mock.Mock(return_value=response) subports = [ { 'port_id': 'abc', 'segmentation_id': '123', 'segmentation_type': 'vlan', } ] with testtools.ExpectedException(exceptions.NotFoundException, msg): sot.add_subports(sess, subports) def test_delete_subports_4xx(self): # Neutron may return 4xx for example if a port does not exist sot = trunk.Trunk(**EXAMPLE) response = mock.Mock() msg = '.*borked' response.body = {'NeutronError': {'message': msg}} response.json = mock.Mock(return_value=response.body) response.ok = False response.status_code = 404 response.headers = {'content-type': 'application/json'} sess = mock.Mock() sess.put = mock.Mock(return_value=response) subports = [ { 'port_id': 'abc', 'segmentation_id': '123', 'segmentation_type': 'vlan', } ] with testtools.ExpectedException(exceptions.NotFoundException, msg): sot.delete_subports(sess, subports) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_vpn_endpoint_group.py0000664000175000017500000000435600000000000027717 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import vpn_endpoint_group from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "description": "", "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", "endpoints": ["10.2.0.0/24", "10.3.0.0/24"], "type": "cidr", "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", "name": "peers", } class TestVpnEndpointGroup(base.TestCase): def test_basic(self): sot = vpn_endpoint_group.VpnEndpointGroup() self.assertEqual('endpoint_group', sot.resource_key) self.assertEqual('endpoint_groups', sot.resources_key) self.assertEqual('/vpn/endpoint-groups', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = vpn_endpoint_group.VpnEndpointGroup(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['endpoints'], sot.endpoints) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertDictEqual( { "limit": "limit", "marker": "marker", 'description': 'description', 'name': 'name', 'project_id': 'project_id', 'tenant_id': 'tenant_id', 'type': 'endpoint_type', }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_vpn_ikepolicy.py0000664000175000017500000000430000000000000026640 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import vpn_ike_policy from openstack.tests.unit import base EXAMPLE = { "auth_algorithm": "1", "description": "2", "encryption_algorithm": "3", "ike_version": "4", "lifetime": {'a': 5}, "name": "5", "pfs": "6", "project_id": "7", "phase1_negotiation_mode": "8", "units": "9", "value": 10, } class TestVpnIkePolicy(base.TestCase): def test_basic(self): sot = vpn_ike_policy.VpnIkePolicy() self.assertEqual('ikepolicy', sot.resource_key) self.assertEqual('ikepolicies', sot.resources_key) self.assertEqual('/vpn/ikepolicies', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = vpn_ike_policy.VpnIkePolicy(**EXAMPLE) self.assertEqual(EXAMPLE['auth_algorithm'], sot.auth_algorithm) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual( EXAMPLE['encryption_algorithm'], sot.encryption_algorithm ) self.assertEqual(EXAMPLE['ike_version'], sot.ike_version) self.assertEqual(EXAMPLE['lifetime'], sot.lifetime) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['pfs'], sot.pfs) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual( EXAMPLE['phase1_negotiation_mode'], sot.phase1_negotiation_mode ) self.assertEqual(EXAMPLE['units'], sot.units) self.assertEqual(EXAMPLE['value'], sot.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_vpn_ipsec_site_connection.py0000664000175000017500000000620600000000000031225 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import vpn_ipsec_site_connection from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "admin_state_up": True, "auth_mode": "1", "ikepolicy_id": "2", "vpnservice_id": "3", "local_ep_group_id": "4", "peer_address": "5", "route_mode": "6", "ipsecpolicy_id": "7", "peer_id": "8", "psk": "9", "description": "10", "initiator": "11", "peer_cidrs": ['1', '2'], "name": "12", "tenant_id": "13", "interval": 5, "mtu": 5, "peer_ep_group_id": "14", "dpd": {'a': 5}, "timeout": 16, "action": "17", "local_id": "18", } class TestVpnIPSecSiteConnection(base.TestCase): def test_basic(self): sot = vpn_ipsec_site_connection.VpnIPSecSiteConnection() self.assertEqual('ipsec_site_connection', sot.resource_key) self.assertEqual('ipsec_site_connections', sot.resources_key) self.assertEqual('/vpn/ipsec-site-connections', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = vpn_ipsec_site_connection.VpnIPSecSiteConnection(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['auth_mode'], sot.auth_mode) self.assertEqual(EXAMPLE['ikepolicy_id'], sot.ikepolicy_id) 
self.assertEqual(EXAMPLE['vpnservice_id'], sot.vpnservice_id) self.assertEqual(EXAMPLE['local_ep_group_id'], sot.local_ep_group_id) self.assertEqual(EXAMPLE['peer_address'], sot.peer_address) self.assertEqual(EXAMPLE['route_mode'], sot.route_mode) self.assertEqual(EXAMPLE['ipsecpolicy_id'], sot.ipsecpolicy_id) self.assertEqual(EXAMPLE['peer_id'], sot.peer_id) self.assertEqual(EXAMPLE['psk'], sot.psk) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['initiator'], sot.initiator) self.assertEqual(EXAMPLE['peer_cidrs'], sot.peer_cidrs) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) self.assertEqual(EXAMPLE['interval'], sot.interval) self.assertEqual(EXAMPLE['mtu'], sot.mtu) self.assertEqual(EXAMPLE['peer_ep_group_id'], sot.peer_ep_group_id) self.assertEqual(EXAMPLE['dpd'], sot.dpd) self.assertEqual(EXAMPLE['timeout'], sot.timeout) self.assertEqual(EXAMPLE['action'], sot.action) self.assertEqual(EXAMPLE['local_id'], sot.local_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_vpn_ipsecpolicy.py0000664000175000017500000000551000000000000027177 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.network.v2 import vpn_ipsec_policy from openstack.tests.unit import base EXAMPLE = { "auth_algorithm": "1", "description": "2", "encapsulation_mode": "tunnel", "encryption_algorithm": "3", "lifetime": {'a': 5}, "name": "5", "pfs": "6", "project_id": "7", "transform_protocol": "ESP", "units": "9", "value": 10, } class TestVpnIpsecPolicy(base.TestCase): def test_basic(self): sot = vpn_ipsec_policy.VpnIpsecPolicy() self.assertEqual('ipsecpolicy', sot.resource_key) self.assertEqual('ipsecpolicies', sot.resources_key) self.assertEqual('/vpn/ipsecpolicies', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = vpn_ipsec_policy.VpnIpsecPolicy(**EXAMPLE) self.assertEqual(EXAMPLE['auth_algorithm'], sot.auth_algorithm) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['encapsulation_mode'], sot.encapsulation_mode) self.assertEqual( EXAMPLE['encryption_algorithm'], sot.encryption_algorithm ) self.assertEqual(EXAMPLE['lifetime'], sot.lifetime) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['pfs'], sot.pfs) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['transform_protocol'], sot.transform_protocol) self.assertEqual(EXAMPLE['units'], sot.units) self.assertEqual(EXAMPLE['value'], sot.value) self.assertDictEqual( { "limit": "limit", "marker": "marker", 'auth_algorithm': 'auth_algorithm', 'description': 'description', 'encapsulation_mode': 'encapsulation_mode', 'encryption_algorithm': 'encryption_algorithm', 'name': 'name', 'pfs': 'pfs', 'project_id': 'project_id', 'phase1_negotiation_mode': 'phase1_negotiation_mode', 'transform_protocol': 'transform_protocol', }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/network/v2/test_vpn_service.py0000664000175000017500000000513700000000000026321 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network.v2 import vpn_service from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { "admin_state_up": True, "description": "1", "external_v4_ip": "2", "external_v6_ip": "3", "id": IDENTIFIER, "name": "4", "router_id": "5", "status": "6", "subnet_id": "7", "project_id": "8", } class TestVpnService(base.TestCase): def test_basic(self): sot = vpn_service.VpnService() self.assertEqual('vpnservice', sot.resource_key) self.assertEqual('vpnservices', sot.resources_key) self.assertEqual('/vpn/vpnservices', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = vpn_service.VpnService(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['external_v4_ip'], sot.external_v4_ip) self.assertEqual(EXAMPLE['external_v6_ip'], sot.external_v6_ip) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['router_id'], sot.router_id) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['subnet_id'], sot.subnet_id) self.assertEqual(EXAMPLE['project_id'], sot.project_id) 
self.assertDictEqual( { "limit": "limit", "marker": "marker", 'description': 'description', 'external_v4_ip': 'external_v4_ip', 'external_v6_ip': 'external_v6_ip', 'name': 'name', 'router_id': 'router_id', 'project_id': 'project_id', 'tenant_id': 'tenant_id', 'subnet_id': 'subnet_id', 'is_admin_state_up': 'admin_state_up', }, sot._query_mapping._mapping, ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.493433 openstacksdk-4.0.0/openstack/tests/unit/object_store/0000775000175000017500000000000000000000000023021 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/object_store/__init__.py0000664000175000017500000000000000000000000025120 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.493433 openstacksdk-4.0.0/openstack/tests/unit/object_store/v1/0000775000175000017500000000000000000000000023347 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/object_store/v1/__init__.py0000664000175000017500000000000000000000000025446 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/object_store/v1/test_account.py0000664000175000017500000000710100000000000026413 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.object_store.v1 import account from openstack.tests.unit import base CONTAINER_NAME = "mycontainer" ACCOUNT_EXAMPLE = { 'content-length': '0', 'accept-ranges': 'bytes', 'date': 'Sat, 05 Jul 2014 19:17:40 GMT', 'x-account-bytes-used': '12345', 'x-account-container-count': '678', 'content-type': 'text/plain; charset=utf-8', 'x-account-object-count': '98765', 'x-timestamp': '1453413555.88937', } class TestAccount(base.TestCase): def setUp(self): super().setUp() self.endpoint = self.cloud.object_store.get_endpoint() + '/' def test_basic(self): sot = account.Account(**ACCOUNT_EXAMPLE) self.assertIsNone(sot.resources_key) self.assertIsNone(sot.id) self.assertEqual('/', sot.base_path) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_head) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) self.assertFalse(sot.allow_create) def test_make_it(self): sot = account.Account(**ACCOUNT_EXAMPLE) self.assertIsNone(sot.id) self.assertEqual( int(ACCOUNT_EXAMPLE['x-account-bytes-used']), sot.account_bytes_used, ) self.assertEqual( int(ACCOUNT_EXAMPLE['x-account-container-count']), sot.account_container_count, ) self.assertEqual( int(ACCOUNT_EXAMPLE['x-account-object-count']), sot.account_object_count, ) self.assertEqual(ACCOUNT_EXAMPLE['x-timestamp'], sot.timestamp) def test_set_temp_url_key(self): sot = account.Account() key = 'super-secure-key' self.register_uris( [ dict( method='POST', uri=self.endpoint, status_code=204, validate=dict( headers={'x-account-meta-temp-url-key': key} ), ), dict( method='HEAD', 
uri=self.endpoint, headers={'x-account-meta-temp-url-key': key}, ), ] ) sot.set_temp_url_key(self.cloud.object_store, key) self.assert_calls() def test_set_account_temp_url_key_second(self): sot = account.Account() key = 'super-secure-key' self.register_uris( [ dict( method='POST', uri=self.endpoint, status_code=204, validate=dict( headers={'x-account-meta-temp-url-key-2': key} ), ), dict( method='HEAD', uri=self.endpoint, headers={'x-account-meta-temp-url-key-2': key}, ), ] ) sot.set_temp_url_key(self.cloud.object_store, key, secondary=True) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/object_store/v1/test_container.py0000664000175000017500000002342100000000000026744 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json from openstack.object_store.v1 import container from openstack.tests.unit import base class TestContainer(base.TestCase): def setUp(self): super().setUp() self.container = self.getUniqueString() self.endpoint = self.cloud.object_store.get_endpoint() + '/' self.container_endpoint = '{endpoint}{container}'.format( endpoint=self.endpoint, container=self.container ) self.body = { "count": 2, "bytes": 630666, "name": self.container, } self.headers = { 'x-container-object-count': '2', 'x-container-read': 'read-settings', 'x-container-write': 'write-settings', 'x-container-sync-to': 'sync-to', 'x-container-sync-key': 'sync-key', 'x-container-bytes-used': '630666', 'x-versions-location': 'versions-location', 'x-history-location': 'history-location', 'content-type': 'application/json; charset=utf-8', 'x-timestamp': '1453414055.48672', 'x-storage-policy': 'Gold', } self.body_plus_headers = dict(self.body, **self.headers) def test_basic(self): sot = container.Container.new(**self.body) self.assertIsNone(sot.resources_key) self.assertEqual('name', sot._alternate_id()) self.assertEqual('/', sot.base_path) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_head) self.assert_no_calls() def test_make_it(self): sot = container.Container.new(**self.body) self.assertEqual(self.body['name'], sot.id) self.assertEqual(self.body['name'], sot.name) self.assertEqual(self.body['count'], sot.count) self.assertEqual(self.body['count'], sot.object_count) self.assertEqual(self.body['bytes'], sot.bytes) self.assertEqual(self.body['bytes'], sot.bytes_used) self.assert_no_calls() def test_create_and_head(self): sot = container.Container(**self.body_plus_headers) # Attributes from create self.assertEqual(self.body_plus_headers['name'], sot.id) self.assertEqual(self.body_plus_headers['name'], sot.name) 
self.assertEqual(self.body_plus_headers['count'], sot.count) self.assertEqual(self.body_plus_headers['bytes'], sot.bytes) # Attributes from header self.assertEqual( int(self.body_plus_headers['x-container-object-count']), sot.object_count, ) self.assertEqual( int(self.body_plus_headers['x-container-bytes-used']), sot.bytes_used, ) self.assertEqual( self.body_plus_headers['x-container-read'], sot.read_ACL ) self.assertEqual( self.body_plus_headers['x-container-write'], sot.write_ACL ) self.assertEqual( self.body_plus_headers['x-container-sync-to'], sot.sync_to ) self.assertEqual( self.body_plus_headers['x-container-sync-key'], sot.sync_key ) self.assertEqual( self.body_plus_headers['x-versions-location'], sot.versions_location, ) self.assertEqual( self.body_plus_headers['x-history-location'], sot.history_location ) self.assertEqual(self.body_plus_headers['x-timestamp'], sot.timestamp) self.assertEqual( self.body_plus_headers['x-storage-policy'], sot.storage_policy ) def test_list(self): containers = [ {"count": 999, "bytes": 12345, "name": "container1"}, {"count": 888, "bytes": 54321, "name": "container2"}, ] self.register_uris( [dict(method='GET', uri=self.endpoint, json=containers)] ) response = container.Container.list(self.cloud.object_store) self.assertEqual(len(containers), len(list(response))) for index, item in enumerate(response): self.assertEqual(container.Container, type(item)) self.assertEqual(containers[index]["name"], item.name) self.assertEqual(containers[index]["count"], item.count) self.assertEqual(containers[index]["bytes"], item.bytes) self.assert_calls() def _test_create_update(self, sot, sot_call, sess_method): sot.read_ACL = "some ACL" sot.write_ACL = "another ACL" sot.is_content_type_detected = True headers = { "x-container-read": "some ACL", "x-container-write": "another ACL", "x-detect-content-type": 'True', "X-Container-Meta-foo": "bar", } self.register_uris( [ dict( method=sess_method, uri=self.container_endpoint, json=self.body, 
validate=dict(headers=headers), ), ] ) sot_call(self.cloud.object_store) self.assert_calls() def test_create(self): sot = container.Container.new( name=self.container, metadata={'foo': 'bar'} ) self._test_create_update(sot, sot.create, 'PUT') def test_commit(self): sot = container.Container.new( name=self.container, metadata={'foo': 'bar'} ) self._test_create_update(sot, sot.commit, 'POST') def test_to_dict_recursion(self): # This test is verifying that circular aliases in a Resource # do not cause infinite recursion. count is aliased to object_count # and object_count is aliased to count. sot = container.Container.new(name=self.container) sot_dict = sot.to_dict() self.assertIsNone(sot_dict['count']) self.assertIsNone(sot_dict['object_count']) self.assertEqual(sot_dict['id'], self.container) self.assertEqual(sot_dict['name'], self.container) def test_to_json(self): sot = container.Container.new(name=self.container) self.assertEqual( { 'bytes': None, 'bytes_used': None, 'content_type': None, 'count': None, 'id': self.container, 'if_none_match': None, 'is_content_type_detected': None, 'is_newest': None, 'location': None, 'name': self.container, 'object_count': None, 'read_ACL': None, 'sync_key': None, 'sync_to': None, 'meta_temp_url_key': None, 'meta_temp_url_key_2': None, 'timestamp': None, 'versions_location': None, 'history_location': None, 'write_ACL': None, 'storage_policy': None, }, json.loads(json.dumps(sot)), ) def _test_no_headers(self, sot, sot_call, sess_method): headers = {} self.register_uris( [ dict( method=sess_method, uri=self.container_endpoint, validate=dict(headers=headers), ) ] ) sot_call(self.cloud.object_store) def test_create_no_headers(self): sot = container.Container.new(name=self.container) self._test_no_headers(sot, sot.create, 'PUT') self.assert_calls() def test_commit_no_headers(self): sot = container.Container.new(name=self.container) self._test_no_headers(sot, sot.commit, 'POST') self.assert_calls() def test_set_temp_url_key(self): sot 
= container.Container.new(name=self.container) key = self.getUniqueString() self.register_uris( [ dict( method='POST', uri=self.container_endpoint, status_code=204, validate=dict( headers={'x-container-meta-temp-url-key': key} ), ), dict( method='HEAD', uri=self.container_endpoint, headers={'x-container-meta-temp-url-key': key}, ), ] ) sot.set_temp_url_key(self.cloud.object_store, key) self.assert_calls() def test_set_temp_url_key_second(self): sot = container.Container.new(name=self.container) key = self.getUniqueString() self.register_uris( [ dict( method='POST', uri=self.container_endpoint, status_code=204, validate=dict( headers={'x-container-meta-temp-url-key-2': key} ), ), dict( method='HEAD', uri=self.container_endpoint, headers={'x-container-meta-temp-url-key-2': key}, ), ] ) sot.set_temp_url_key(self.cloud.object_store, key, secondary=True) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/object_store/v1/test_info.py0000664000175000017500000000536200000000000025721 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.object_store.v1 import info from openstack.tests.unit import base class TestInfo(base.TestCase): def setUp(self): super().setUp() def test_get_info_url(self): sot = info.Info() test_urls = { 'http://object.cloud.example.com': 'http://object.cloud.example.com/info', 'http://object.cloud.example.com/': 'http://object.cloud.example.com/info', 'http://object.cloud.example.com/v1': 'http://object.cloud.example.com/info', 'http://object.cloud.example.com/v1/': 'http://object.cloud.example.com/info', 'http://object.cloud.example.com/swift': 'http://object.cloud.example.com/swift/info', 'http://object.cloud.example.com/swift/': 'http://object.cloud.example.com/swift/info', 'http://object.cloud.example.com/v1.0': 'http://object.cloud.example.com/info', 'http://object.cloud.example.com/swift/v1.0': 'http://object.cloud.example.com/swift/info', 'http://object.cloud.example.com/v111': 'http://object.cloud.example.com/info', 'http://object.cloud.example.com/v111/test': 'http://object.cloud.example.com/info', 'http://object.cloud.example.com/v1/test': 'http://object.cloud.example.com/info', 'http://object.cloud.example.com/swift/v1.0/test': 'http://object.cloud.example.com/swift/info', 'http://object.cloud.example.com/v1.0/test': 'http://object.cloud.example.com/info', 'https://object.cloud.example.com/swift/v1/AUTH_%(tenant_id)s': 'https://object.cloud.example.com/swift/info', 'https://object.cloud.example.com/swift/v1/AUTH_%(project_id)s': 'https://object.cloud.example.com/swift/info', 'https://object.cloud.example.com/services/swift/v1/AUTH_%(project_id)s': 'https://object.cloud.example.com/services/swift/info', 'https://object.cloud.example.com/services/swift/v1/AUTH_%(project_id)s/': 'https://object.cloud.example.com/services/swift/info', 'https://object.cloud.example.com/info/v1/AUTH_%(project_id)s/': 'https://object.cloud.example.com/info/info', } for uri_k, uri_v in test_urls.items(): self.assertEqual(sot._get_info_url(uri_k), uri_v) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/object_store/v1/test_obj.py0000664000175000017500000001603400000000000025536 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.object_store.v1 import obj from openstack.tests.unit.cloud import test_object as base_test_object # Object can receive both last-modified in headers and last_modified in # the body. However, originally, only last-modified was handled as an # expected prop but it was named last_modified. Under Python 3, creating # an Object with the body value last_modified causes the _attrs dictionary # size to change while iterating over its values as we have an attribute # called `last_modified` and we attempt to grow an additional attribute # called `last-modified`, which is the "name" of `last_modified`. # The same is true of content_type and content-type, or any prop # attribute which would follow the same pattern. # This example should represent the body values returned by a GET, so the keys # must be underscores. 
class TestObject(base_test_object.BaseTestObject): def setUp(self): super().setUp() self.the_data = b'test body' self.the_data_length = len(self.the_data) # TODO(mordred) Make the_data be from getUniqueString and then # have hash and etag be actual md5 sums of that string self.body = { "hash": "243f87b91224d85722564a80fd3cb1f1", "last_modified": "2014-07-13T18:41:03.319240", "bytes": self.the_data_length, "name": self.object, "content_type": "application/octet-stream", } self.headers = { 'Content-Length': str(len(self.the_data)), 'Content-Type': 'application/octet-stream', 'Accept-Ranges': 'bytes', 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', 'X-Timestamp': '1481808853.65009', 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', 'Date': 'Mon, 19 Dec 2016 14:24:00 GMT', 'X-Static-Large-Object': 'True', 'X-Object-Meta-Mtime': '1481513709.168512', 'X-Delete-At': '1453416226.16744', } def test_basic(self): sot = obj.Object.new(**self.body) self.assert_no_calls() self.assertIsNone(sot.resources_key) self.assertEqual('name', sot._alternate_id()) self.assertEqual('/%(container)s', sot.base_path) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_head) self.assertDictEqual( { 'filename': 'filename', 'format': 'format', 'limit': 'limit', 'marker': 'marker', 'multipart_manifest': 'multipart-manifest', 'prefix': 'prefix', 'symlink': 'symlink', 'temp_url_expires': 'temp_url_expires', 'temp_url_sig': 'temp_url_sig', }, sot._query_mapping._mapping, ) def test_new(self): sot = obj.Object.new(container=self.container, name=self.object) self.assert_no_calls() self.assertEqual(self.object, sot.name) self.assertEqual(self.container, sot.container) def test_from_body(self): sot = obj.Object.existing(container=self.container, **self.body) self.assert_no_calls() # Attributes from header 
self.assertEqual(self.container, sot.container) self.assertEqual(int(self.body['bytes']), sot.content_length) self.assertEqual(self.body['last_modified'], sot.last_modified_at) self.assertEqual(self.body['hash'], sot.etag) self.assertEqual(self.body['content_type'], sot.content_type) def test_from_headers(self): sot = obj.Object.existing(container=self.container, **self.headers) self.assert_no_calls() # Attributes from header self.assertEqual(self.container, sot.container) self.assertEqual( int(self.headers['Content-Length']), sot.content_length ) self.assertEqual(self.headers['Accept-Ranges'], sot.accept_ranges) self.assertEqual(self.headers['Last-Modified'], sot.last_modified_at) self.assertEqual(self.headers['Etag'], sot.etag) self.assertEqual(self.headers['X-Timestamp'], sot.timestamp) self.assertEqual(self.headers['Content-Type'], sot.content_type) self.assertEqual(self.headers['X-Delete-At'], sot.delete_at) # Verify that we also properly process lowcased headers # All headers are processed in _base._set_metadata therefore invoke it # here directly sot._set_metadata(headers={"x-object-meta-foo": "bar"}) self.assert_no_calls() # Attributes from header self.assertEqual("bar", sot.metadata["foo"]) def test_download(self): headers = { 'X-Newest': 'True', 'If-Match': self.headers['Etag'], 'Accept': '*/*', } self.register_uris( [ dict( method='GET', uri=self.object_endpoint, headers=self.headers, content=self.the_data, validate=dict(headers=headers), ) ] ) sot = obj.Object.new(container=self.container, name=self.object) sot.is_newest = True # if_match is a list type, but we're passing a string. This tests # the up-conversion works properly. 
sot.if_match = self.headers['Etag'] rv = sot.download(self.cloud.object_store) self.assertEqual(self.the_data, rv) self.assert_calls() def _test_create(self, method, data): sot = obj.Object.new( container=self.container, name=self.object, data=data, metadata={'foo': 'bar'}, ) sot.is_newest = True sent_headers = {"x-newest": 'True', "X-Object-Meta-foo": "bar"} self.register_uris( [ dict( method=method, uri=self.object_endpoint, headers=self.headers, validate=dict(headers=sent_headers), ) ] ) rv = sot.create(self.cloud.object_store) self.assertEqual(rv.etag, self.headers['Etag']) self.assert_calls() def test_create_data(self): self._test_create('PUT', self.the_data) def test_create_no_data(self): self._test_create('PUT', None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/object_store/v1/test_proxy.py0000664000175000017500000005537000000000000026153 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from hashlib import sha1 import random import string import tempfile import time from unittest import mock import requests_mock from testscenarios import load_tests_apply_scenarios as load_tests # noqa from openstack.object_store.v1 import account from openstack.object_store.v1 import container from openstack.object_store.v1 import obj from openstack.tests.unit.cloud import test_object as base_test_object from openstack.tests.unit import test_proxy_base class FakeResponse: def __init__(self, response, status_code=200, headers=None): self.body = response self.status_code = status_code self.headers = headers if headers else {} def json(self): return self.body class TestObjectStoreProxy(test_proxy_base.TestProxyBase): kwargs_to_path_args = False def setUp(self): super().setUp() self.proxy = self.cloud.object_store self.container = self.getUniqueString() self.endpoint = self.cloud.object_store.get_endpoint() + '/' self.container_endpoint = '{endpoint}{container}'.format( endpoint=self.endpoint, container=self.container ) def test_account_metadata_get(self): self.verify_head( self.proxy.get_account_metadata, account.Account, method_args=[] ) def test_container_metadata_get(self): self.verify_head( self.proxy.get_container_metadata, container.Container, method_args=["container"], ) def test_container_delete(self): self.verify_delete( self.proxy.delete_container, container.Container, False ) def test_container_delete_ignore(self): self.verify_delete( self.proxy.delete_container, container.Container, True ) def test_container_create_attrs(self): self.verify_create( self.proxy.create_container, container.Container, method_args=['container_name'], expected_args=[], expected_kwargs={'name': 'container_name', "x": 1, "y": 2, "z": 3}, ) def test_object_metadata_get(self): self._verify( "openstack.proxy.Proxy._head", self.proxy.get_object_metadata, method_args=['object'], method_kwargs={'container': 'container'}, expected_args=[obj.Object, 'object'], 
expected_kwargs={'container': 'container'}, ) def _test_object_delete(self, ignore): expected_kwargs = { "ignore_missing": ignore, "container": "name", } self._verify( "openstack.proxy.Proxy._delete", self.proxy.delete_object, method_args=["resource"], method_kwargs=expected_kwargs, expected_args=[obj.Object, "resource"], expected_kwargs=expected_kwargs, ) def test_object_delete(self): self._test_object_delete(False) def test_object_delete_ignore(self): self._test_object_delete(True) def test_object_create_attrs(self): kwargs = { "name": "test", "data": "data", "container": "name", "metadata": {}, } self._verify( "openstack.proxy.Proxy._create", self.proxy.upload_object, method_kwargs=kwargs, expected_args=[obj.Object], expected_kwargs=kwargs, ) def test_object_create_no_container(self): self.assertRaises(TypeError, self.proxy.upload_object) def test_object_get(self): with requests_mock.Mocker() as m: m.get("%scontainer/object" % self.endpoint, text="data") res = self.proxy.get_object("object", container="container") self.assertIsNone(res.data) def test_object_get_write_file(self): with requests_mock.Mocker() as m: m.get("%scontainer/object" % self.endpoint, text="data") with tempfile.NamedTemporaryFile() as f: self.proxy.get_object( "object", container="container", outfile=f.name ) dt = open(f.name).read() self.assertEqual(dt, "data") def test_object_get_remember_content(self): with requests_mock.Mocker() as m: m.get("%scontainer/object" % self.endpoint, text="data") res = self.proxy.get_object( "object", container="container", remember_content=True ) self.assertEqual(res.data, "data") def test_set_temp_url_key(self): key = 'super-secure-key' self.register_uris( [ dict( method='POST', uri=self.endpoint, status_code=204, validate=dict( headers={'x-account-meta-temp-url-key': key} ), ), dict( method='HEAD', uri=self.endpoint, headers={'x-account-meta-temp-url-key': key}, ), ] ) self.proxy.set_account_temp_url_key(key) self.assert_calls() def 
test_set_account_temp_url_key_second(self): key = 'super-secure-key' self.register_uris( [ dict( method='POST', uri=self.endpoint, status_code=204, validate=dict( headers={'x-account-meta-temp-url-key-2': key} ), ), dict( method='HEAD', uri=self.endpoint, headers={'x-account-meta-temp-url-key-2': key}, ), ] ) self.proxy.set_account_temp_url_key(key, secondary=True) self.assert_calls() def test_set_container_temp_url_key(self): key = 'super-secure-key' self.register_uris( [ dict( method='POST', uri=self.container_endpoint, status_code=204, validate=dict( headers={'x-container-meta-temp-url-key': key} ), ), dict( method='HEAD', uri=self.container_endpoint, headers={'x-container-meta-temp-url-key': key}, ), ] ) self.proxy.set_container_temp_url_key(self.container, key) self.assert_calls() def test_set_container_temp_url_key_second(self): key = 'super-secure-key' self.register_uris( [ dict( method='POST', uri=self.container_endpoint, status_code=204, validate=dict( headers={'x-container-meta-temp-url-key-2': key} ), ), dict( method='HEAD', uri=self.container_endpoint, headers={'x-container-meta-temp-url-key-2': key}, ), ] ) self.proxy.set_container_temp_url_key( self.container, key, secondary=True ) self.assert_calls() def test_copy_object(self): self.assertRaises(NotImplementedError, self.proxy.copy_object) def test_file_segment(self): file_size = 4200 content = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(file_size) ).encode('latin-1') self.imagefile = tempfile.NamedTemporaryFile(delete=False) self.imagefile.write(content) self.imagefile.close() segments = self.proxy._get_file_segments( endpoint='test_container/test_image', filename=self.imagefile.name, file_size=file_size, segment_size=1000, ) self.assertEqual(len(segments), 5) segment_content = b'' for index, (name, segment) in enumerate(segments.items()): self.assertEqual( f'test_container/test_image/{index:0>6}', name, ) segment_content += segment.read() self.assertEqual(content, 
segment_content) class TestDownloadObject(base_test_object.BaseTestObject): def setUp(self): super().setUp() self.the_data = b'test body' self.register_uris( [ dict( method='GET', uri=self.object_endpoint, headers={ 'Content-Length': str(len(self.the_data)), 'Content-Type': 'application/octet-stream', 'Accept-Ranges': 'bytes', 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', 'X-Timestamp': '1481808853.65009', 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', 'Date': 'Mon, 19 Dec 2016 14:24:00 GMT', 'X-Static-Large-Object': 'True', 'X-Object-Meta-Mtime': '1481513709.168512', }, content=self.the_data, ) ] ) def test_download(self): data = self.cloud.object_store.download_object( self.object, container=self.container ) self.assertEqual(data, self.the_data) self.assert_calls() def test_stream(self): chunk_size = 2 for index, chunk in enumerate( self.cloud.object_store.stream_object( self.object, container=self.container, chunk_size=chunk_size ) ): chunk_len = len(chunk) start = index * chunk_size end = start + chunk_len self.assertLessEqual(chunk_len, chunk_size) self.assertEqual(chunk, self.the_data[start:end]) self.assert_calls() class TestExtractName(TestObjectStoreProxy): scenarios = [ ('discovery', dict(url='/', parts=['account'])), ('endpoints', dict(url='/endpoints', parts=['endpoints'])), ( 'container', dict(url='/AUTH_123/container_name', parts=['container']), ), ('object', dict(url='/container_name/object_name', parts=['object'])), ( 'object_long', dict( url='/v1/AUTH_123/cnt/path/deep/object_name', parts=['object'] ), ), ] def test_extract_name(self): results = self.proxy._extract_name(self.url, project_id='123') self.assertEqual(self.parts, results) class TestTempURL(TestObjectStoreProxy): expires_iso8601_format = '%Y-%m-%dT%H:%M:%SZ' short_expires_iso8601_format = '%Y-%m-%d' time_errmsg = ( 'time must either be a whole number or in specific ISO 8601 format.' 
) path_errmsg = 'path must be full path to an object e.g. /v1/a/c/o' url = '/v1/AUTH_account/c/o' seconds = 3600 key = 'correcthorsebatterystaple' method = 'GET' expected_url = url + ( '?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' ) expected_body = '\n'.join( [ method, '1400003600', url, ] ).encode('utf-8') @mock.patch('hmac.HMAC') @mock.patch('time.time', return_value=1400000000) def test_generate_temp_url(self, time_mock, hmac_mock): hmac_mock().hexdigest.return_value = 'temp_url_signature' url = self.proxy.generate_temp_url( self.url, self.seconds, self.method, temp_url_key=self.key ) key = self.key if not isinstance(key, bytes): key = key.encode('utf-8') self.assertEqual(url, self.expected_url) self.assertEqual( hmac_mock.mock_calls, [ mock.call(), mock.call(key, self.expected_body, sha1), mock.call().hexdigest(), ], ) self.assertIsInstance(url, type(self.url)) @mock.patch('hmac.HMAC') @mock.patch('time.time', return_value=1400000000) def test_generate_temp_url_ip_range(self, time_mock, hmac_mock): hmac_mock().hexdigest.return_value = 'temp_url_signature' ip_ranges = [ '1.2.3.4', '1.2.3.4/24', '2001:db8::', b'1.2.3.4', b'1.2.3.4/24', b'2001:db8::', ] path = '/v1/AUTH_account/c/o/' expected_url = path + ( '?temp_url_sig=temp_url_signature' '&temp_url_expires=1400003600' '&temp_url_ip_range=' ) for ip_range in ip_ranges: hmac_mock.reset_mock() url = self.proxy.generate_temp_url( path, self.seconds, self.method, temp_url_key=self.key, ip_range=ip_range, ) key = self.key if not isinstance(key, bytes): key = key.encode('utf-8') if isinstance(ip_range, bytes): ip_range_expected_url = expected_url + ip_range.decode('utf-8') expected_body = '\n'.join( [ 'ip=' + ip_range.decode('utf-8'), self.method, '1400003600', path, ] ).encode('utf-8') else: ip_range_expected_url = expected_url + ip_range expected_body = '\n'.join( [ 'ip=' + ip_range, self.method, '1400003600', path, ] ).encode('utf-8') self.assertEqual(url, ip_range_expected_url) self.assertEqual( 
hmac_mock.mock_calls, [ mock.call(key, expected_body, sha1), mock.call().hexdigest(), ], ) self.assertIsInstance(url, type(path)) @mock.patch('hmac.HMAC') def test_generate_temp_url_iso8601_argument(self, hmac_mock): hmac_mock().hexdigest.return_value = 'temp_url_signature' url = self.proxy.generate_temp_url( self.url, '2014-05-13T17:53:20Z', self.method, temp_url_key=self.key, ) self.assertEqual(url, self.expected_url) # Don't care about absolute arg. url = self.proxy.generate_temp_url( self.url, '2014-05-13T17:53:20Z', self.method, temp_url_key=self.key, absolute=True, ) self.assertEqual(url, self.expected_url) lt = time.localtime() expires = time.strftime(self.expires_iso8601_format[:-1], lt) if not isinstance(self.expected_url, str): expected_url = self.expected_url.replace( b'1400003600', bytes(str(int(time.mktime(lt))), encoding='ascii'), ) else: expected_url = self.expected_url.replace( '1400003600', str(int(time.mktime(lt))) ) url = self.proxy.generate_temp_url( self.url, expires, self.method, temp_url_key=self.key ) self.assertEqual(url, expected_url) expires = time.strftime(self.short_expires_iso8601_format, lt) lt = time.strptime(expires, self.short_expires_iso8601_format) if not isinstance(self.expected_url, str): expected_url = self.expected_url.replace( b'1400003600', bytes(str(int(time.mktime(lt))), encoding='ascii'), ) else: expected_url = self.expected_url.replace( '1400003600', str(int(time.mktime(lt))) ) url = self.proxy.generate_temp_url( self.url, expires, self.method, temp_url_key=self.key ) self.assertEqual(url, expected_url) @mock.patch('hmac.HMAC') @mock.patch('time.time', return_value=1400000000) def test_generate_temp_url_iso8601_output(self, time_mock, hmac_mock): hmac_mock().hexdigest.return_value = 'temp_url_signature' url = self.proxy.generate_temp_url( self.url, self.seconds, self.method, temp_url_key=self.key, iso8601=True, ) key = self.key if not isinstance(key, bytes): key = key.encode('utf-8') expires = time.strftime( 
self.expires_iso8601_format, time.gmtime(1400003600) ) if not isinstance(self.url, str): self.assertTrue(url.endswith(bytes(expires, 'utf-8'))) else: self.assertTrue(url.endswith(expires)) self.assertEqual( hmac_mock.mock_calls, [ mock.call(), mock.call(key, self.expected_body, sha1), mock.call().hexdigest(), ], ) self.assertIsInstance(url, type(self.url)) @mock.patch('hmac.HMAC') @mock.patch('time.time', return_value=1400000000) def test_generate_temp_url_prefix(self, time_mock, hmac_mock): hmac_mock().hexdigest.return_value = 'temp_url_signature' prefixes = ['', 'o', 'p0/p1/'] for p in prefixes: hmac_mock.reset_mock() path = '/v1/AUTH_account/c/' + p expected_url = path + ( '?temp_url_sig=temp_url_signature' '&temp_url_expires=1400003600' '&temp_url_prefix=' + p ) expected_body = '\n'.join( [ self.method, '1400003600', 'prefix:' + path, ] ).encode('utf-8') url = self.proxy.generate_temp_url( path, self.seconds, self.method, prefix=True, temp_url_key=self.key, ) key = self.key if not isinstance(key, bytes): key = key.encode('utf-8') self.assertEqual(url, expected_url) self.assertEqual( hmac_mock.mock_calls, [ mock.call(key, expected_body, sha1), mock.call().hexdigest(), ], ) self.assertIsInstance(url, type(path)) def test_generate_temp_url_invalid_path(self): self.assertRaisesRegex( ValueError, 'path must be representable as UTF-8', self.proxy.generate_temp_url, b'/v1/a/c/\xff', self.seconds, self.method, temp_url_key=self.key, ) @mock.patch('hmac.HMAC.hexdigest', return_value="temp_url_signature") def test_generate_absolute_expiry_temp_url(self, hmac_mock): if isinstance(self.expected_url, bytes): expected_url = self.expected_url.replace( b'1400003600', b'2146636800' ) else: expected_url = self.expected_url.replace( '1400003600', '2146636800' ) url = self.proxy.generate_temp_url( self.url, 2146636800, self.method, absolute=True, temp_url_key=self.key, ) self.assertEqual(url, expected_url) def test_generate_temp_url_bad_time(self): for bad_time in [ 'not_an_int', 
-1, 1.1, '-1', '1.1', '2015-05', '2015-05-01T01:00', ]: self.assertRaisesRegex( ValueError, self.time_errmsg, self.proxy.generate_temp_url, self.url, bad_time, self.method, temp_url_key=self.key, ) def test_generate_temp_url_bad_path(self): for bad_path in [ '/v1/a/c', 'v1/a/c/o', 'blah/v1/a/c/o', '/v1//c/o', '/v1/a/c/', '/v1/a/c', ]: self.assertRaisesRegex( ValueError, self.path_errmsg, self.proxy.generate_temp_url, bad_path, 60, self.method, temp_url_key=self.key, ) class TestTempURLUnicodePathAndKey(TestTempURL): url = '/v1/\u00e4/c/\u00f3' key = 'k\u00e9y' expected_url = ( '%s?temp_url_sig=temp_url_signature' '&temp_url_expires=1400003600' ) % url expected_body = '\n'.join( [ 'GET', '1400003600', url, ] ).encode('utf-8') class TestTempURLUnicodePathBytesKey(TestTempURL): url = '/v1/\u00e4/c/\u00f3' key = 'k\u00e9y'.encode() expected_url = ( '%s?temp_url_sig=temp_url_signature' '&temp_url_expires=1400003600' ) % url expected_body = '\n'.join( [ 'GET', '1400003600', url, ] ).encode('utf-8') class TestTempURLBytesPathUnicodeKey(TestTempURL): url = '/v1/\u00e4/c/\u00f3'.encode() key = 'k\u00e9y' expected_url = url + ( b'?temp_url_sig=temp_url_signature' b'&temp_url_expires=1400003600' ) expected_body = b'\n'.join( [ b'GET', b'1400003600', url, ] ) class TestTempURLBytesPathAndKey(TestTempURL): url = '/v1/\u00e4/c/\u00f3'.encode() key = 'k\u00e9y'.encode() expected_url = url + ( b'?temp_url_sig=temp_url_signature' b'&temp_url_expires=1400003600' ) expected_body = b'\n'.join( [ b'GET', b'1400003600', url, ] ) class TestTempURLBytesPathAndNonUtf8Key(TestTempURL): url = '/v1/\u00e4/c/\u00f3'.encode() key = b'k\xffy' expected_url = url + ( b'?temp_url_sig=temp_url_signature' b'&temp_url_expires=1400003600' ) expected_body = b'\n'.join( [ b'GET', b'1400003600', url, ] ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1725296385.493433 
openstacksdk-4.0.0/openstack/tests/unit/orchestration/0000775000175000017500000000000000000000000023223 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/__init__.py0000664000175000017500000000000000000000000025322 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/test_version.py0000664000175000017500000000261100000000000026321 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.orchestration import version from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'status': '3', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4974349 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/0000775000175000017500000000000000000000000023551 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/__init__.py0000664000175000017500000000000000000000000025650 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/hello_world.yaml0000664000175000017500000000211200000000000026743 0ustar00zuulzuul00000000000000# # Minimal HOT template defining a single compute server. 
# heat_template_version: 2013-05-23 description: > Minimal HOT template for stack parameters: key_name: type: string description: Name of an existing key pair to use for the server constraints: - custom_constraint: nova.keypair flavor: type: string description: Flavor for the server to be created default: m1.small constraints: - custom_constraint: nova.flavor image: type: string description: Image ID or image name to use for the server constraints: - custom_constraint: glance.image network: type: string description: Network used by the server resources: server: type: OS::Nova::Server properties: key_name: { get_param: key_name } image: { get_param: image } flavor: { get_param: flavor } networks: [{network: {get_param: network} }] metadata: message: {get_file: helloworld.txt} outputs: server_networks: description: The networks of the deployed server value: { get_attr: [server, networks] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/helloworld.txt0000664000175000017500000000000600000000000026461 0ustar00zuulzuul00000000000000Hello ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/test_proxy.py0000664000175000017500000004420400000000000026347 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from testscenarios import load_tests_apply_scenarios as load_tests # noqa from openstack import exceptions from openstack.orchestration.v1 import _proxy from openstack.orchestration.v1 import resource from openstack.orchestration.v1 import software_config as sc from openstack.orchestration.v1 import software_deployment as sd from openstack.orchestration.v1 import stack from openstack.orchestration.v1 import stack_environment from openstack.orchestration.v1 import stack_event from openstack.orchestration.v1 import stack_files from openstack.orchestration.v1 import stack_template from openstack.orchestration.v1 import template from openstack import proxy from openstack.tests.unit import test_proxy_base class TestOrchestrationProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestOrchestrationStack(TestOrchestrationProxy): def test_create_stack(self): self.verify_create(self.proxy.create_stack, stack.Stack) def test_create_stack_preview(self): self.verify_create( self.proxy.create_stack, stack.Stack, method_kwargs={"preview": True, "x": 1, "y": 2, "z": 3}, expected_kwargs={"x": 1, "y": 2, "z": 3}, ) def test_find_stack(self): self.verify_find( self.proxy.find_stack, stack.Stack, expected_kwargs={'resolve_outputs': True}, ) # mock_method="openstack.proxy.Proxy._find" # test_method=self.proxy.find_stack # method_kwargs = { # 'resolve_outputs': False, # 'ignore_missing': False # } # method_args=["name_or_id"] # self._verify( # mock_method, test_method, # method_args=method_args, # method_kwargs=method_kwargs, # expected_args=[stack.Stack, "name_or_id"], # expected_kwargs=method_kwargs, # expected_result="result") # # method_kwargs = { # 'resolve_outputs': True, # 'ignore_missing': True # } # self._verify( # mock_method, test_method, # method_args=method_args, # method_kwargs=method_kwargs, # expected_args=[stack.Stack, "name_or_id"], # expected_kwargs=method_kwargs, # 
expected_result="result") def test_stacks(self): self.verify_list(self.proxy.stacks, stack.Stack) def test_get_stack(self): self.verify_get( self.proxy.get_stack, stack.Stack, method_kwargs={'resolve_outputs': False}, expected_kwargs={'resolve_outputs': False}, ) self.verify_get_overrided( self.proxy, stack.Stack, 'openstack.orchestration.v1.stack.Stack' ) def test_update_stack(self): self._verify( 'openstack.orchestration.v1.stack.Stack.update', self.proxy.update_stack, expected_result='result', method_args=['stack'], method_kwargs={'preview': False}, expected_args=[self.proxy, False], ) def test_update_stack_preview(self): self._verify( 'openstack.orchestration.v1.stack.Stack.update', self.proxy.update_stack, expected_result='result', method_args=['stack'], method_kwargs={'preview': True}, expected_args=[self.proxy, True], ) def test_abandon_stack(self): self._verify( 'openstack.orchestration.v1.stack.Stack.abandon', self.proxy.abandon_stack, expected_result='result', method_args=['stack'], expected_args=[self.proxy], ) @mock.patch.object(stack.Stack, 'find') def test_export_stack_with_identity(self, mock_find): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) mock_find.return_value = stk self._verify( 'openstack.orchestration.v1.stack.Stack.export', self.proxy.export_stack, method_args=['IDENTITY'], expected_args=[self.proxy], ) mock_find.assert_called_once_with( mock.ANY, 'IDENTITY', ignore_missing=False ) def test_export_stack_with_object(self): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) self._verify( 'openstack.orchestration.v1.stack.Stack.export', self.proxy.export_stack, method_args=[stk], expected_args=[self.proxy], ) def test_suspend_stack(self): self._verify( 'openstack.orchestration.v1.stack.Stack.suspend', self.proxy.suspend_stack, method_args=['stack'], expected_args=[self.proxy], ) def test_resume_stack(self): self._verify( 
'openstack.orchestration.v1.stack.Stack.resume', self.proxy.resume_stack, method_args=['stack'], expected_args=[self.proxy], ) def test_delete_stack(self): self.verify_delete(self.proxy.delete_stack, stack.Stack, False) def test_delete_stack_ignore(self): self.verify_delete(self.proxy.delete_stack, stack.Stack, True) @mock.patch.object(stack.Stack, 'check') def test_check_stack_with_stack_object(self, mock_check): stk = stack.Stack(id='FAKE_ID') res = self.proxy.check_stack(stk) self.assertIsNone(res) mock_check.assert_called_once_with(self.proxy) @mock.patch.object(stack.Stack, 'existing') def test_check_stack_with_stack_ID(self, mock_stack): stk = mock.Mock() mock_stack.return_value = stk res = self.proxy.check_stack('FAKE_ID') self.assertIsNone(res) mock_stack.assert_called_once_with(id='FAKE_ID') stk.check.assert_called_once_with(self.proxy) class TestOrchestrationStackEnvironment(TestOrchestrationProxy): @mock.patch.object(stack.Stack, 'find') def test_get_stack_environment_with_stack_identity(self, mock_find): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) mock_find.return_value = stk self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_stack_environment, method_args=['IDENTITY'], expected_args=[stack_environment.StackEnvironment], expected_kwargs={ 'requires_id': False, 'stack_name': stack_name, 'stack_id': stack_id, }, ) mock_find.assert_called_once_with( mock.ANY, 'IDENTITY', ignore_missing=False ) def test_get_stack_environment_with_stack_object(self): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_stack_environment, method_args=[stk], expected_args=[stack_environment.StackEnvironment], expected_kwargs={ 'requires_id': False, 'stack_name': stack_name, 'stack_id': stack_id, }, ) class TestOrchestrationStackFiles(TestOrchestrationProxy): @mock.patch.object(stack_files.StackFiles, 'fetch') 
@mock.patch.object(stack.Stack, 'find') def test_get_stack_files_with_stack_identity(self, mock_find, mock_fetch): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) mock_find.return_value = stk mock_fetch.return_value = {'file': 'content'} res = self.proxy.get_stack_files('IDENTITY') self.assertEqual({'file': 'content'}, res) mock_find.assert_called_once_with( mock.ANY, 'IDENTITY', ignore_missing=False ) mock_fetch.assert_called_once_with(self.proxy) @mock.patch.object(stack_files.StackFiles, 'fetch') def test_get_stack_files_with_stack_object(self, mock_fetch): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) mock_fetch.return_value = {'file': 'content'} res = self.proxy.get_stack_files(stk) self.assertEqual({'file': 'content'}, res) mock_fetch.assert_called_once_with(self.proxy) class TestOrchestrationStackTemplate(TestOrchestrationProxy): @mock.patch.object(stack.Stack, 'find') def test_get_stack_template_with_stack_identity(self, mock_find): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) mock_find.return_value = stk self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_stack_template, method_args=['IDENTITY'], expected_args=[stack_template.StackTemplate], expected_kwargs={ 'requires_id': False, 'stack_name': stack_name, 'stack_id': stack_id, }, ) mock_find.assert_called_once_with( mock.ANY, 'IDENTITY', ignore_missing=False ) def test_get_stack_template_with_stack_object(self): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) self._verify( 'openstack.proxy.Proxy._get', self.proxy.get_stack_template, method_args=[stk], expected_args=[stack_template.StackTemplate], expected_kwargs={ 'requires_id': False, 'stack_name': stack_name, 'stack_id': stack_id, }, ) class TestOrchestrationResource(TestOrchestrationProxy): @mock.patch.object(stack.Stack, 'find') def 
test_resources_with_stack_object(self, mock_find): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) self.verify_list( self.proxy.resources, resource.Resource, method_args=[stk], expected_args=[], expected_kwargs={'stack_name': stack_name, 'stack_id': stack_id}, ) self.assertEqual(0, mock_find.call_count) @mock.patch.object(stack.Stack, 'find') def test_resources_with_stack_name(self, mock_find): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) mock_find.return_value = stk self.verify_list( self.proxy.resources, resource.Resource, method_args=[stack_id], expected_args=[], expected_kwargs={'stack_name': stack_name, 'stack_id': stack_id}, ) mock_find.assert_called_once_with( mock.ANY, stack_id, ignore_missing=False ) @mock.patch.object(stack.Stack, 'find') @mock.patch.object(resource.Resource, 'list') def test_resources_stack_not_found(self, mock_list, mock_find): stack_name = 'test_stack' mock_find.side_effect = exceptions.NotFoundException( 'No stack found for test_stack' ) ex = self.assertRaises( exceptions.NotFoundException, self.proxy.resources, stack_name ) self.assertEqual('No stack found for test_stack', str(ex)) class TestOrchestrationSoftwareConfig(TestOrchestrationProxy): def test_create_software_config(self): self.verify_create( self.proxy.create_software_config, sc.SoftwareConfig ) def test_software_configs(self): self.verify_list(self.proxy.software_configs, sc.SoftwareConfig) def test_get_software_config(self): self.verify_get(self.proxy.get_software_config, sc.SoftwareConfig) def test_delete_software_config(self): self.verify_delete( self.proxy.delete_software_config, sc.SoftwareConfig, True ) self.verify_delete( self.proxy.delete_software_config, sc.SoftwareConfig, False ) class TestOrchestrationSoftwareDeployment(TestOrchestrationProxy): def test_create_software_deployment(self): self.verify_create( self.proxy.create_software_deployment, sd.SoftwareDeployment ) def 
test_software_deployments(self): self.verify_list( self.proxy.software_deployments, sd.SoftwareDeployment ) def test_get_software_deployment(self): self.verify_get( self.proxy.get_software_deployment, sd.SoftwareDeployment ) def test_update_software_deployment(self): self.verify_update( self.proxy.update_software_deployment, sd.SoftwareDeployment ) def test_delete_software_deployment(self): self.verify_delete( self.proxy.delete_software_deployment, sd.SoftwareDeployment, True ) self.verify_delete( self.proxy.delete_software_deployment, sd.SoftwareDeployment, False ) class TestOrchestrationTemplate(TestOrchestrationProxy): @mock.patch.object(template.Template, 'validate') def test_validate_template(self, mock_validate): tmpl = mock.Mock() env = mock.Mock() tmpl_url = 'A_URI' ignore_errors = 'a_string' res = self.proxy.validate_template(tmpl, env, tmpl_url, ignore_errors) mock_validate.assert_called_once_with( self.proxy, tmpl, environment=env, template_url=tmpl_url, ignore_errors=ignore_errors, ) self.assertEqual(mock_validate.return_value, res) def test_validate_template_no_env(self): tmpl = "openstack/tests/unit/orchestration/v1/hello_world.yaml" res = self.proxy.read_env_and_templates(tmpl) self.assertIsInstance(res, dict) self.assertIsInstance(res["files"], dict) def test_validate_template_invalid_request(self): err = self.assertRaises( exceptions.InvalidRequest, self.proxy.validate_template, None, template_url=None, ) self.assertEqual( "'template_url' must be specified when template is None", str(err), ) class TestExtractName(TestOrchestrationProxy): scenarios = [ ('stacks', dict(url='/stacks', parts=['stacks'])), ('name_id', dict(url='/stacks/name/id', parts=['stack'])), ('identity', dict(url='/stacks/id', parts=['stack'])), ( 'preview', dict(url='/stacks/name/preview', parts=['stack', 'preview']), ), ( 'stack_act', dict(url='/stacks/name/id/preview', parts=['stack', 'preview']), ), ( 'stack_subres', dict( url='/stacks/name/id/resources', parts=['stack', 
'resources'] ), ), ( 'stack_subres_id', dict( url='/stacks/name/id/resources/id', parts=['stack', 'resource'] ), ), ( 'stack_subres_id_act', dict( url='/stacks/name/id/resources/id/action', parts=['stack', 'resource', 'action'], ), ), ( 'event', dict( url='/stacks/ignore/ignore/resources/ignore/events/id', parts=['stack', 'resource', 'event'], ), ), ( 'sd_metadata', dict( url='/software_deployments/metadata/ignore', parts=['software_deployment', 'metadata'], ), ), ] def test_extract_name(self): results = self.proxy._extract_name(self.url) self.assertEqual(self.parts, results) class TestOrchestrationStackEvents(TestOrchestrationProxy): def test_stack_events_with_stack_object(self): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) self._verify( 'openstack.proxy.Proxy._list', self.proxy.stack_events, method_args=[stk], expected_args=[stack_event.StackEvent], expected_kwargs={ 'stack_name': stack_name, 'stack_id': stack_id, }, ) @mock.patch.object(proxy.Proxy, '_get') def test_stack_events_with_stack_id(self, mock_get): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) mock_get.return_value = stk self._verify( 'openstack.proxy.Proxy._list', self.proxy.stack_events, method_args=[stk], expected_args=[stack_event.StackEvent], expected_kwargs={ 'stack_name': stack_name, 'stack_id': stack_id, }, ) def test_stack_events_with_resource_name(self): stack_id = '1234' stack_name = 'test_stack' resource_name = 'id' base_path = '/stacks/%(stack_name)s/%(stack_id)s/resources/%(resource_name)s/events' stk = stack.Stack(id=stack_id, name=stack_name) self._verify( 'openstack.proxy.Proxy._list', self.proxy.stack_events, method_args=[stk, resource_name], expected_args=[stack_event.StackEvent], expected_kwargs={ 'stack_name': stack_name, 'stack_id': stack_id, 'resource_name': resource_name, 'base_path': base_path, }, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/test_resource.py0000664000175000017500000000451000000000000027011 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.orchestration.v1 import resource from openstack.tests.unit import base FAKE_ID = '32e39358-2422-4ad0-a1b5-dd60696bf564' FAKE_NAME = 'test_stack' FAKE = { 'links': [ {'href': 'http://res_link', 'rel': 'self'}, {'href': 'http://stack_link', 'rel': 'stack'}, ], 'logical_resource_id': 'the_resource', 'name': 'the_resource', 'physical_resource_id': '9f38ab5a-37c8-4e40-9702-ce27fc5f6954', 'required_by': [], 'resource_type': 'OS::Heat::FakeResource', 'status': 'CREATE_COMPLETE', 'status_reason': 'state changed', 'updated_time': '2015-03-09T12:15:57.233772', } class TestResource(base.TestCase): def test_basic(self): sot = resource.Resource() self.assertEqual('resource', sot.resource_key) self.assertEqual('resources', sot.resources_key) self.assertEqual( '/stacks/%(stack_name)s/%(stack_id)s/resources', sot.base_path ) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_retrieve) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = resource.Resource(**FAKE) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['logical_resource_id'], sot.logical_resource_id) self.assertEqual(FAKE['name'], sot.name) self.assertEqual( 
FAKE['physical_resource_id'], sot.physical_resource_id ) self.assertEqual(FAKE['required_by'], sot.required_by) self.assertEqual(FAKE['resource_type'], sot.resource_type) self.assertEqual(FAKE['status'], sot.status) self.assertEqual(FAKE['status_reason'], sot.status_reason) self.assertEqual(FAKE['updated_time'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/test_software_config.py0000664000175000017500000000370400000000000030345 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.orchestration.v1 import software_config from openstack.tests.unit import base FAKE_ID = 'ce8ae86c-9810-4cb1-8888-7fb53bc523bf' FAKE_NAME = 'test_software_config' FAKE = { 'id': FAKE_ID, 'name': FAKE_NAME, 'config': 'fake config', 'creation_time': '2015-03-09T12:15:57', 'group': 'fake group', 'inputs': [{'foo': 'bar'}], 'outputs': [{'baz': 'zoo'}], 'options': {'key': 'value'}, } class TestSoftwareConfig(base.TestCase): def test_basic(self): sot = software_config.SoftwareConfig() self.assertEqual('software_config', sot.resource_key) self.assertEqual('software_configs', sot.resources_key) self.assertEqual('/software_configs', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = software_config.SoftwareConfig(**FAKE) self.assertEqual(FAKE_ID, sot.id) self.assertEqual(FAKE_NAME, sot.name) self.assertEqual(FAKE['config'], sot.config) self.assertEqual(FAKE['creation_time'], sot.created_at) self.assertEqual(FAKE['group'], sot.group) self.assertEqual(FAKE['inputs'], sot.inputs) self.assertEqual(FAKE['outputs'], sot.outputs) self.assertEqual(FAKE['options'], sot.options) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/test_software_deployment.py0000664000175000017500000000453200000000000031260 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.orchestration.v1 import software_deployment from openstack.tests.unit import base FAKE = { 'id': 'ce8ae86c-9810-4cb1-8888-7fb53bc523bf', 'action': 'CREATE', 'config_id': 'CONFIG ID', 'creation_time': '2015-03-09T12:15:57', 'server_id': 'FAKE_SERVER', 'stack_user_project_id': 'ANOTHER PROJECT', 'status': 'IN_PROGRESS', 'status_reason': 'Why are we here?', 'input_values': {'foo': 'bar'}, 'output_values': {'baz': 'zoo'}, 'updated_time': '2015-03-09T12:15:57', } class TestSoftwareDeployment(base.TestCase): def test_basic(self): sot = software_deployment.SoftwareDeployment() self.assertEqual('software_deployment', sot.resource_key) self.assertEqual('software_deployments', sot.resources_key) self.assertEqual('/software_deployments', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = software_deployment.SoftwareDeployment(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['action'], sot.action) self.assertEqual(FAKE['config_id'], sot.config_id) self.assertEqual(FAKE['creation_time'], sot.created_at) self.assertEqual(FAKE['server_id'], sot.server_id) self.assertEqual( FAKE['stack_user_project_id'], sot.stack_user_project_id ) self.assertEqual(FAKE['input_values'], sot.input_values) self.assertEqual(FAKE['output_values'], sot.output_values) self.assertEqual(FAKE['status'], sot.status) self.assertEqual(FAKE['status_reason'], sot.status_reason) self.assertEqual(FAKE['updated_time'], sot.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/test_stack.py0000664000175000017500000003055500000000000026277 0ustar00zuulzuul00000000000000# Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack import exceptions from openstack.orchestration.v1 import stack from openstack import resource from openstack.tests.unit import base from openstack.tests.unit import test_resource FAKE_ID = 'ce8ae86c-9810-4cb1-8888-7fb53bc523bf' FAKE_NAME = 'test_stack' FAKE = { 'capabilities': '1', 'creation_time': '2015-03-09T12:15:57.233772', 'deletion_time': '2015-03-09T12:15:57.233772', 'description': '3', 'disable_rollback': True, 'environment': {'var1': 'val1'}, 'environment_files': [], 'files': {'file1': 'content'}, 'files_container': 'dummy_container', 'id': FAKE_ID, 'links': [{'href': f'stacks/{FAKE_NAME}/{FAKE_ID}', 'rel': 'self'}], 'notification_topics': '7', 'outputs': '8', 'parameters': {'OS::stack_id': '9'}, 'name': FAKE_NAME, 'status': '11', 'status_reason': '12', 'tags': ['FOO', 'bar:1'], 'template_description': '13', 'template_url': 'http://www.example.com/wordpress.yaml', 'timeout_mins': '14', 'updated_time': '2015-03-09T12:30:00.000000', } FAKE_CREATE_RESPONSE = { 'stack': { 'id': FAKE_ID, 'links': [{'href': f'stacks/{FAKE_NAME}/{FAKE_ID}', 'rel': 'self'}], } } FAKE_UPDATE_PREVIEW_RESPONSE = { 'unchanged': [ { 'updated_time': 'datetime', 'resource_name': '', 'physical_resource_id': '{resource id or }', 'resource_action': 'CREATE', 'resource_status': 'COMPLETE', 'resource_status_reason': '', 'resource_type': 'restype', 'stack_identity': '{stack_id}', 'stack_name': '{stack_name}', } ], 'updated': [ { 'updated_time': 
'datetime', 'resource_name': '', 'physical_resource_id': '{resource id or }', 'resource_action': 'CREATE', 'resource_status': 'COMPLETE', 'resource_status_reason': '', 'resource_type': 'restype', 'stack_identity': '{stack_id}', 'stack_name': '{stack_name}', } ], 'replaced': [ { 'updated_time': 'datetime', 'resource_name': '', 'physical_resource_id': '{resource id or }', 'resource_action': 'CREATE', 'resource_status': 'COMPLETE', 'resource_status_reason': '', 'resource_type': 'restype', 'stack_identity': '{stack_id}', 'stack_name': '{stack_name}', } ], 'added': [ { 'updated_time': 'datetime', 'resource_name': '', 'physical_resource_id': '{resource id or }', 'resource_action': 'CREATE', 'resource_status': 'COMPLETE', 'resource_status_reason': '', 'resource_type': 'restype', 'stack_identity': '{stack_id}', 'stack_name': '{stack_name}', } ], 'deleted': [ { 'updated_time': 'datetime', 'resource_name': '', 'physical_resource_id': '{resource id or }', 'resource_action': 'CREATE', 'resource_status': 'COMPLETE', 'resource_status_reason': '', 'resource_type': 'restype', 'stack_identity': '{stack_id}', 'stack_name': '{stack_name}', } ], } class TestStack(base.TestCase): def test_basic(self): sot = stack.Stack() self.assertEqual('stack', sot.resource_key) self.assertEqual('stacks', sot.resources_key) self.assertEqual('/stacks', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertDictEqual( { 'action': 'action', 'any_tags': 'tags-any', 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'not_any_tags': 'not-tags-any', 'not_tags': 'not-tags', 'owner_id': 'owner_id', 'project_id': 'tenant_id', 'status': 'status', 'tags': 'tags', 'username': 'username', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = stack.Stack(**FAKE) self.assertEqual(FAKE['capabilities'], sot.capabilities) self.assertEqual(FAKE['creation_time'], 
sot.created_at) self.assertEqual(FAKE['deletion_time'], sot.deleted_at) self.assertEqual(FAKE['description'], sot.description) self.assertEqual(FAKE['environment'], sot.environment) self.assertEqual(FAKE['environment_files'], sot.environment_files) self.assertEqual(FAKE['files'], sot.files) self.assertEqual(FAKE['files_container'], sot.files_container) self.assertTrue(sot.is_rollback_disabled) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['notification_topics'], sot.notification_topics) self.assertEqual(FAKE['outputs'], sot.outputs) self.assertEqual(FAKE['parameters'], sot.parameters) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['status'], sot.status) self.assertEqual(FAKE['status_reason'], sot.status_reason) self.assertEqual(FAKE['tags'], sot.tags) self.assertEqual( FAKE['template_description'], sot.template_description ) self.assertEqual(FAKE['template_url'], sot.template_url) self.assertEqual(FAKE['timeout_mins'], sot.timeout_mins) self.assertEqual(FAKE['updated_time'], sot.updated_at) @mock.patch.object(resource.Resource, 'create') def test_create(self, mock_create): sess = mock.Mock() sot = stack.Stack() res = sot.create(sess) mock_create.assert_called_once_with( sess, prepend_key=False, base_path=None ) self.assertEqual(mock_create.return_value, res) @mock.patch.object(resource.Resource, 'commit') def test_commit(self, mock_commit): sess = mock.Mock() sot = stack.Stack() res = sot.commit(sess) mock_commit.assert_called_once_with( sess, prepend_key=False, has_body=False, base_path=None ) self.assertEqual(mock_commit.return_value, res) def test_check(self): sess = mock.Mock() sot = stack.Stack(**FAKE) sot._action = mock.Mock() sot._action.side_effect = [ test_resource.FakeResponse(None, 200, None), exceptions.BadRequestException(message='oops'), exceptions.NotFoundException(message='oops'), ] body = {'check': ''} sot.check(sess) sot._action.assert_called_with(sess, body) 
self.assertRaises(exceptions.BadRequestException, sot.check, sess) self.assertRaises(exceptions.NotFoundException, sot.check, sess) def test_fetch(self): sess = mock.Mock() sess.default_microversion = None sot = stack.Stack(**FAKE) sess.get = mock.Mock() sess.get.side_effect = [ test_resource.FakeResponse( {'stack': {'stack_status': 'CREATE_COMPLETE'}}, 200 ), test_resource.FakeResponse( {'stack': {'stack_status': 'CREATE_COMPLETE'}}, 200 ), exceptions.NotFoundException(message='oops'), test_resource.FakeResponse( {'stack': {'stack_status': 'DELETE_COMPLETE'}}, 200 ), ] self.assertEqual(sot, sot.fetch(sess)) sess.get.assert_called_with( f'stacks/{sot.id}', microversion=None, skip_cache=False, ) sot.fetch(sess, resolve_outputs=False) sess.get.assert_called_with( f'stacks/{sot.id}?resolve_outputs=False', microversion=None, skip_cache=False, ) ex = self.assertRaises(exceptions.NotFoundException, sot.fetch, sess) self.assertEqual('oops', str(ex)) ex = self.assertRaises(exceptions.NotFoundException, sot.fetch, sess) self.assertEqual('No stack found for %s' % FAKE_ID, str(ex)) def test_abandon(self): sess = mock.Mock() sess.default_microversion = None mock_response = mock.Mock() mock_response.status_code = 200 mock_response.headers = {} mock_response.json.return_value = {} sess.delete = mock.Mock(return_value=mock_response) sot = stack.Stack(**FAKE) sot.abandon(sess) sess.delete.assert_called_with( f'stacks/{FAKE_NAME}/{FAKE_ID}/abandon', ) def test_export(self): sess = mock.Mock() sess.default_microversion = None mock_response = mock.Mock() mock_response.status_code = 200 mock_response.headers = {} mock_response.json.return_value = {} sess.get = mock.Mock(return_value=mock_response) sot = stack.Stack(**FAKE) sot.export(sess) sess.get.assert_called_with( f'stacks/{FAKE_NAME}/{FAKE_ID}/export', ) def test_update(self): sess = mock.Mock() sess.default_microversion = None mock_response = mock.Mock() mock_response.status_code = 200 mock_response.headers = {} 
mock_response.json.return_value = {} sess.put = mock.Mock(return_value=mock_response) sot = stack.Stack(**FAKE) body = sot._body.dirty.copy() sot.update(sess) sess.put.assert_called_with( f'/stacks/{FAKE_NAME}/{FAKE_ID}', headers={}, microversion=None, json=body, ) def test_update_preview(self): sess = mock.Mock() sess.default_microversion = None mock_response = mock.Mock() mock_response.status_code = 200 mock_response.headers = {} mock_response.json.return_value = FAKE_UPDATE_PREVIEW_RESPONSE.copy() sess.put = mock.Mock(return_value=mock_response) sot = stack.Stack(**FAKE) body = sot._body.dirty.copy() ret = sot.update(sess, preview=True) sess.put.assert_called_with( f'stacks/{FAKE_NAME}/{FAKE_ID}/preview', headers={}, microversion=None, json=body, ) self.assertEqual(FAKE_UPDATE_PREVIEW_RESPONSE['added'], ret.added) self.assertEqual(FAKE_UPDATE_PREVIEW_RESPONSE['deleted'], ret.deleted) self.assertEqual( FAKE_UPDATE_PREVIEW_RESPONSE['replaced'], ret.replaced ) self.assertEqual( FAKE_UPDATE_PREVIEW_RESPONSE['unchanged'], ret.unchanged ) self.assertEqual(FAKE_UPDATE_PREVIEW_RESPONSE['updated'], ret.updated) def test_suspend(self): sess = mock.Mock() mock_response = mock.Mock() mock_response.status_code = 200 mock_response.headers = {} mock_response.json.return_value = {} sess.post = mock.Mock(return_value=mock_response) url = "stacks/%s/actions" % FAKE_ID body = {"suspend": None} sot = stack.Stack(**FAKE) res = sot.suspend(sess) self.assertIsNone(res) sess.post.assert_called_with(url, json=body, microversion=None) def test_resume(self): sess = mock.Mock() mock_response = mock.Mock() mock_response.status_code = 200 mock_response.headers = {} mock_response.json.return_value = {} sess.post = mock.Mock(return_value=mock_response) url = "stacks/%s/actions" % FAKE_ID body = {"resume": None} sot = stack.Stack(**FAKE) res = sot.resume(sess) self.assertIsNone(res) sess.post.assert_called_with(url, json=body, microversion=None) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/test_stack_environment.py0000664000175000017500000000317100000000000030715 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.orchestration.v1 import stack_environment as se from openstack.tests.unit import base FAKE = { 'encrypted_param_names': ['n1', 'n2'], 'event_sinks': {'s1': 'v1'}, 'parameters': {'key_name': {'type': 'string'}}, 'parameter_defaults': {'p1': 'def1'}, 'resource_registry': {'resources': {'type1': 'type2'}}, } class TestStackTemplate(base.TestCase): def test_basic(self): sot = se.StackEnvironment() self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = se.StackEnvironment(**FAKE) self.assertEqual( FAKE['encrypted_param_names'], sot.encrypted_param_names ) self.assertEqual(FAKE['event_sinks'], sot.event_sinks) self.assertEqual(FAKE['parameters'], sot.parameters) self.assertEqual(FAKE['parameter_defaults'], sot.parameter_defaults) self.assertEqual(FAKE['resource_registry'], sot.resource_registry) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/test_stack_event.py0000664000175000017500000000401000000000000027463 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.orchestration.v1 import stack_event from openstack.tests.unit import base FAKE_ID = 'ce8ae86c-9810-4cb1-8888-7fb53bc523bf' FAKE_NAME = 'test_stack' FAKE = { 'event_time': '2015-03-09T12:15:57.233772', 'id': FAKE_ID, 'links': [{'href': f'stacks/{FAKE_NAME}/{FAKE_ID}', 'rel': 'self'}], 'logical_resource_id': 'my_test_group', 'physical_resource_id': 'my_test_group', 'resource_name': 'my_test_resource', 'resource_status': 'CREATE_IN_PROGRESS', 'resource_status_reason': 'state changed', } class TestStackEvent(base.TestCase): def test_basic(self): sot = stack_event.StackEvent() self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = stack_event.StackEvent(**FAKE) self.assertEqual(FAKE['event_time'], sot.event_time) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['logical_resource_id'], sot.logical_resource_id) self.assertEqual( FAKE['physical_resource_id'], sot.physical_resource_id ) self.assertEqual(FAKE['resource_name'], sot.resource_name) self.assertEqual(FAKE['resource_status'], sot.resource_status) self.assertEqual( FAKE['resource_status_reason'], sot.resource_status_reason 
) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/test_stack_files.py0000664000175000017500000000361700000000000027460 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from openstack.orchestration.v1 import stack_files as sf from openstack import resource from openstack.tests.unit import base FAKE = {'stack_id': 'ID', 'stack_name': 'NAME'} class TestStackFiles(base.TestCase): def test_basic(self): sot = sf.StackFiles() self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = sf.StackFiles(**FAKE) self.assertEqual(FAKE['stack_id'], sot.stack_id) self.assertEqual(FAKE['stack_name'], sot.stack_name) @mock.patch.object(resource.Resource, '_prepare_request') def test_get(self, mock_prepare_request): resp = mock.Mock() resp.json = mock.Mock(return_value={'file': 'file-content'}) sess = mock.Mock() sess.get = mock.Mock(return_value=resp) sot = sf.StackFiles(**FAKE) req = mock.MagicMock() req.url = '/stacks/{stack_name}/{stack_id}/files'.format( stack_name=FAKE['stack_name'], stack_id=FAKE['stack_id'], ) mock_prepare_request.return_value = req files = sot.fetch(sess) sess.get.assert_called_once_with(req.url) self.assertEqual({'file': 'file-content'}, files) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/test_stack_template.py0000664000175000017500000000500600000000000030163 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from openstack.orchestration.v1 import stack_template from openstack.tests.unit import base FAKE = { 'description': 'template description', 'heat_template_version': '2014-10-16', 'parameters': {'key_name': {'type': 'string'}}, 'resources': {'resource1': {'type': 'ResourceType'}}, 'conditions': {'cd1': True}, 'outputs': {'key1': 'value1'}, } class TestStackTemplate(base.TestCase): def test_basic(self): sot = stack_template.StackTemplate() self.assertFalse(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = stack_template.StackTemplate(**FAKE) self.assertEqual(FAKE['description'], sot.description) self.assertEqual( FAKE['heat_template_version'], sot.heat_template_version ) self.assertEqual(FAKE['outputs'], sot.outputs) self.assertEqual(FAKE['parameters'], sot.parameters) self.assertEqual(FAKE['resources'], sot.resources) self.assertEqual(FAKE['conditions'], sot.conditions) def test_to_dict(self): fake_sot = copy.deepcopy(FAKE) fake_sot['parameter_groups'] = [ { "description": "server parameters", "parameters": ["key_name", 
"image_id"], "label": "server_parameters", } ] fake_sot['location'] = None fake_sot['id'] = None fake_sot['name'] = None for temp_version in [ '2016-10-14', '2017-02-24', '2017-02-24', '2017-09-01', '2018-03-02', 'newton', 'ocata', 'pike', 'queens', ]: fake_sot['heat_template_version'] = temp_version sot = stack_template.StackTemplate(**fake_sot) self.assertEqual(fake_sot, sot.to_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/orchestration/v1/test_template.py0000664000175000017500000000634700000000000027007 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.orchestration.v1 import template from openstack import resource from openstack.tests.unit import base FAKE = { 'Description': 'Blah blah', 'Parameters': {'key_name': {'type': 'string'}}, 'ParameterGroups': [{'label': 'Group 1', 'parameters': ['key_name']}], } class TestTemplate(base.TestCase): def test_basic(self): sot = template.Template() self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = template.Template(**FAKE) self.assertEqual(FAKE['Description'], sot.description) self.assertEqual(FAKE['Parameters'], sot.parameters) self.assertEqual(FAKE['ParameterGroups'], sot.parameter_groups) @mock.patch.object(resource.Resource, '_translate_response') def test_validate(self, mock_translate): sess = mock.Mock() sot = template.Template() tmpl = mock.Mock() body = {'template': tmpl} sot.validate(sess, tmpl) sess.post.assert_called_once_with('/validate', json=body) mock_translate.assert_called_once_with(sess.post.return_value) @mock.patch.object(resource.Resource, '_translate_response') def test_validate_with_env(self, mock_translate): sess = mock.Mock() sot = template.Template() tmpl = mock.Mock() env = mock.Mock() body = {'template': tmpl, 'environment': env} sot.validate(sess, tmpl, environment=env) sess.post.assert_called_once_with('/validate', json=body) mock_translate.assert_called_once_with(sess.post.return_value) @mock.patch.object(resource.Resource, '_translate_response') def test_validate_with_template_url(self, mock_translate): sess = mock.Mock() sot = template.Template() template_url = 'http://host1' body = {'template': None, 'template_url': template_url} sot.validate(sess, None, template_url=template_url) sess.post.assert_called_once_with('/validate', json=body) mock_translate.assert_called_once_with(sess.post.return_value) @mock.patch.object(resource.Resource, 
'_translate_response') def test_validate_with_ignore_errors(self, mock_translate): sess = mock.Mock() sot = template.Template() tmpl = mock.Mock() body = {'template': tmpl} sot.validate(sess, tmpl, ignore_errors='123,456') sess.post.assert_called_once_with( '/validate?ignore_errors=123%2C456', json=body ) mock_translate.assert_called_once_with(sess.post.return_value) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4974349 openstacksdk-4.0.0/openstack/tests/unit/placement/0000775000175000017500000000000000000000000022307 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/placement/__init__.py0000664000175000017500000000000000000000000024406 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4974349 openstacksdk-4.0.0/openstack/tests/unit/placement/v1/0000775000175000017500000000000000000000000022635 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/placement/v1/__init__.py0000664000175000017500000000000000000000000024734 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/placement/v1/test_proxy.py0000664000175000017500000001314600000000000025434 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.placement.v1 import _proxy from openstack.placement.v1 import resource_class from openstack.placement.v1 import resource_provider from openstack.placement.v1 import resource_provider_inventory from openstack.tests.unit import test_proxy_base as test_proxy_base class TestPlacementProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestPlacementResourceClass(TestPlacementProxy): def test_resource_class_create(self): self.verify_create( self.proxy.create_resource_class, resource_class.ResourceClass, ) def test_resource_class_delete(self): self.verify_delete( self.proxy.delete_resource_class, resource_class.ResourceClass, False, ) def test_resource_class_update(self): self.verify_update( self.proxy.update_resource_class, resource_class.ResourceClass, False, ) def test_resource_class_get(self): self.verify_get( self.proxy.get_resource_class, resource_class.ResourceClass, ) def test_resource_classes(self): self.verify_list( self.proxy.resource_classes, resource_class.ResourceClass, ) class TestPlacementResourceProvider(TestPlacementProxy): def test_resource_provider_create(self): self.verify_create( self.proxy.create_resource_provider, resource_provider.ResourceProvider, ) def test_resource_provider_delete(self): self.verify_delete( self.proxy.delete_resource_provider, resource_provider.ResourceProvider, False, ) def test_resource_provider_update(self): self.verify_update( self.proxy.update_resource_provider, resource_provider.ResourceProvider, False, ) def test_resource_provider_get(self): 
self.verify_get( self.proxy.get_resource_provider, resource_provider.ResourceProvider, ) def test_resource_providers(self): self.verify_list( self.proxy.resource_providers, resource_provider.ResourceProvider, ) def test_resource_provider_set_aggregates(self): self._verify( 'openstack.placement.v1.resource_provider.ResourceProvider.set_aggregates', self.proxy.set_resource_provider_aggregates, method_args=['value', 'a', 'b'], expected_args=[self.proxy], expected_kwargs={'aggregates': ('a', 'b')}, ) def test_resource_provider_get_aggregates(self): self._verify( 'openstack.placement.v1.resource_provider.ResourceProvider.fetch_aggregates', self.proxy.get_resource_provider_aggregates, method_args=['value'], expected_args=[self.proxy], ) class TestPlacementResourceProviderInventory(TestPlacementProxy): def test_resource_provider_inventory_create(self): self.verify_create( self.proxy.create_resource_provider_inventory, resource_provider_inventory.ResourceProviderInventory, method_kwargs={ 'resource_provider': 'test_id', 'resource_class': 'CUSTOM_FOO', 'total': 20, }, expected_kwargs={ 'resource_provider_id': 'test_id', 'resource_class': 'CUSTOM_FOO', 'total': 20, }, ) def test_resource_provider_inventory_delete(self): self.verify_delete( self.proxy.delete_resource_provider_inventory, resource_provider_inventory.ResourceProviderInventory, ignore_missing=False, method_kwargs={'resource_provider': 'test_id'}, expected_kwargs={'resource_provider_id': 'test_id'}, ) def test_resource_provider_inventory_update(self): self.verify_update( self.proxy.update_resource_provider_inventory, resource_provider_inventory.ResourceProviderInventory, method_kwargs={ 'resource_provider': 'test_id', 'resource_provider_generation': 1, }, expected_kwargs={ 'resource_provider_id': 'test_id', 'resource_provider_generation': 1, }, ) def test_resource_provider_inventory_get(self): self.verify_get( self.proxy.get_resource_provider_inventory, resource_provider_inventory.ResourceProviderInventory, 
method_kwargs={'resource_provider': 'test_id'}, expected_kwargs={'resource_provider_id': 'test_id'}, ) def test_resource_provider_inventories(self): self.verify_list( self.proxy.resource_provider_inventories, resource_provider_inventory.ResourceProviderInventory, method_kwargs={'resource_provider': 'test_id'}, expected_kwargs={'resource_provider_id': 'test_id'}, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/placement/v1/test_resource_class.py0000664000175000017500000000271700000000000027271 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.placement.v1 import resource_class as rc from openstack.tests.unit import base FAKE = { 'name': 'CUSTOM_FPGA', } class TestResourceClass(base.TestCase): def test_basic(self): sot = rc.ResourceClass() self.assertEqual(None, sot.resource_key) self.assertEqual('resource_classes', sot.resources_key) self.assertEqual('/resource_classes', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_patch) self.assertDictEqual( {'limit': 'limit', 'marker': 'marker'}, sot._query_mapping._mapping ) def test_make_it(self): sot = rc.ResourceClass(**FAKE) self.assertEqual(FAKE['name'], sot.id) self.assertEqual(FAKE['name'], sot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/placement/v1/test_resource_provider.py0000664000175000017500000000374300000000000030016 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.placement.v1 import resource_provider as rp from openstack.tests.unit import base FAKE = { 'uuid': '751cd30a-df22-4ef8-b028-67c1c5aeddc3', 'name': 'fake-name', 'parent_provider_uuid': '9900cc2d-88e8-429d-927a-182adf1577b0', } class TestResourceProvider(base.TestCase): def test_basic(self): sot = rp.ResourceProvider() self.assertEqual(None, sot.resource_key) self.assertEqual('resource_providers', sot.resources_key) self.assertEqual('/resource_providers', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_patch) self.assertDictEqual( { 'limit': 'limit', 'marker': 'marker', 'name': 'name', 'member_of': 'member_of', 'resources': 'resources', 'in_tree': 'in_tree', 'required': 'required', 'id': 'uuid', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = rp.ResourceProvider(**FAKE) self.assertEqual(FAKE['uuid'], sot.id) self.assertEqual(FAKE['name'], sot.name) self.assertEqual( FAKE['parent_provider_uuid'], sot.parent_provider_id, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/placement/v1/test_resource_provider_inventory.py0000664000175000017500000000360700000000000032132 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.placement.v1 import resource_provider_inventory from openstack.tests.unit import base FAKE = { 'allocation_ratio': 1.0, 'max_unit': 35, 'min_unit': 1, 'reserved': 0, 'step_size': 1, 'total': 35, } class TestResourceProviderInventory(base.TestCase): def test_basic(self): sot = resource_provider_inventory.ResourceProviderInventory() self.assertIsNone(sot.resource_key) self.assertIsNone(sot.resources_key) self.assertEqual( '/resource_providers/%(resource_provider_id)s/inventories', sot.base_path, ) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_patch) self.assertDictEqual({}, sot._query_mapping._mapping) def test_make_it(self): sot = resource_provider_inventory.ResourceProviderInventory(**FAKE) self.assertEqual(FAKE['allocation_ratio'], sot.allocation_ratio) self.assertEqual(FAKE['max_unit'], sot.max_unit) self.assertEqual(FAKE['min_unit'], sot.min_unit) self.assertEqual(FAKE['reserved'], sot.reserved) self.assertEqual(FAKE['step_size'], sot.step_size) self.assertEqual(FAKE['total'], sot.total) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/placement/v1/test_trait.py0000664000175000017500000000267500000000000025403 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.placement.v1 import trait as _trait from openstack.tests.unit import base FAKE = { 'name': 'CUSTOM_FOO', } class TestResourceClass(base.TestCase): def test_basic(self): sot = _trait.Trait() self.assertEqual(None, sot.resource_key) self.assertEqual(None, sot.resources_key) self.assertEqual('/traits', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_patch) self.assertDictEqual( {'name': 'name', 'associated': 'associated'}, sot._query_mapping._mapping, ) def test_make_it(self): sot = _trait.Trait(**FAKE) self.assertEqual(FAKE['name'], sot.id) self.assertEqual(FAKE['name'], sot.name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.4974349 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/0000775000175000017500000000000000000000000024210 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/__init__.py0000664000175000017500000000000000000000000026307 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.5054388 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/0000775000175000017500000000000000000000000024537 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/__init__.py0000664000175000017500000000000000000000000026636 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_availability_zone.py0000664000175000017500000000273200000000000031661 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.shared_file_system.v2 import availability_zone as az from openstack.tests.unit import base IDENTIFIER = '08a87d37-5ca2-4308-86c5-cba06d8d796c' EXAMPLE = { "id": IDENTIFIER, "name": "nova", "created_at": "2021-01-21T20:13:55.000000", "updated_at": None, } class TestAvailabilityZone(base.TestCase): def test_basic(self): az_resource = az.AvailabilityZone() self.assertEqual('availability_zones', az_resource.resources_key) self.assertEqual('/availability-zones', az_resource.base_path) self.assertTrue(az_resource.allow_list) def test_make_availability_zone(self): az_resource = az.AvailabilityZone(**EXAMPLE) self.assertEqual(EXAMPLE['id'], az_resource.id) self.assertEqual(EXAMPLE['name'], az_resource.name) self.assertEqual(EXAMPLE['created_at'], az_resource.created_at) self.assertEqual(EXAMPLE['updated_at'], az_resource.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_limit.py0000664000175000017500000000626300000000000027275 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.shared_file_system.v2 import limit from openstack.tests.unit import base EXAMPLE = { "totalShareNetworksUsed": 0, "maxTotalShareGigabytes": 1000, "maxTotalShareNetworks": 10, "totalSharesUsed": 0, "totalShareGigabytesUsed": 0, "totalShareSnapshotsUsed": 0, "maxTotalShares": 50, "totalSnapshotGigabytesUsed": 0, "maxTotalSnapshotGigabytes": 1000, "maxTotalShareSnapshots": 50, "maxTotalShareReplicas": 100, "maxTotalReplicaGigabytes": 1000, "totalShareReplicasUsed": 0, "totalReplicaGigabytesUsed": 0, } class TestLimit(base.TestCase): def test_basic(self): limits = limit.Limit() self.assertEqual('limits', limits.resources_key) self.assertEqual('/limits', limits.base_path) self.assertTrue(limits.allow_list) self.assertFalse(limits.allow_fetch) self.assertFalse(limits.allow_create) self.assertFalse(limits.allow_commit) self.assertFalse(limits.allow_delete) self.assertFalse(limits.allow_head) def test_make_limits(self): limits = limit.Limit(**EXAMPLE) self.assertEqual( EXAMPLE['totalShareNetworksUsed'], limits.totalShareNetworksUsed ) self.assertEqual( EXAMPLE['maxTotalShareGigabytes'], limits.maxTotalShareGigabytes ) self.assertEqual( EXAMPLE['maxTotalShareNetworks'], limits.maxTotalShareNetworks ) self.assertEqual(EXAMPLE['totalSharesUsed'], limits.totalSharesUsed) self.assertEqual( EXAMPLE['totalShareGigabytesUsed'], limits.totalShareGigabytesUsed ) self.assertEqual( EXAMPLE['totalShareSnapshotsUsed'], limits.totalShareSnapshotsUsed ) self.assertEqual(EXAMPLE['maxTotalShares'], limits.maxTotalShares) self.assertEqual( EXAMPLE['totalSnapshotGigabytesUsed'], 
limits.totalSnapshotGigabytesUsed, ) self.assertEqual( EXAMPLE['maxTotalSnapshotGigabytes'], limits.maxTotalSnapshotGigabytes, ) self.assertEqual( EXAMPLE['maxTotalShareSnapshots'], limits.maxTotalShareSnapshots ) self.assertEqual( EXAMPLE['maxTotalShareReplicas'], limits.maxTotalShareReplicas ) self.assertEqual( EXAMPLE['maxTotalReplicaGigabytes'], limits.maxTotalReplicaGigabytes, ) self.assertEqual( EXAMPLE['totalShareReplicasUsed'], limits.totalShareReplicasUsed ) self.assertEqual( EXAMPLE['totalReplicaGigabytesUsed'], limits.totalReplicaGigabytesUsed, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_proxy.py0000664000175000017500000004705400000000000027343 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.shared_file_system.v2 import _proxy from openstack.shared_file_system.v2 import limit from openstack.shared_file_system.v2 import resource_locks from openstack.shared_file_system.v2 import share from openstack.shared_file_system.v2 import share_access_rule from openstack.shared_file_system.v2 import share_group from openstack.shared_file_system.v2 import share_group_snapshot from openstack.shared_file_system.v2 import share_instance from openstack.shared_file_system.v2 import share_network from openstack.shared_file_system.v2 import share_network_subnet from openstack.shared_file_system.v2 import share_snapshot from openstack.shared_file_system.v2 import share_snapshot_instance from openstack.shared_file_system.v2 import storage_pool from openstack.shared_file_system.v2 import user_message from openstack.tests.unit import test_proxy_base class TestSharedFileSystemProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) class TestSharedFileSystemShare(TestSharedFileSystemProxy): def test_shares(self): self.verify_list(self.proxy.shares, share.Share) def test_shares_detailed(self): self.verify_list( self.proxy.shares, share.Share, method_kwargs={"details": True, "query": 1}, expected_kwargs={"query": 1}, ) def test_shares_not_detailed(self): self.verify_list( self.proxy.shares, share.Share, method_kwargs={"details": False, "query": 1}, expected_kwargs={"query": 1}, ) def test_share_get(self): self.verify_get(self.proxy.get_share, share.Share) def test_share_find(self): self.verify_find(self.proxy.find_share, share.Share) def test_share_delete(self): self.verify_delete(self.proxy.delete_share, share.Share, False) def test_share_delete_ignore(self): self.verify_delete(self.proxy.delete_share, share.Share, True) def test_share_create(self): self.verify_create(self.proxy.create_share, share.Share) def test_share_update(self): self.verify_update(self.proxy.update_share, 
share.Share) def test_share_resize_extend(self): mock_share = share.Share(size=10, id='fakeId') self.proxy._get = mock.Mock(return_value=mock_share) self._verify( "openstack.shared_file_system.v2.share." + "Share.extend_share", self.proxy.resize_share, method_args=['fakeId', 20], expected_args=[self.proxy, 20, False], ) def test_share_resize_shrink(self): mock_share = share.Share(size=30, id='fakeId') self.proxy._get = mock.Mock(return_value=mock_share) self._verify( "openstack.shared_file_system.v2.share." + "Share.shrink_share", self.proxy.resize_share, method_args=['fakeId', 20], expected_args=[self.proxy, 20], ) def test_share_instances(self): self.verify_list( self.proxy.share_instances, share_instance.ShareInstance ) def test_share_instance_get(self): self.verify_get( self.proxy.get_share_instance, share_instance.ShareInstance ) def test_share_instance_reset(self): self._verify( "openstack.shared_file_system.v2.share_instance." + "ShareInstance.reset_status", self.proxy.reset_share_instance_status, method_args=['id', 'available'], expected_args=[self.proxy, 'available'], ) def test_share_instance_delete(self): self._verify( "openstack.shared_file_system.v2.share_instance." 
+ "ShareInstance.force_delete", self.proxy.delete_share_instance, method_args=['id'], expected_args=[self.proxy], ) @mock.patch("openstack.resource.wait_for_status") def test_wait_for(self, mock_wait): mock_resource = mock.Mock() mock_wait.return_value = mock_resource self.proxy.wait_for_status(mock_resource, 'ACTIVE') mock_wait.assert_called_once_with( self.proxy, mock_resource, 'ACTIVE', [], 2, 120, attribute='status' ) class TestSharedFileSystemStoragePool(TestSharedFileSystemProxy): def test_storage_pools(self): self.verify_list(self.proxy.storage_pools, storage_pool.StoragePool) def test_storage_pool_detailed(self): self.verify_list( self.proxy.storage_pools, storage_pool.StoragePool, method_kwargs={"details": True, "backend": "alpha"}, expected_kwargs={"backend": "alpha"}, ) def test_storage_pool_not_detailed(self): self.verify_list( self.proxy.storage_pools, storage_pool.StoragePool, method_kwargs={"details": False, "backend": "alpha"}, expected_kwargs={"backend": "alpha"}, ) class TestSharedFileSystemShareMetadata(TestSharedFileSystemProxy): def test_get_share_metadata(self): self._verify( "openstack.shared_file_system.v2.share.Share.fetch_metadata", self.proxy.get_share_metadata, method_args=["share_id"], expected_args=[self.proxy], expected_result=share.Share( id="share_id", metadata={"key": "value"} ), ) def test_get_share_metadata_item(self): self._verify( "openstack.shared_file_system.v2.share.Share.get_metadata_item", self.proxy.get_share_metadata_item, method_args=["share_id", "key"], expected_args=[self.proxy, "key"], expected_result=share.Share( id="share_id", metadata={"key": "value"} ), ) def test_create_share_metadata(self): metadata = {"foo": "bar", "newFoo": "newBar"} self._verify( "openstack.shared_file_system.v2.share.Share.set_metadata", self.proxy.create_share_metadata, method_args=["share_id"], method_kwargs=metadata, expected_args=[self.proxy], expected_kwargs={"metadata": metadata}, expected_result=share.Share(id="share_id", 
metadata=metadata), ) def test_update_share_metadata(self): metadata = {"foo": "bar", "newFoo": "newBar"} replace = True self._verify( "openstack.shared_file_system.v2.share.Share.set_metadata", self.proxy.update_share_metadata, method_args=["share_id", metadata, replace], expected_args=[self.proxy], expected_kwargs={"metadata": metadata, "replace": replace}, expected_result=share.Share(id="share_id", metadata=metadata), ) def test_delete_share_metadata(self): self._verify( "openstack.shared_file_system.v2.share.Share.delete_metadata_item", self.proxy.delete_share_metadata, expected_result=None, method_args=["share_id", ["key"]], expected_args=[self.proxy, "key"], ) class TestUserMessageProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_user_messages(self): self.verify_list(self.proxy.user_messages, user_message.UserMessage) def test_user_messages_queried(self): self.verify_list( self.proxy.user_messages, user_message.UserMessage, method_kwargs={"action_id": "1"}, expected_kwargs={"action_id": "1"}, ) def test_user_message_get(self): self.verify_get(self.proxy.get_user_message, user_message.UserMessage) def test_delete_user_message(self): self.verify_delete( self.proxy.delete_user_message, user_message.UserMessage, False ) def test_delete_user_message_true(self): self.verify_delete( self.proxy.delete_user_message, user_message.UserMessage, True ) def test_limit(self): self.verify_list(self.proxy.limits, limit.Limit) class TestShareSnapshotResource(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_share_snapshots(self): self.verify_list( self.proxy.share_snapshots, share_snapshot.ShareSnapshot ) def test_share_snapshots_detailed(self): self.verify_list( self.proxy.share_snapshots, share_snapshot.ShareSnapshot, method_kwargs={"details": True, "name": "my_snapshot"}, expected_kwargs={"name": "my_snapshot"}, ) def 
test_share_snapshots_not_detailed(self): self.verify_list( self.proxy.share_snapshots, share_snapshot.ShareSnapshot, method_kwargs={"details": False, "name": "my_snapshot"}, expected_kwargs={"name": "my_snapshot"}, ) def test_share_snapshot_get(self): self.verify_get( self.proxy.get_share_snapshot, share_snapshot.ShareSnapshot ) def test_share_snapshot_delete(self): self.verify_delete( self.proxy.delete_share_snapshot, share_snapshot.ShareSnapshot, False, ) def test_share_snapshot_delete_ignore(self): self.verify_delete( self.proxy.delete_share_snapshot, share_snapshot.ShareSnapshot, True, ) def test_share_snapshot_create(self): self.verify_create( self.proxy.create_share_snapshot, share_snapshot.ShareSnapshot ) def test_share_snapshot_update(self): self.verify_update( self.proxy.update_share_snapshot, share_snapshot.ShareSnapshot ) @mock.patch("openstack.resource.wait_for_delete") def test_wait_for_delete(self, mock_wait): mock_resource = mock.Mock() mock_wait.return_value = mock_resource self.proxy.wait_for_delete(mock_resource) mock_wait.assert_called_once_with(self.proxy, mock_resource, 2, 120) class TestShareSnapshotInstanceResource(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_share_snapshot_instances(self): self.verify_list( self.proxy.share_snapshot_instances, share_snapshot_instance.ShareSnapshotInstance, ) def test_share_snapshot_instance_detailed(self): self.verify_list( self.proxy.share_snapshot_instances, share_snapshot_instance.ShareSnapshotInstance, method_kwargs={"details": True, "query": {'snapshot_id': 'fake'}}, expected_kwargs={"query": {'snapshot_id': 'fake'}}, ) def test_share_snapshot_instance_not_detailed(self): self.verify_list( self.proxy.share_snapshot_instances, share_snapshot_instance.ShareSnapshotInstance, method_kwargs={"details": False, "query": {'snapshot_id': 'fake'}}, expected_kwargs={"query": {'snapshot_id': 'fake'}}, ) def test_share_snapshot_instance_get(self): 
self.verify_get( self.proxy.get_share_snapshot_instance, share_snapshot_instance.ShareSnapshotInstance, ) class TestShareNetworkResource(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_share_networks(self): self.verify_list(self.proxy.share_networks, share_network.ShareNetwork) def test_share_networks_detailed(self): self.verify_list( self.proxy.share_networks, share_network.ShareNetwork, method_kwargs={"details": True, "name": "my_net"}, expected_kwargs={"name": "my_net"}, ) def test_share_networks_not_detailed(self): self.verify_list( self.proxy.share_networks, share_network.ShareNetwork, method_kwargs={"details": False, "name": "my_net"}, expected_kwargs={"name": "my_net"}, ) def test_share_network_get(self): self.verify_get( self.proxy.get_share_network, share_network.ShareNetwork ) def test_share_network_delete(self): self.verify_delete( self.proxy.delete_share_network, share_network.ShareNetwork, False ) def test_share_network_delete_ignore(self): self.verify_delete( self.proxy.delete_share_network, share_network.ShareNetwork, True ) def test_share_network_create(self): self.verify_create( self.proxy.create_share_network, share_network.ShareNetwork ) def test_share_network_update(self): self.verify_update( self.proxy.update_share_network, share_network.ShareNetwork ) class TestShareNetworkSubnetResource(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_share_network_subnets(self): self.verify_list( self.proxy.share_network_subnets, share_network_subnet.ShareNetworkSubnet, method_args=["test_share"], expected_args=[], expected_kwargs={"share_network_id": "test_share"}, ) def test_share_network_subnet_get(self): self.verify_get( self.proxy.get_share_network_subnet, share_network_subnet.ShareNetworkSubnet, method_args=["fake_network_id", "fake_sub_network_id"], expected_args=['fake_sub_network_id'], 
expected_kwargs={'share_network_id': 'fake_network_id'}, ) def test_share_network_subnet_create(self): self.verify_create( self.proxy.create_share_network_subnet, share_network_subnet.ShareNetworkSubnet, method_args=["fake_network_id"], method_kwargs={"p1": "v1"}, expected_args=[], expected_kwargs={ "share_network_id": "fake_network_id", "p1": "v1", }, ) def test_share_network_subnet_delete(self): self.verify_delete( self.proxy.delete_share_network_subnet, share_network_subnet.ShareNetworkSubnet, False, method_args=["fake_network_id", "fake_sub_network_id"], expected_args=["fake_sub_network_id"], expected_kwargs={'share_network_id': 'fake_network_id'}, ) class TestAccessRuleProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_access_ruless(self): self.verify_list( self.proxy.access_rules, share_access_rule.ShareAccessRule, method_args=["test_share"], expected_args=[], expected_kwargs={"share_id": "test_share"}, ) def test_access_rules_get(self): self.verify_get( self.proxy.get_access_rule, share_access_rule.ShareAccessRule ) def test_access_rules_create(self): self.verify_create( self.proxy.create_access_rule, share_access_rule.ShareAccessRule, method_args=["share_id"], expected_args=[], ) def test_access_rules_delete(self): self._verify( "openstack.shared_file_system.v2.share_access_rule." 
+ "ShareAccessRule.delete", self.proxy.delete_access_rule, method_args=[ 'access_id', 'share_id', 'ignore_missing', ], expected_args=[self.proxy, 'share_id'], expected_kwargs={'unrestrict': False}, ) class TestResourceLocksProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_list_resource_locks(self): self.verify_list( self.proxy.resource_locks, resource_locks.ResourceLock ) def test_resource_lock_get(self): self.verify_get( self.proxy.get_resource_lock, resource_locks.ResourceLock ) def test_resource_lock_delete(self): self.verify_delete( self.proxy.delete_resource_lock, resource_locks.ResourceLock, False ) def test_resource_lock_delete_ignore(self): self.verify_delete( self.proxy.delete_resource_lock, resource_locks.ResourceLock, True ) def test_resource_lock_create(self): self.verify_create( self.proxy.create_resource_lock, resource_locks.ResourceLock ) def test_resource_lock_update(self): self.verify_update( self.proxy.update_resource_lock, resource_locks.ResourceLock ) class TestShareGroupResource(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_share_groups(self): self.verify_list(self.proxy.share_groups, share_group.ShareGroup) def test_share_groups_query(self): self.verify_list( self.proxy.share_groups, share_group.ShareGroup, method_kwargs={"query": 1}, expected_kwargs={"query": 1}, ) def test_share_group_get(self): self.verify_get(self.proxy.get_share_group, share_group.ShareGroup) def test_share_group_find(self): self.verify_find(self.proxy.find_share_group, share_group.ShareGroup) def test_share_group_delete(self): self.verify_delete( self.proxy.delete_share_group, share_group.ShareGroup, False ) def test_share_group_delete_ignore(self): self.verify_delete( self.proxy.delete_share_group, share_group.ShareGroup, True ) def test_share_group_create(self): self.verify_create( self.proxy.create_share_group, 
share_group.ShareGroup ) def test_share_group_update(self): self.verify_update( self.proxy.update_share_group, share_group.ShareGroup ) def test_share_group_snapshots(self): self.verify_list( self.proxy.share_group_snapshots, share_group_snapshot.ShareGroupSnapshot, ) def test_share_group_snapshot_get(self): self.verify_get( self.proxy.get_share_group_snapshot, share_group_snapshot.ShareGroupSnapshot, ) def test_share_group_snapshot_update(self): self.verify_update( self.proxy.update_share_group_snapshot, share_group_snapshot.ShareGroupSnapshot, ) def test_share_group_snapshot_delete(self): self.verify_delete( self.proxy.delete_share_group_snapshot, share_group_snapshot.ShareGroupSnapshot, False, ) def test_share_group_snapshot_delete_ignore(self): self.verify_delete( self.proxy.delete_share_group_snapshot, share_group_snapshot.ShareGroupSnapshot, True, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_quota_class_set.py0000664000175000017500000000712000000000000031341 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.shared_file_system.v2 import quota_class_set from openstack.tests.unit import base EXAMPLE = { "share_groups": 50, "gigabytes": 1000, "share_group_snapshots": 50, "snapshots": 50, "snapshot_gigabytes": 1000, "shares": 50, "id": "default", "share_networks": 10, "share_replicas": 100, "replica_gigabytes": 1000, "per_share_gigabytes": -1, "backups": 50, "backup_gigabytes": 1000, } class TestQuotaClassSet(base.TestCase): def test_basic(self): _quota_class_set = quota_class_set.QuotaClassSet() self.assertEqual('/quota-class-sets', _quota_class_set.base_path) self.assertTrue(_quota_class_set.allow_fetch) self.assertTrue(_quota_class_set.allow_commit) self.assertFalse(_quota_class_set.allow_create) self.assertFalse(_quota_class_set.allow_delete) self.assertFalse(_quota_class_set.allow_list) self.assertFalse(_quota_class_set.allow_head) def test_get_quota_class_set(self): _quota_class_set = quota_class_set.QuotaClassSet(**EXAMPLE) self.assertEqual( EXAMPLE['share_groups'], _quota_class_set.share_groups ) self.assertEqual(EXAMPLE['gigabytes'], _quota_class_set.gigabytes) self.assertEqual( EXAMPLE['share_group_snapshots'], _quota_class_set.share_group_snapshots, ) self.assertEqual(EXAMPLE['snapshots'], _quota_class_set.snapshots) self.assertEqual( EXAMPLE['snapshot_gigabytes'], _quota_class_set.snapshot_gigabytes ) self.assertEqual(EXAMPLE['shares'], _quota_class_set.shares) self.assertEqual(EXAMPLE['id'], _quota_class_set.id) self.assertEqual( EXAMPLE['share_networks'], _quota_class_set.share_networks ) self.assertEqual( EXAMPLE['share_replicas'], _quota_class_set.share_replicas ) self.assertEqual( EXAMPLE['replica_gigabytes'], _quota_class_set.replica_gigabytes ) self.assertEqual( EXAMPLE['per_share_gigabytes'], _quota_class_set.per_share_gigabytes, ) self.assertEqual(EXAMPLE['backups'], _quota_class_set.backups) self.assertEqual( EXAMPLE['backup_gigabytes'], _quota_class_set.backup_gigabytes ) def test_update_quota_class_set(self): _quota_class_set = 
quota_class_set.QuotaClassSet(**EXAMPLE) updated_attributes = { "share_groups": 100, "gigabytes": 2000, "share_group_snapshots": 100, } _quota_class_set._update(**updated_attributes) self.assertEqual( updated_attributes['share_groups'], _quota_class_set.share_groups ) self.assertEqual( updated_attributes['gigabytes'], _quota_class_set.gigabytes ) self.assertEqual( updated_attributes['share_group_snapshots'], _quota_class_set.share_group_snapshots, ) self.assertEqual(EXAMPLE['snapshots'], _quota_class_set.snapshots) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_share.py0000664000175000017500000002036200000000000027255 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from keystoneauth1 import adapter from openstack.shared_file_system.v2 import share from openstack.tests.unit import base IDENTIFIER = '08a87d37-5ca2-4308-86c5-cba06d8d796c' EXAMPLE = { "id": IDENTIFIER, "size": 2, "availability_zone": "manila-zone-1", "created_at": "2021-02-11T17:38:00.000000", "status": "available", "name": None, "description": None, "project_id": "d19444eb73af4b37bc0794532ef6fc50", "snapshot_id": None, "share_network_id": None, "share_protocol": "NFS", "metadata": {}, "share_type": "cbb18bb7-cc97-477a-b64b-ed7c7f2a1c67", "volume_type": "default", "is_public": False, "is_snapshot_supported": True, "task_state": None, "share_type_name": "default", "access_rules_status": "active", "replication_type": None, "is_replicated": False, "user_id": "6c262cab98de42c2afc4cfccbefc50c7", "is_creating_new_share_from_snapshot_supported": True, "is_reverting_to_snapshot_supported": True, "share_group_id": None, "source_share_group_snapshot_member_id": None, "is_mounting_snapshot_supported": True, "progress": "100%", "share_server_id": None, "host": "new@denver#lvm-single-pool", } class TestShares(base.TestCase): def test_basic(self): shares_resource = share.Share() self.assertEqual('shares', shares_resource.resources_key) self.assertEqual('/shares', shares_resource.base_path) self.assertTrue(shares_resource.allow_list) self.assertTrue(shares_resource.allow_create) self.assertTrue(shares_resource.allow_fetch) self.assertTrue(shares_resource.allow_commit) self.assertTrue(shares_resource.allow_delete) def test_make_shares(self): shares_resource = share.Share(**EXAMPLE) self.assertEqual(EXAMPLE['id'], shares_resource.id) self.assertEqual(EXAMPLE['size'], shares_resource.size) self.assertEqual( EXAMPLE['availability_zone'], shares_resource.availability_zone ) self.assertEqual(EXAMPLE['created_at'], shares_resource.created_at) self.assertEqual(EXAMPLE['status'], shares_resource.status) self.assertEqual(EXAMPLE['name'], shares_resource.name) 
self.assertEqual(EXAMPLE['description'], shares_resource.description) self.assertEqual(EXAMPLE['project_id'], shares_resource.project_id) self.assertEqual(EXAMPLE['snapshot_id'], shares_resource.snapshot_id) self.assertEqual( EXAMPLE['share_network_id'], shares_resource.share_network_id ) self.assertEqual( EXAMPLE['share_protocol'], shares_resource.share_protocol ) self.assertEqual(EXAMPLE['metadata'], shares_resource.metadata) self.assertEqual(EXAMPLE['share_type'], shares_resource.share_type) self.assertEqual(EXAMPLE['is_public'], shares_resource.is_public) self.assertEqual( EXAMPLE['is_snapshot_supported'], shares_resource.is_snapshot_supported, ) self.assertEqual(EXAMPLE['task_state'], shares_resource.task_state) self.assertEqual( EXAMPLE['share_type_name'], shares_resource.share_type_name ) self.assertEqual( EXAMPLE['access_rules_status'], shares_resource.access_rules_status ) self.assertEqual( EXAMPLE['replication_type'], shares_resource.replication_type ) self.assertEqual( EXAMPLE['is_replicated'], shares_resource.is_replicated ) self.assertEqual(EXAMPLE['user_id'], shares_resource.user_id) self.assertEqual( EXAMPLE['is_creating_new_share_from_snapshot_supported'], (shares_resource.is_creating_new_share_from_snapshot_supported), ) self.assertEqual( EXAMPLE['is_reverting_to_snapshot_supported'], shares_resource.is_reverting_to_snapshot_supported, ) self.assertEqual( EXAMPLE['share_group_id'], shares_resource.share_group_id ) self.assertEqual( EXAMPLE['source_share_group_snapshot_member_id'], shares_resource.source_share_group_snapshot_member_id, ) self.assertEqual( EXAMPLE['is_mounting_snapshot_supported'], shares_resource.is_mounting_snapshot_supported, ) self.assertEqual(EXAMPLE['progress'], shares_resource.progress) self.assertEqual( EXAMPLE['share_server_id'], shares_resource.share_server_id ) self.assertEqual(EXAMPLE['host'], shares_resource.host) class TestShareActions(TestShares): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body 
= None self.resp.status_code = 202 self.resp.json = mock.Mock(return_value=self.resp.body) self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = '3.0' self.sess.post = mock.Mock(return_value=self.resp) self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_shrink_share(self): sot = share.Share(**EXAMPLE) microversion = sot._get_microversion(self.sess, action='patch') self.assertIsNone(sot.shrink_share(self.sess, new_size=1)) url = f'shares/{IDENTIFIER}/action' body = {"shrink": {"new_size": 1}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=microversion ) def test_extend_share(self): sot = share.Share(**EXAMPLE) microversion = sot._get_microversion(self.sess, action='patch') self.assertIsNone(sot.extend_share(self.sess, new_size=3)) url = f'shares/{IDENTIFIER}/action' body = {"extend": {"new_size": 3}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=microversion ) def test_revert_to_snapshot(self): sot = share.Share(**EXAMPLE) microversion = sot._get_microversion(self.sess, action='patch') self.assertIsNone(sot.revert_to_snapshot(self.sess, "fake_id")) url = f'shares/{IDENTIFIER}/action' body = {"revert": {"snapshot_id": "fake_id"}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=microversion ) def test_manage_share(self): sot = share.Share() self.resp.headers = {} self.resp.json = mock.Mock( return_value={"share": {"name": "test_share", "size": 1}} ) export_path = ( "10.254.0 .5:/shares/share-42033c24-0261-424f-abda-4fef2f6dbfd5." 
) params = {"name": "test_share"} res = sot.manage( self.sess, sot["share_protocol"], export_path, sot["host"], **params, ) self.assertEqual(res.name, "test_share") self.assertEqual(res.size, 1) jsonDict = { "share": { "protocol": sot["share_protocol"], "export_path": export_path, "service_host": sot["host"], "name": "test_share", } } self.sess.post.assert_called_once_with("shares/manage", json=jsonDict) def test_unmanage_share(self): sot = share.Share(**EXAMPLE) microversion = sot._get_microversion(self.sess, action='patch') self.assertIsNone(sot.unmanage(self.sess)) url = 'shares/%s/action' % IDENTIFIER body = {'unmanage': None} self.sess.post.assert_called_with( url, json=body, headers={'Accept': ''}, microversion=microversion ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_share_access_rule.py0000664000175000017500000000447600000000000031635 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.shared_file_system.v2 import share_access_rule from openstack.tests.unit import base EXAMPLE = { "access_level": "rw", "state": "error", "id": "507bf114-36f2-4f56-8cf4-857985ca87c1", "share_id": "fb213952-2352-41b4-ad7b-2c4c69d13eef", "access_type": "cert", "access_to": "example.com", "access_key": None, "created_at": "2021-09-12T02:01:04.000000", "updated_at": "2021-09-12T02:01:04.000000", "metadata": {"key1": "value1", "key2": "value2"}, } class TestShareAccessRule(base.TestCase): def test_basic(self): rules_resource = share_access_rule.ShareAccessRule() self.assertEqual('access_list', rules_resource.resources_key) self.assertEqual('/share-access-rules', rules_resource.base_path) self.assertTrue(rules_resource.allow_list) self.assertDictEqual( {"limit": "limit", "marker": "marker", "share_id": "share_id"}, rules_resource._query_mapping._mapping, ) def test_make_share_access_rules(self): rules_resource = share_access_rule.ShareAccessRule(**EXAMPLE) self.assertEqual(EXAMPLE['id'], rules_resource.id) self.assertEqual(EXAMPLE['access_level'], rules_resource.access_level) self.assertEqual(EXAMPLE['state'], rules_resource.state) self.assertEqual(EXAMPLE['id'], rules_resource.id) self.assertEqual(EXAMPLE['access_type'], rules_resource.access_type) self.assertEqual(EXAMPLE['access_to'], rules_resource.access_to) self.assertEqual(EXAMPLE['access_key'], rules_resource.access_key) self.assertEqual(EXAMPLE['created_at'], rules_resource.created_at) self.assertEqual(EXAMPLE['updated_at'], rules_resource.updated_at) self.assertEqual(EXAMPLE['metadata'], rules_resource.metadata) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_share_export_locations.py0000664000175000017500000000340000000000000032723 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.shared_file_system.v2 import share_export_locations as el from openstack.tests.unit import base IDENTIFIER = '08a87d37-5ca2-4308-86c5-cba06d8d796c' EXAMPLE = { "id": "f87589cb-f4bc-4a9b-b481-ab701206eb85", "path": ( "199.19.213.225:/opt/stack/data/manila/mnt/" "share-6ba490c5-5225-4c3b-9982-14b8f475c6d9" ), "preferred": False, "share_instance_id": "6ba490c5-5225-4c3b-9982-14b8f475c6d9", "is_admin_only": False, } class TestShareExportLocations(base.TestCase): def test_basic(self): export = el.ShareExportLocation() self.assertEqual('export_locations', export.resources_key) self.assertEqual( '/shares/%(share_id)s/export_locations', export.base_path ) self.assertTrue(export.allow_list) def test_share_export_locations(self): export = el.ShareExportLocation(**EXAMPLE) self.assertEqual(EXAMPLE['id'], export.id) self.assertEqual(EXAMPLE['path'], export.path) self.assertEqual(EXAMPLE['preferred'], export.is_preferred) self.assertEqual( EXAMPLE['share_instance_id'], export.share_instance_id ) self.assertEqual(EXAMPLE['is_admin_only'], export.is_admin) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_share_group.py0000664000175000017500000000614400000000000030473 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.shared_file_system.v2 import share_group from openstack.tests.unit import base EXAMPLE = { "status": "creating", "description": None, "links": "[]", "availability_zone": None, "source_share_group_snapshot_id": None, "share_network_id": None, "share_server_id": None, "host": None, "share_group_type_id": "89861c2a-10bf-4013-bdd4-3d020466aee4", "consistent_snapshot_support": None, "id": "f9c1f80c-2392-4e34-bd90-fc89cdc5bf93", "name": None, "created_at": "2021-06-03T19:20:33.974421", "project_id": "e23850eeb91d4fa3866af634223e454c", "share_types": ["ecd11f4c-d811-4471-b656-c755c77e02ba"], } class TestShareGroups(base.TestCase): def test_basic(self): share_groups = share_group.ShareGroup() self.assertEqual('share_groups', share_groups.resources_key) self.assertEqual('/share-groups', share_groups.base_path) self.assertTrue(share_groups.allow_list) self.assertTrue(share_groups.allow_fetch) self.assertTrue(share_groups.allow_create) self.assertTrue(share_groups.allow_commit) self.assertTrue(share_groups.allow_delete) self.assertFalse(share_groups.allow_head) self.assertDictEqual( { "limit": "limit", "marker": "marker", "share_group_id": "share_group_id", }, share_groups._query_mapping._mapping, ) def test_make_share_groups(self): share_group_res = share_group.ShareGroup(**EXAMPLE) self.assertEqual(EXAMPLE['id'], share_group_res.id) self.assertEqual(EXAMPLE['status'], share_group_res.status) self.assertEqual( EXAMPLE['availability_zone'], share_group_res.availability_zone ) self.assertEqual(EXAMPLE['description'], share_group_res.description) self.assertEqual( 
EXAMPLE['source_share_group_snapshot_id'], share_group_res.share_group_snapshot_id, ) self.assertEqual( EXAMPLE['share_network_id'], share_group_res.share_network_id ) self.assertEqual( EXAMPLE['share_group_type_id'], share_group_res.share_group_type_id ) self.assertEqual( EXAMPLE['consistent_snapshot_support'], share_group_res.consistent_snapshot_support, ) self.assertEqual(EXAMPLE['created_at'], share_group_res.created_at) self.assertEqual(EXAMPLE['project_id'], share_group_res.project_id) self.assertEqual(EXAMPLE['share_types'], share_group_res.share_types) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_share_group_snapshot.py0000664000175000017500000000774600000000000032423 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from keystoneauth1 import adapter from openstack.shared_file_system.v2 import share_group_snapshot from openstack.tests.unit import base IDENTIFIER = '38152b6d-e1b5-465f-91bc-20bca4676a2a' EXAMPLE = { "id": IDENTIFIER, "name": "snapshot_1", "created_at": "2021-10-24T19:36:49.555325", "status": "available", "description": "first snapshot of sg-1", "project_id": "7343d2f7770b4eb6a7bc33f44dcee1e0", "share_group_id": "fb41512f-7c49-4304-afb1-66573c7feb14", } class TestShareGroupSnapshot(base.TestCase): def test_basic(self): share_group_snapshots = share_group_snapshot.ShareGroupSnapshot() self.assertEqual( 'share_group_snapshot', share_group_snapshots.resource_key ) self.assertEqual( 'share_group_snapshots', share_group_snapshots.resources_key ) self.assertEqual( '/share-group-snapshots', share_group_snapshots.base_path ) self.assertTrue(share_group_snapshots.allow_create) self.assertTrue(share_group_snapshots.allow_fetch) self.assertTrue(share_group_snapshots.allow_commit) self.assertTrue(share_group_snapshots.allow_delete) self.assertTrue(share_group_snapshots.allow_list) self.assertFalse(share_group_snapshots.allow_head) def test_make_share_groups(self): share_group_snapshots = share_group_snapshot.ShareGroupSnapshot( **EXAMPLE ) self.assertEqual(EXAMPLE['id'], share_group_snapshots.id) self.assertEqual(EXAMPLE['name'], share_group_snapshots.name) self.assertEqual( EXAMPLE['created_at'], share_group_snapshots.created_at ) self.assertEqual(EXAMPLE['status'], share_group_snapshots.status) self.assertEqual( EXAMPLE['description'], share_group_snapshots.description ) self.assertEqual( EXAMPLE['project_id'], share_group_snapshots.project_id ) self.assertEqual( EXAMPLE['share_group_id'], share_group_snapshots.share_group_id ) class TestShareGroupSnapshotActions(TestShareGroupSnapshot): def setUp(self): super(TestShareGroupSnapshot, self).setUp() self.resp = mock.Mock() self.resp.body = None self.resp.status_code = 200 self.resp.json = 
mock.Mock(return_value=self.resp.body) self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = '3.0' self.sess.post = mock.Mock(return_value=self.resp) self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_reset_status(self): sot = share_group_snapshot.ShareGroupSnapshot(**EXAMPLE) self.assertIsNone(sot.reset_status(self.sess, 'available')) url = f'share-group-snapshots/{IDENTIFIER}/action' body = {"reset_status": {"status": 'available'}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=self.sess.default_microversion, ) def test_get_members(self): sot = share_group_snapshot.ShareGroupSnapshot(**EXAMPLE) sot.get_members(self.sess) url = f'share-group-snapshots/{IDENTIFIER}/members' headers = {'Accept': ''} self.sess.get.assert_called_with( url, headers=headers, microversion=self.sess.default_microversion, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_share_instance.py0000664000175000017500000001113200000000000031134 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from keystoneauth1 import adapter from openstack.shared_file_system.v2 import share_instance from openstack.tests.unit import base IDENTIFIER = "75559a8b-c90c-42a7-bda2-edbe86acfb7b" EXAMPLE = { "status": "available", "progress": "100%", "share_id": "d94a8548-2079-4be0-b21c-0a887acd31ca", "availability_zone": "nova", "replica_state": None, "created_at": "2015-09-07T08:51:34.000000", "cast_rules_to_readonly": False, "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "share_server_id": "ba11930a-bf1a-4aa7-bae4-a8dfbaa3cc73", "host": "manila2@generic1#GENERIC1", "access_rules_status": "active", "id": IDENTIFIER, } class TestShareInstances(base.TestCase): def test_basic(self): share_instance_resource = share_instance.ShareInstance() self.assertEqual( 'share_instances', share_instance_resource.resources_key ) self.assertEqual('/share_instances', share_instance_resource.base_path) self.assertTrue(share_instance_resource.allow_list) self.assertFalse(share_instance_resource.allow_create) self.assertTrue(share_instance_resource.allow_fetch) self.assertFalse(share_instance_resource.allow_commit) self.assertFalse(share_instance_resource.allow_delete) def test_make_share_instances(self): share_instance_resource = share_instance.ShareInstance(**EXAMPLE) self.assertEqual(EXAMPLE['status'], share_instance_resource.status) self.assertEqual(EXAMPLE['progress'], share_instance_resource.progress) self.assertEqual(EXAMPLE['share_id'], share_instance_resource.share_id) self.assertEqual( EXAMPLE['availability_zone'], share_instance_resource.availability_zone, ) self.assertEqual( EXAMPLE['replica_state'], share_instance_resource.replica_state ) self.assertEqual( EXAMPLE['created_at'], share_instance_resource.created_at ) self.assertEqual( EXAMPLE['cast_rules_to_readonly'], share_instance_resource.cast_rules_to_readonly, ) self.assertEqual( EXAMPLE['share_network_id'], share_instance_resource.share_network_id, ) self.assertEqual( 
EXAMPLE['share_server_id'], share_instance_resource.share_server_id ) self.assertEqual(EXAMPLE['host'], share_instance_resource.host) self.assertEqual( EXAMPLE['access_rules_status'], share_instance_resource.access_rules_status, ) self.assertEqual(EXAMPLE['id'], share_instance_resource.id) class TestShareInstanceActions(TestShareInstances): def setUp(self): super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.status_code = 200 self.resp.json = mock.Mock(return_value=self.resp.body) self.sess = mock.Mock(spec=adapter.Adapter) self.sess.default_microversion = '3.0' self.sess.post = mock.Mock(return_value=self.resp) self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_reset_status(self): sot = share_instance.ShareInstance(**EXAMPLE) microversion = sot._get_microversion(self.sess, action='patch') self.assertIsNone(sot.reset_status(self.sess, 'active')) url = f'share_instances/{IDENTIFIER}/action' body = {"reset_status": {"status": 'active'}} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=microversion ) def test_force_delete(self): sot = share_instance.ShareInstance(**EXAMPLE) microversion = sot._get_microversion(self.sess, action='delete') self.assertIsNone(sot.force_delete(self.sess)) url = f'share_instances/{IDENTIFIER}/action' body = {'force_delete': None} headers = {'Accept': ''} self.sess.post.assert_called_with( url, json=body, headers=headers, microversion=microversion ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_share_network.py0000664000175000017500000000500000000000000031016 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.shared_file_system.v2 import share_network from openstack.tests.unit import base IDENTIFIER = '6e1821be-c494-4f62-8301-5dcd19f4d615' EXAMPLE = { "id": IDENTIFIER, "project_id": "4b8184eddd6b429a93231c056ae9cd12", "name": "my_share_net", "description": "My share network", "created_at": "2021-06-10T10:11:17.291981", "updated_at": None, "share_network_subnets": [], } class TestShareNetwork(base.TestCase): def test_basic(self): networks = share_network.ShareNetwork() self.assertEqual('share_networks', networks.resources_key) self.assertEqual('/share-networks', networks.base_path) self.assertTrue(networks.allow_list) self.assertTrue(networks.allow_create) self.assertTrue(networks.allow_fetch) self.assertTrue(networks.allow_commit) self.assertTrue(networks.allow_delete) self.assertFalse(networks.allow_head) self.assertDictEqual( { "limit": "limit", "marker": "marker", "project_id": "project_id", "created_since": "created_since", "created_before": "created_before", "offset": "offset", "security_service_id": "security_service_id", "project_id": "project_id", "all_projects": "all_tenants", "name": "name", "description": "description", }, networks._query_mapping._mapping, ) def test_share_network(self): networks = share_network.ShareNetwork(**EXAMPLE) self.assertEqual(EXAMPLE['id'], networks.id) self.assertEqual(EXAMPLE['name'], networks.name) self.assertEqual(EXAMPLE['project_id'], networks.project_id) self.assertEqual(EXAMPLE['description'], networks.description) self.assertEqual(EXAMPLE['created_at'], networks.created_at) 
self.assertEqual(EXAMPLE['updated_at'], networks.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_share_network_subnet.py0000664000175000017500000000536600000000000032415 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.shared_file_system.v2 import share_network_subnet as SNS from openstack.tests.unit import base IDENTIFIER = '9cd5a59f-4d22-496f-8b1a-ea4860c24d39' EXAMPLE = { "id": IDENTIFIER, "availability_zone": None, "share_network_id": "652ef887-b805-4328-b65a-b88c64cb69ec", "share_network_name": None, "created_at": "2021-02-24T02:45:59.000000", "segmentation_id": None, "neutron_subnet_id": None, "updated_at": None, "neutron_net_id": None, "ip_version": None, "cidr": None, "network_type": None, "mtu": None, "gateway": None, } class TestShareNetworkSubnet(base.TestCase): def test_basic(self): SNS_resource = SNS.ShareNetworkSubnet() self.assertEqual('share_network_subnets', SNS_resource.resources_key) self.assertEqual( '/share-networks/%(share_network_id)s/subnets', SNS_resource.base_path, ) self.assertTrue(SNS_resource.allow_list) def test_make_share_network_subnet(self): SNS_resource = SNS.ShareNetworkSubnet(**EXAMPLE) self.assertEqual(EXAMPLE['id'], SNS_resource.id) self.assertEqual( EXAMPLE['availability_zone'], SNS_resource.availability_zone ) self.assertEqual( EXAMPLE['share_network_id'], 
SNS_resource.share_network_id ) self.assertEqual( EXAMPLE['share_network_name'], SNS_resource.share_network_name ) self.assertEqual(EXAMPLE['created_at'], SNS_resource.created_at) self.assertEqual( EXAMPLE['segmentation_id'], SNS_resource.segmentation_id ) self.assertEqual( EXAMPLE['neutron_subnet_id'], SNS_resource.neutron_subnet_id ) self.assertEqual(EXAMPLE['updated_at'], SNS_resource.updated_at) self.assertEqual( EXAMPLE['neutron_net_id'], SNS_resource.neutron_net_id ) self.assertEqual(EXAMPLE['ip_version'], SNS_resource.ip_version) self.assertEqual(EXAMPLE['cidr'], SNS_resource.cidr) self.assertEqual(EXAMPLE['network_type'], SNS_resource.network_type) self.assertEqual(EXAMPLE['mtu'], SNS_resource.mtu) self.assertEqual(EXAMPLE['gateway'], SNS_resource.gateway) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_share_snapshot.py0000664000175000017500000000501000000000000031165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.shared_file_system.v2 import share_snapshot from openstack.tests.unit import base EXAMPLE = { "status": "creating", "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "user_id": "5c7bdb6eb0504d54a619acf8375c08ce", "name": "snapshot_share1", "created_at": "2021-06-07T11:50:39.756808", "description": "Here is a snapshot of share Share1", "share_proto": "NFS", "share_size": 1, "id": "6d221c1d-0200-461e-8d20-24b4776b9ddb", "project_id": "cadd7139bc3148b8973df097c0911016", "size": 1, } class TestShareSnapshot(base.TestCase): def test_basic(self): snapshot_resource = share_snapshot.ShareSnapshot() self.assertEqual('snapshots', snapshot_resource.resources_key) self.assertEqual('/snapshots', snapshot_resource.base_path) self.assertTrue(snapshot_resource.allow_list) self.assertDictEqual( { "limit": "limit", "marker": "marker", "snapshot_id": "snapshot_id", }, snapshot_resource._query_mapping._mapping, ) def test_make_share_snapshot(self): snapshot_resource = share_snapshot.ShareSnapshot(**EXAMPLE) self.assertEqual(EXAMPLE['id'], snapshot_resource.id) self.assertEqual(EXAMPLE['share_id'], snapshot_resource.share_id) self.assertEqual(EXAMPLE['user_id'], snapshot_resource.user_id) self.assertEqual(EXAMPLE['created_at'], snapshot_resource.created_at) self.assertEqual(EXAMPLE['status'], snapshot_resource.status) self.assertEqual(EXAMPLE['name'], snapshot_resource.name) self.assertEqual(EXAMPLE['description'], snapshot_resource.description) self.assertEqual(EXAMPLE['share_proto'], snapshot_resource.share_proto) self.assertEqual(EXAMPLE['share_size'], snapshot_resource.share_size) self.assertEqual(EXAMPLE['project_id'], snapshot_resource.project_id) self.assertEqual(EXAMPLE['size'], snapshot_resource.size) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_share_snapshot_instance.py0000664000175000017500000000435000000000000033057 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.shared_file_system.v2 import share_snapshot_instance from openstack.tests.unit import base EXAMPLE = { "status": "available", "share_id": "618599ab-09a1-432d-973a-c102564c7fec", "share_instance_id": "8edff0cb-e5ce-4bab-aa99-afe02ed6a76a", "snapshot_id": "d447de19-a6d3-40b3-ae9f-895c86798924", "progress": "100%", "created_at": "2021-06-04T00:44:52.000000", "id": "275516e8-c998-4e78-a41e-7dd3a03e71cd", "provider_location": "/path/to/fake...", "updated_at": "2017-06-04T00:44:54.000000", } class TestShareSnapshotInstances(base.TestCase): def test_basic(self): instances = share_snapshot_instance.ShareSnapshotInstance() self.assertEqual('snapshot_instance', instances.resource_key) self.assertEqual('snapshot_instances', instances.resources_key) self.assertEqual('/snapshot-instances', instances.base_path) self.assertTrue(instances.allow_list) def test_make_share_snapshot_instance(self): instance = share_snapshot_instance.ShareSnapshotInstance(**EXAMPLE) self.assertEqual(EXAMPLE['id'], instance.id) self.assertEqual(EXAMPLE['share_id'], instance.share_id) self.assertEqual( EXAMPLE['share_instance_id'], instance.share_instance_id ) self.assertEqual(EXAMPLE['snapshot_id'], instance.snapshot_id) self.assertEqual(EXAMPLE['status'], instance.status) self.assertEqual(EXAMPLE['progress'], instance.progress) self.assertEqual(EXAMPLE['created_at'], instance.created_at) self.assertEqual(EXAMPLE['updated_at'], 
instance.updated_at) self.assertEqual( EXAMPLE['provider_location'], instance.provider_location ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_storage_pool.py0000664000175000017500000000524000000000000030646 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.shared_file_system.v2 import storage_pool from openstack.tests.unit import base EXAMPLE = { "name": "opencloud@alpha#ALPHA_pool", "host": "opencloud", "backend": "alpha", "pool": "ALPHA_pool", "capabilities": { "pool_name": "ALPHA_pool", "total_capacity_gb": 1230.0, "free_capacity_gb": 1210.0, "reserved_percentage": 0, "share_backend_name": "ALPHA", "storage_protocol": "NFS_CIFS", "vendor_name": "Open Source", "driver_version": "1.0", "timestamp": "2021-07-31T00:28:02.935569", "driver_handles_share_servers": True, "snapshot_support": True, "create_share_from_snapshot_support": True, "revert_to_snapshot_support": True, "mount_snapshot_support": True, "dedupe": False, "compression": False, "replication_type": None, "replication_domain": None, "sg_consistent_snapshot_support": "pool", "ipv4_support": True, "ipv6_support": False, }, } class TestStoragePool(base.TestCase): def test_basic(self): pool_resource = storage_pool.StoragePool() self.assertEqual('pools', pool_resource.resources_key) self.assertEqual('/scheduler-stats/pools', pool_resource.base_path) 
self.assertTrue(pool_resource.allow_list) self.assertDictEqual( { 'pool': 'pool', 'backend': 'backend', 'host': 'host', 'limit': 'limit', 'marker': 'marker', 'capabilities': 'capabilities', 'share_type': 'share_type', }, pool_resource._query_mapping._mapping, ) def test_make_storage_pool(self): pool_resource = storage_pool.StoragePool(**EXAMPLE) self.assertEqual(EXAMPLE['pool'], pool_resource.pool) self.assertEqual(EXAMPLE['host'], pool_resource.host) self.assertEqual(EXAMPLE['name'], pool_resource.name) self.assertEqual(EXAMPLE['backend'], pool_resource.backend) self.assertEqual(EXAMPLE['capabilities'], pool_resource.capabilities) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/shared_file_system/v2/test_user_message.py0000664000175000017500000000534400000000000030640 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.shared_file_system.v2 import user_message
from openstack.tests.unit import base

# Example user-message payload as returned by the Manila /messages API.
IDENTIFIER = "2784bc88-b729-4220-a6bb-a8b7a8f53aad"
EXAMPLE = {
    "id": IDENTIFIER,
    "project_id": "dcc9de3c5fc8471ba3662dbb2b6166d5",
    "action_id": "001",
    "detail_id": "008",
    "message_level": "ERROR",
    "created_at": "2021-03-26T05:16:39.000000",
    "expires_at": "2021-04-25T05:16:39.000000",
    "request_id": "req-e4b3e6de-ce4d-4ef2-b1e7-0087200e4db3",
    "resource_type": "SHARE",
    "resource_id": "c2e4ca07-8c37-4014-92c9-2171c7813fa0",
    "user_message": (
        "allocate host: No storage could be allocated"
        "for this share request, Capabilities filter"
        "didn't succeed."
    ),
}


class TestUserMessage(base.TestCase):
    def test_basic(self):
        """Check static resource metadata for UserMessage."""
        sot = user_message.UserMessage()
        self.assertEqual('messages', sot.resources_key)
        self.assertEqual('/messages', sot.base_path)
        # Messages are read-only apart from deletion.
        self.assertTrue(sot.allow_list)
        self.assertFalse(sot.allow_create)
        self.assertFalse(sot.allow_commit)
        self.assertTrue(sot.allow_delete)
        self.assertTrue(sot.allow_fetch)
        self.assertFalse(sot.allow_head)
        self.assertDictEqual(
            {"limit": "limit", "marker": "marker", "message_id": "message_id"},
            sot._query_mapping._mapping,
        )

    def test_user_message(self):
        """Every attribute should mirror the example payload."""
        messages = user_message.UserMessage(**EXAMPLE)
        # Attribute names match the payload keys one-for-one, so the
        # comparison can be table-driven.
        for attr in (
            'id',
            'resource_id',
            'message_level',
            'user_message',
            'expires_at',
            'detail_id',
            'created_at',
            'request_id',
            'project_id',
            'resource_type',
            'action_id',
        ):
            self.assertEqual(EXAMPLE[attr], getattr(messages, attr))
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_connection.py0000664000175000017500000004272500000000000024121 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from unittest import mock import fixtures from keystoneauth1 import session import openstack.config from openstack import connection from openstack import proxy from openstack import service_description from openstack.tests import fakes from openstack.tests.unit import base from openstack.tests.unit.fake import fake_service CONFIG_AUTH_URL = "https://identity.example.com/" CONFIG_USERNAME = "BozoTheClown" CONFIG_PASSWORD = "TopSecret" CONFIG_PROJECT = "TheGrandPrizeGame" CONFIG_CACERT = "TrustMe" CLOUD_CONFIG = """ clouds: sample-cloud: region_name: RegionOne auth: auth_url: {auth_url} username: {username} password: {password} project_name: {project} insecure-cloud: auth: auth_url: {auth_url} username: {username} password: {password} project_name: {project} cacert: {cacert} verify: False insecure-cloud-alternative-format: auth: auth_url: {auth_url} username: {username} password: {password} project_name: {project} insecure: True cacert-cloud: auth: auth_url: {auth_url} username: {username} password: {password} project_name: {project} cacert: {cacert} profiled-cloud: profile: dummy auth: username: {username} password: {password} project_name: {project} cacert: {cacert} """.format( 
auth_url=CONFIG_AUTH_URL, username=CONFIG_USERNAME, password=CONFIG_PASSWORD, project=CONFIG_PROJECT, cacert=CONFIG_CACERT, ) VENDOR_CONFIG = """ {{ "name": "dummy", "profile": {{ "auth": {{ "auth_url": "{auth_url}" }}, "vendor_hook": "openstack.tests.unit.test_connection:vendor_hook" }} }} """.format( auth_url=CONFIG_AUTH_URL ) PUBLIC_CLOUDS_YAML = """ public-clouds: dummy: auth: auth_url: {auth_url} vendor_hook: openstack.tests.unit.test_connection:vendor_hook """.format( auth_url=CONFIG_AUTH_URL ) class _TestConnectionBase(base.TestCase): def setUp(self): super().setUp() # Create a temporary directory where our test config will live # and insert it into the search path via OS_CLIENT_CONFIG_FILE. config_dir = self.useFixture(fixtures.TempDir()).path config_path = os.path.join(config_dir, "clouds.yaml") with open(config_path, "w") as conf: conf.write(CLOUD_CONFIG) self.useFixture( fixtures.EnvironmentVariable("OS_CLIENT_CONFIG_FILE", config_path) ) self.use_keystone_v2() class TestConnection(_TestConnectionBase): def test_other_parameters(self): conn = connection.Connection(cloud='sample-cloud', cert='cert') self.assertEqual(conn.session.cert, 'cert') def test_session_provided(self): mock_session = mock.Mock(spec=session.Session) mock_session.auth = mock.Mock() mock_session.auth.auth_url = 'https://auth.example.com' conn = connection.Connection(session=mock_session, cert='cert') self.assertEqual(mock_session, conn.session) self.assertEqual('auth.example.com', conn.config.name) def test_create_session(self): conn = connection.Connection(cloud='sample-cloud') self.assertIsNotNone(conn) # TODO(mordred) Rework this - we need to provide requests-mock # entries for each of the proxies below # self.assertEqual('openstack.proxy', # conn.alarm.__class__.__module__) # self.assertEqual('openstack.clustering.v1._proxy', # conn.clustering.__class__.__module__) # self.assertEqual('openstack.compute.v2._proxy', # conn.compute.__class__.__module__) # 
self.assertEqual('openstack.database.v1._proxy', # conn.database.__class__.__module__) # self.assertEqual('openstack.identity.v2._proxy', # conn.identity.__class__.__module__) # self.assertEqual('openstack.image.v2._proxy', # conn.image.__class__.__module__) # self.assertEqual('openstack.object_store.v1._proxy', # conn.object_store.__class__.__module__) # self.assertEqual('openstack.load_balancer.v2._proxy', # conn.load_balancer.__class__.__module__) # self.assertEqual('openstack.orchestration.v1._proxy', # conn.orchestration.__class__.__module__) # self.assertEqual('openstack.workflow.v2._proxy', # conn.workflow.__class__.__module__) def test_create_unknown_proxy(self): self.register_uris( [ self.get_placement_discovery_mock_dict(), ] ) def closure(): return self.cloud.placement self.assertIsInstance(self.cloud.placement, proxy.Proxy) self.assert_calls() def test_create_connection_version_param_default(self): c1 = connection.Connection(cloud='sample-cloud') conn = connection.Connection(session=c1.session) self.assertEqual( 'openstack.identity.v3._proxy', conn.identity.__class__.__module__ ) def test_create_connection_version_param_string(self): c1 = connection.Connection(cloud='sample-cloud') conn = connection.Connection( session=c1.session, identity_api_version='2' ) self.assertEqual( 'openstack.identity.v2._proxy', conn.identity.__class__.__module__ ) def test_create_connection_version_param_int(self): c1 = connection.Connection(cloud='sample-cloud') conn = connection.Connection( session=c1.session, identity_api_version=3 ) self.assertEqual( 'openstack.identity.v3._proxy', conn.identity.__class__.__module__ ) def test_create_connection_version_param_bogus(self): c1 = connection.Connection(cloud='sample-cloud') conn = connection.Connection( session=c1.session, identity_api_version='red' ) # TODO(mordred) This is obviously silly behavior self.assertEqual( 'openstack.identity.v3._proxy', conn.identity.__class__.__module__ ) def test_from_config_given_config(self): 
cloud_region = openstack.config.OpenStackConfig().get_one( "sample-cloud" ) sot = connection.from_config(config=cloud_region) self.assertEqual( CONFIG_USERNAME, sot.config.config['auth']['username'] ) self.assertEqual( CONFIG_PASSWORD, sot.config.config['auth']['password'] ) self.assertEqual( CONFIG_AUTH_URL, sot.config.config['auth']['auth_url'] ) self.assertEqual( CONFIG_PROJECT, sot.config.config['auth']['project_name'] ) def test_from_config_given_cloud(self): sot = connection.from_config(cloud="sample-cloud") self.assertEqual( CONFIG_USERNAME, sot.config.config['auth']['username'] ) self.assertEqual( CONFIG_PASSWORD, sot.config.config['auth']['password'] ) self.assertEqual( CONFIG_AUTH_URL, sot.config.config['auth']['auth_url'] ) self.assertEqual( CONFIG_PROJECT, sot.config.config['auth']['project_name'] ) def test_from_config_given_cloud_config(self): cloud_region = openstack.config.OpenStackConfig().get_one( "sample-cloud" ) sot = connection.from_config(cloud_config=cloud_region) self.assertEqual( CONFIG_USERNAME, sot.config.config['auth']['username'] ) self.assertEqual( CONFIG_PASSWORD, sot.config.config['auth']['password'] ) self.assertEqual( CONFIG_AUTH_URL, sot.config.config['auth']['auth_url'] ) self.assertEqual( CONFIG_PROJECT, sot.config.config['auth']['project_name'] ) def test_from_config_given_cloud_name(self): sot = connection.from_config(cloud_name="sample-cloud") self.assertEqual( CONFIG_USERNAME, sot.config.config['auth']['username'] ) self.assertEqual( CONFIG_PASSWORD, sot.config.config['auth']['password'] ) self.assertEqual( CONFIG_AUTH_URL, sot.config.config['auth']['auth_url'] ) self.assertEqual( CONFIG_PROJECT, sot.config.config['auth']['project_name'] ) def test_from_config_verify(self): sot = connection.from_config(cloud="insecure-cloud") self.assertFalse(sot.session.verify) sot = connection.from_config(cloud="cacert-cloud") self.assertEqual(CONFIG_CACERT, sot.session.verify) def test_from_config_insecure(self): # Ensure that the 
"insecure=True" flag implies "verify=False" sot = connection.from_config("insecure-cloud-alternative-format") self.assertFalse(sot.session.verify) class TestOsloConfig(_TestConnectionBase): def test_from_conf(self): c1 = connection.Connection(cloud='sample-cloud') conn = connection.Connection( session=c1.session, oslo_conf=self._load_ks_cfg_opts() ) # There was no config for keystone self.assertIsInstance( conn.identity, service_description._ServiceDisabledProxyShim ) # But nova was in there self.assertEqual( 'openstack.compute.v2._proxy', conn.compute.__class__.__module__ ) def test_from_conf_filter_service_types(self): c1 = connection.Connection(cloud='sample-cloud') conn = connection.Connection( session=c1.session, oslo_conf=self._load_ks_cfg_opts(), service_types={'orchestration', 'i-am-ignored'}, ) # There was no config for keystone self.assertIsInstance( conn.identity, service_description._ServiceDisabledProxyShim ) # Nova was in there, but disabled because not requested self.assertIsInstance( conn.compute, service_description._ServiceDisabledProxyShim ) class TestNetworkConnection(base.TestCase): # Verify that if the catalog has the suffix we don't mess things up. def test_network_proxy(self): self.os_fixture.v3_token.remove_service('network') svc = self.os_fixture.v3_token.add_service('network') svc.add_endpoint( interface='public', url='https://network.example.com/v2.0', region='RegionOne', ) self.use_keystone_v3() self.assertEqual( 'openstack.network.v2._proxy', self.cloud.network.__class__.__module__, ) self.assert_calls() self.assertEqual( "https://network.example.com/v2.0", self.cloud.network.get_endpoint(), ) class TestNetworkConnectionSuffix(base.TestCase): # We need to do the neutron adapter test differently because it needs # to actually get a catalog. 
def test_network_proxy(self): self.assertEqual( 'openstack.network.v2._proxy', self.cloud.network.__class__.__module__, ) self.assert_calls() self.assertEqual( "https://network.example.com/v2.0", self.cloud.network.get_endpoint(), ) class TestAuthorize(base.TestCase): def test_authorize_works(self): res = self.cloud.authorize() self.assertEqual('KeystoneToken-1', res) def test_authorize_failure(self): self.use_broken_keystone() self.assertRaises( openstack.exceptions.SDKException, self.cloud.authorize ) class TestNewService(base.TestCase): def test_add_service_v1(self): svc = self.os_fixture.v3_token.add_service('fake') svc.add_endpoint( interface='public', region='RegionOne', url=f'https://fake.example.com/v1/{fakes.PROJECT_ID}', ) self.use_keystone_v3() conn = self.cloud service = fake_service.FakeService('fake') conn.add_service(service) # Ensure no discovery calls made self.assertEqual(0, len(self.adapter.request_history)) self.register_uris( [ dict( method='GET', uri='https://fake.example.com', status_code=404, ), dict( method='GET', uri='https://fake.example.com/v1/', status_code=404, ), dict( method='GET', uri=self.get_mock_url('fake'), status_code=404, ), ] ) self.assertEqual( 'openstack.tests.unit.fake.v1._proxy', conn.fake.__class__.__module__, ) self.assertTrue(conn.fake.dummy()) def test_add_service_v2(self): svc = self.os_fixture.v3_token.add_service('fake') svc.add_endpoint( interface='public', region='RegionOne', url=f'https://fake.example.com/v2/{fakes.PROJECT_ID}', ) self.use_keystone_v3() conn = self.cloud self.register_uris( [ dict( method='GET', uri='https://fake.example.com', status_code=404, ), dict( method='GET', uri='https://fake.example.com/v2/', status_code=404, ), dict( method='GET', uri=self.get_mock_url('fake'), status_code=404, ), ] ) service = fake_service.FakeService('fake') conn.add_service(service) self.assertEqual( 'openstack.tests.unit.fake.v2._proxy', conn.fake.__class__.__module__, ) self.assertFalse(conn.fake.dummy()) def 
test_replace_system_service(self): svc = self.os_fixture.v3_token.add_service('fake') svc.add_endpoint( interface='public', region='RegionOne', url=f'https://fake.example.com/v2/{fakes.PROJECT_ID}', ) self.use_keystone_v3() conn = self.cloud # delete native dns service delattr(conn, 'dns') self.register_uris( [ dict( method='GET', uri='https://fake.example.com', status_code=404, ), dict( method='GET', uri='https://fake.example.com/v2/', status_code=404, ), dict( method='GET', uri=self.get_mock_url('fake'), status_code=404, ), ] ) # add fake service with alias 'DNS' service = fake_service.FakeService('fake', aliases=['dns']) conn.add_service(service) # ensure dns service responds as we expect from replacement self.assertFalse(conn.dns.dummy()) def vendor_hook(conn): setattr(conn, 'test', 'test_val') class TestVendorProfile(base.TestCase): def setUp(self): super().setUp() # Create a temporary directory where our test config will live # and insert it into the search path via OS_CLIENT_CONFIG_FILE. 
config_dir = self.useFixture(fixtures.TempDir()).path config_path = os.path.join(config_dir, "clouds.yaml") public_clouds = os.path.join(config_dir, "clouds-public.yaml") with open(config_path, "w") as conf: conf.write(CLOUD_CONFIG) with open(public_clouds, "w") as conf: conf.write(PUBLIC_CLOUDS_YAML) self.useFixture( fixtures.EnvironmentVariable("OS_CLIENT_CONFIG_FILE", config_path) ) self.use_keystone_v2() self.config = openstack.config.loader.OpenStackConfig( vendor_files=[public_clouds] ) def test_conn_from_profile(self): self.cloud = self.config.get_one(cloud='profiled-cloud') conn = connection.Connection(config=self.cloud) self.assertIsNotNone(conn) def test_hook_from_profile(self): self.cloud = self.config.get_one(cloud='profiled-cloud') conn = connection.Connection(config=self.cloud) self.assertEqual('test_val', conn.test) def test_hook_from_connection_param(self): conn = connection.Connection( cloud='sample-cloud', vendor_hook='openstack.tests.unit.test_connection:vendor_hook', ) self.assertEqual('test_val', conn.test) def test_hook_from_connection_ignore_missing(self): conn = connection.Connection( cloud='sample-cloud', vendor_hook='openstack.tests.unit.test_connection:missing', ) self.assertIsNotNone(conn) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_exceptions.py0000664000175000017500000001771100000000000024140 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import json from unittest import mock import uuid from openstack import exceptions from openstack.tests.unit import base class Test_Exception(base.TestCase): def test_method_not_supported(self): exc = exceptions.MethodNotSupported(self.__class__, 'list') expected = ( 'The list method is not supported for ' + 'openstack.tests.unit.test_exceptions.Test_Exception' ) self.assertEqual(expected, str(exc)) class Test_HttpException(base.TestCase): def setUp(self): super().setUp() self.message = "mayday" def _do_raise(self, *args, **kwargs): raise exceptions.HttpException(*args, **kwargs) def test_message(self): exc = self.assertRaises( exceptions.HttpException, self._do_raise, self.message ) self.assertEqual(self.message, exc.message) def test_details(self): details = "some details" exc = self.assertRaises( exceptions.HttpException, self._do_raise, self.message, details=details, ) self.assertEqual(self.message, exc.message) self.assertEqual(details, exc.details) def test_http_status(self): http_status = 123 exc = self.assertRaises( exceptions.HttpException, self._do_raise, self.message, http_status=http_status, ) self.assertEqual(self.message, exc.message) self.assertEqual(http_status, exc.status_code) class TestRaiseFromResponse(base.TestCase): def setUp(self): super().setUp() self.message = "Where is my kitty?" 
def _do_raise(self, *args, **kwargs): return exceptions.raise_from_response(*args, **kwargs) def test_raise_no_exception(self): response = mock.Mock() response.status_code = 200 self.assertIsNone(self._do_raise(response)) def test_raise_not_found_exception(self): response = mock.Mock() response.status_code = 404 response.headers = { 'content-type': 'application/json', 'x-openstack-request-id': uuid.uuid4().hex, } exc = self.assertRaises( exceptions.NotFoundException, self._do_raise, response, error_message=self.message, ) self.assertEqual(self.message, exc.message) self.assertEqual(response.status_code, exc.status_code) self.assertEqual( response.headers.get('x-openstack-request-id'), exc.request_id ) def test_raise_bad_request_exception(self): response = mock.Mock() response.status_code = 400 response.headers = { 'content-type': 'application/json', 'x-openstack-request-id': uuid.uuid4().hex, } exc = self.assertRaises( exceptions.BadRequestException, self._do_raise, response, error_message=self.message, ) self.assertEqual(self.message, exc.message) self.assertEqual(response.status_code, exc.status_code) self.assertEqual( response.headers.get('x-openstack-request-id'), exc.request_id ) def test_raise_http_exception(self): response = mock.Mock() response.status_code = 403 response.headers = { 'content-type': 'application/json', 'x-openstack-request-id': uuid.uuid4().hex, } exc = self.assertRaises( exceptions.HttpException, self._do_raise, response, error_message=self.message, ) self.assertEqual(self.message, exc.message) self.assertEqual(response.status_code, exc.status_code) self.assertEqual( response.headers.get('x-openstack-request-id'), exc.request_id ) def test_raise_compute_format(self): response = mock.Mock() response.status_code = 404 response.headers = { 'content-type': 'application/json', } response.json.return_value = { 'itemNotFound': { 'message': self.message, 'code': 404, } } exc = self.assertRaises( exceptions.NotFoundException, self._do_raise, 
response, error_message=self.message, ) self.assertEqual(response.status_code, exc.status_code) self.assertEqual(self.message, exc.details) self.assertIn(self.message, str(exc)) def test_raise_network_format(self): response = mock.Mock() response.status_code = 404 response.headers = { 'content-type': 'application/json', } response.json.return_value = { 'NeutronError': { 'message': self.message, 'type': 'FooNotFound', 'detail': '', } } exc = self.assertRaises( exceptions.NotFoundException, self._do_raise, response, error_message=self.message, ) self.assertEqual(response.status_code, exc.status_code) self.assertEqual(self.message, exc.details) self.assertIn(self.message, str(exc)) def test_raise_baremetal_old_format(self): response = mock.Mock() response.status_code = 404 response.headers = { 'content-type': 'application/json', } response.json.return_value = { 'error_message': json.dumps( { 'faultstring': self.message, 'faultcode': 'Client', 'debuginfo': None, } ) } exc = self.assertRaises( exceptions.NotFoundException, self._do_raise, response, error_message=self.message, ) self.assertEqual(response.status_code, exc.status_code) self.assertEqual(self.message, exc.details) self.assertIn(self.message, str(exc)) def test_raise_baremetal_corrected_format(self): response = mock.Mock() response.status_code = 404 response.headers = { 'content-type': 'application/json', } response.json.return_value = { 'error_message': { 'faultstring': self.message, 'faultcode': 'Client', 'debuginfo': None, } } exc = self.assertRaises( exceptions.NotFoundException, self._do_raise, response, error_message=self.message, ) self.assertEqual(response.status_code, exc.status_code) self.assertEqual(self.message, exc.details) self.assertIn(self.message, str(exc)) def test_raise_wsme_format(self): response = mock.Mock() response.status_code = 404 response.headers = { 'content-type': 'application/json', } response.json.return_value = { 'faultstring': self.message, 'faultcode': 'Client', 'debuginfo': 
None, } exc = self.assertRaises( exceptions.NotFoundException, self._do_raise, response, error_message=self.message, ) self.assertEqual(response.status_code, exc.status_code) self.assertEqual(self.message, exc.details) self.assertIn(self.message, str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_fakes.py0000664000175000017500000000724100000000000023045 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import format as _format from openstack import resource from openstack.test import fakes from openstack.tests.unit import base class TestGetFake(base.TestCase): def test_generate_fake_resource_one(self): res = fakes.generate_fake_resource(resource.Resource) self.assertIsInstance(res, resource.Resource) def test_generate_fake_resource_list(self): res = list(fakes.generate_fake_resources(resource.Resource, 2)) self.assertEqual(2, len(res)) self.assertIsInstance(res[0], resource.Resource) def test_generate_fake_resource_types(self): class Foo(resource.Resource): a = resource.Body("a", type=str) b = resource.Body("b", type=int) c = resource.Body("c", type=bool) d = resource.Body("d", type=_format.BoolStr) e = resource.Body("e", type=dict) f = resource.URI("path") class Bar(resource.Resource): a = resource.Body("a", type=list, list_type=str) b = resource.Body("b", type=list, list_type=dict) c = resource.Body("c", type=list, list_type=Foo) foo = fakes.generate_fake_resource(Foo) self.assertIsInstance(foo.a, str) self.assertIsInstance(foo.b, int) self.assertIsInstance(foo.c, bool) self.assertIsInstance(foo.d, bool) self.assertIsInstance(foo.e, dict) self.assertIsInstance(foo.f, str) bar = fakes.generate_fake_resource(Bar) self.assertIsInstance(bar.a, list) self.assertEqual(1, len(bar.a)) self.assertIsInstance(bar.a[0], str) self.assertIsInstance(bar.b, list) self.assertEqual(1, len(bar.b)) self.assertIsInstance(bar.b[0], dict) self.assertIsInstance(bar.c, list) self.assertEqual(1, len(bar.c)) self.assertIsInstance(bar.c[0], Foo) self.assertIsInstance(bar.c[0].a, str) self.assertIsInstance(bar.c[0].b, int) self.assertIsInstance(bar.c[0].c, bool) self.assertIsInstance(bar.c[0].d, bool) self.assertIsInstance(bar.c[0].e, dict) self.assertIsInstance(bar.c[0].f, str) def test_generate_fake_resource_attrs(self): class Fake(resource.Resource): a = resource.Body("a", type=str) b = resource.Body("b", type=str) res = fakes.generate_fake_resource(Fake, b="bar") 
self.assertIsInstance(res.a, str) self.assertIsInstance(res.b, str) self.assertEqual("bar", res.b) def test_generate_fake_resource_types_inherit(self): class Fake(resource.Resource): a = resource.Body("a", type=str) class FakeInherit(resource.Resource): a = resource.Body("a", type=Fake) res = fakes.generate_fake_resource(FakeInherit) self.assertIsInstance(res.a, Fake) self.assertIsInstance(res.a.a, str) def test_unknown_attrs_as_props(self): class Fake(resource.Resource): properties = resource.Body("properties") _store_unknown_attrs_as_properties = True res = fakes.generate_fake_resource(Fake) self.assertIsInstance(res.properties, dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_format.py0000664000175000017500000000256100000000000023244 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import format from openstack.tests.unit import base class TestBoolStrFormatter(base.TestCase): def test_deserialize(self): self.assertTrue(format.BoolStr.deserialize(True)) self.assertTrue(format.BoolStr.deserialize('True')) self.assertTrue(format.BoolStr.deserialize('TRUE')) self.assertTrue(format.BoolStr.deserialize('true')) self.assertFalse(format.BoolStr.deserialize(False)) self.assertFalse(format.BoolStr.deserialize('False')) self.assertFalse(format.BoolStr.deserialize('FALSE')) self.assertFalse(format.BoolStr.deserialize('false')) self.assertRaises(ValueError, format.BoolStr.deserialize, None) self.assertRaises(ValueError, format.BoolStr.deserialize, '') self.assertRaises(ValueError, format.BoolStr.deserialize, 'INVALID') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_hacking.py0000664000175000017500000000676300000000000023370 0ustar00zuulzuul00000000000000# Copyright 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack._hacking import checks from openstack.tests.unit import base class HackingTestCase(base.TestCase): """This class tests the hacking checks in openstack._hacking.checks. It works by passing strings to the check methods like the pep8/flake8 parser would. The parser loops over each line in the file and then passes the parameters to the check method. 
The parameter names in the check method dictate what type of object is passed to the check method. The parameter types are:: logical_line: A processed line with the following modifications: - Multi-line statements converted to a single line. - Stripped left and right. - Contents of strings replaced with "xxx" of same length. - Comments removed. physical_line: Raw line of text from the input file. lines: a list of the raw lines from the input file tokens: the tokens that contribute to this logical line line_number: line number in the input file total_lines: number of lines in the input file blank_lines: blank lines before this one indent_char: indentation character in this file (" " or "\t") indent_level: indentation (with tabs expanded to multiples of 8) previous_indent_level: indentation on previous line previous_logical: previous logical line filename: Path of the file being run through pep8 When running a test on a check method the return will be False/None if there is no violation in the sample input. If there is an error a tuple is returned with a position in the line, and a message. So to check the result just assertTrue if the check is expected to fail and assertFalse if it should pass. 
""" def test_assert_no_setupclass(self): self.assertEqual( len(list(checks.assert_no_setupclass("def setUpClass(cls)"))), 1 ) self.assertEqual( len(list(checks.assert_no_setupclass("# setUpClass is evil"))), 0 ) self.assertEqual( len( list( checks.assert_no_setupclass( "def setUpClassyDrinkingLocation(cls)" ) ) ), 0, ) def test_assert_no_deprecated_exceptions(self): self.assertEqual( len( list( checks.assert_no_deprecated_exceptions( "raise exc.OpenStackCloudTimeout", "openstack/cloud/compute.py", ) ) ), 1, ) self.assertEqual( len( list( checks.assert_no_deprecated_exceptions( "raise exc.OpenStackCloudTimeout", "openstack/cloud/exc.py", ) ) ), 0, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_microversions.py0000664000175000017500000000761000000000000024656 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import exceptions from openstack.tests import fakes from openstack.tests.unit import base class TestMicroversions(base.TestCase): def setUp(self): super().setUp() self.use_compute_discovery() def test_get_bad_inferred_max_microversion(self): self.cloud.config.config['compute_api_version'] = '2.61' self.assertRaises( exceptions.ConfigException, self.cloud.get_server, 'doesNotExist', ) self.assert_calls() def test_get_bad_default_max_microversion(self): self.cloud.config.config['compute_default_microversion'] = '2.61' self.assertRaises( exceptions.ConfigException, self.cloud.get_server, 'doesNotExist', ) self.assert_calls() def test_get_bad_inferred_min_microversion(self): self.cloud.config.config['compute_api_version'] = '2.7' self.assertRaises( exceptions.ConfigException, self.cloud.get_server, 'doesNotExist', ) self.assert_calls() def test_get_bad_default_min_microversion(self): self.cloud.config.config['compute_default_microversion'] = '2.7' self.assertRaises( exceptions.ConfigException, self.cloud.get_server, 'doesNotExist', ) self.assert_calls() def test_inferred_default_microversion(self): self.cloud.config.config['compute_api_version'] = '2.42' server1 = fakes.make_fake_server('123', 'mickey') server2 = fakes.make_fake_server('345', 'mouse') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), request_headers={'OpenStack-API-Version': 'compute 2.42'}, json={'servers': [server1, server2]}, ), ] ) r = self.cloud.get_server('mickey', bare=True) self.assertIsNotNone(r) self.assertEqual(server1['name'], r['name']) self.assert_calls() def test_default_microversion(self): self.cloud.config.config['compute_default_microversion'] = '2.42' server1 = fakes.make_fake_server('123', 'mickey') server2 = fakes.make_fake_server('345', 'mouse') self.register_uris( [ dict( method='GET', uri=self.get_mock_url( 'compute', 'public', append=['servers', 'detail'] ), 
request_headers={'OpenStack-API-Version': 'compute 2.42'}, json={'servers': [server1, server2]}, ), ] ) r = self.cloud.get_server('mickey', bare=True) self.assertIsNotNone(r) self.assertEqual(server1['name'], r['name']) self.assert_calls() def test_conflicting_implied_and_direct(self): self.cloud.config.config['compute_default_microversion'] = '2.7' self.cloud.config.config['compute_api_version'] = '2.13' self.assertRaises(exceptions.ConfigException, self.cloud.get_server) # We should fail before we even authenticate self.assertEqual(0, len(self.adapter.request_history)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_missing_version.py0000664000175000017500000000360000000000000025165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import warnings import testtools from openstack import exceptions from openstack import proxy from openstack.tests.unit import base class TestMissingVersion(base.TestCase): def setUp(self): super().setUp() self.os_fixture.clear_tokens() svc = self.os_fixture.v3_token.add_service('image') svc.add_endpoint( url='https://example.com/image/', region='RegionOne', interface='public', ) self.use_keystone_v3() self.use_glance( image_version_json='bad-glance-version.json', image_discovery_url='https://example.com/image/', ) def test_unsupported_version(self): with testtools.ExpectedException(exceptions.NotSupported): self.cloud.image.get('/') self.assert_calls() def test_unsupported_version_override(self): self.cloud.config.config['image_api_version'] = '7' with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") warnings.simplefilter("ignore", DeprecationWarning) self.assertIsInstance(self.cloud.image, proxy.Proxy) self.assertEqual(1, len(w)) self.assertIn( "Service image has no discoverable version.", str(w[-1].message), ) self.assert_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_placement_rest.py0000664000175000017500000000735000000000000024762 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ddt from keystoneauth1 import exceptions from openstack.tests.unit import base @ddt.ddt class TestPlacementRest(base.TestCase): def setUp(self): super().setUp() self.use_placement() def _register_uris(self, status_code=None): uri = dict( method='GET', uri=self.get_mock_url( 'placement', 'public', append=['allocation_candidates'] ), json={}, ) if status_code is not None: uri['status_code'] = status_code self.register_uris([uri]) def _validate_resp(self, resp, status_code): self.assertEqual(status_code, resp.status_code) self.assertEqual( 'https://placement.example.com/allocation_candidates', resp.url ) self.assert_calls() @ddt.data({}, {'raise_exc': False}, {'raise_exc': True}) def test_discovery(self, get_kwargs): self._register_uris() # Regardless of raise_exc, a <400 response doesn't raise rs = self.cloud.placement.get('/allocation_candidates', **get_kwargs) self._validate_resp(rs, 200) @ddt.data({}, {'raise_exc': False}) def test_discovery_err(self, get_kwargs): self._register_uris(status_code=500) # >=400 doesn't raise by default or with explicit raise_exc=False rs = self.cloud.placement.get('/allocation_candidates', **get_kwargs) self._validate_resp(rs, 500) def test_discovery_exc(self): self._register_uris(status_code=500) # raise_exc=True raises a ksa exception appropriate to the status code ex = self.assertRaises( exceptions.InternalServerError, self.cloud.placement.get, '/allocation_candidates', raise_exc=True, ) self._validate_resp(ex.response, 500) def test_microversion_discovery(self): self.assertEqual( (1, 17), self.cloud.placement.get_endpoint_data().max_microversion ) self.assert_calls() class TestBadPlacementRest(base.TestCase): def setUp(self): self.skipTest('Need to re-add support for broken placement versions') super().setUp() # The bad-placement.json is for older placement that was # missing the status field from its discovery doc. This # lets us show that we can talk to such a placement. 
self.use_placement(discovery_fixture='bad-placement.json') def _register_uris(self, status_code=None): uri = dict( method='GET', uri=self.get_mock_url( 'placement', 'public', append=['allocation_candidates'] ), json={}, ) if status_code is not None: uri['status_code'] = status_code self.register_uris([uri]) def _validate_resp(self, resp, status_code): self.assertEqual(status_code, resp.status_code) self.assertEqual( 'https://placement.example.com/allocation_candidates', resp.url ) self.assert_calls() def test_discovery(self): self._register_uris() rs = self.cloud.placement.get('/allocation_candidates') self._validate_resp(rs, 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_proxy.py0000664000175000017500000006636500000000000023151 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import queue from unittest import mock from keystoneauth1 import session from testscenarios import load_tests_apply_scenarios as load_tests # noqa from openstack import exceptions from openstack import proxy from openstack import resource from openstack.tests.unit import base from openstack import utils class DeleteableResource(resource.Resource): allow_delete = True class UpdateableResource(resource.Resource): allow_commit = True class CreateableResource(resource.Resource): allow_create = True class RetrieveableResource(resource.Resource): allow_fetch = True class ListableResource(resource.Resource): allow_list = True class FilterableResource(resource.Resource): allow_list = True base_path = '/fakes' _query_mapping = resource.QueryParameters('a') a = resource.Body('a') b = resource.Body('b') c = resource.Body('c') class HeadableResource(resource.Resource): allow_head = True class TestProxyPrivate(base.TestCase): def setUp(self): super().setUp() def method(self, expected_type, value): return value self.sot = mock.Mock() self.sot.method = method self.session = mock.Mock() self.session._sdk_connection = self.cloud self.fake_proxy = proxy.Proxy(self.session) self.fake_proxy._connection = self.cloud def _test_correct(self, value): decorated = proxy._check_resource(strict=False)(self.sot.method) rv = decorated(self.sot, resource.Resource, value) self.assertEqual(value, rv) def test__check_resource_correct_resource(self): res = resource.Resource() self._test_correct(res) def test__check_resource_notstrict_id(self): self._test_correct("abc123-id") def test__check_resource_strict_id(self): decorated = proxy._check_resource(strict=True)(self.sot.method) self.assertRaisesRegex( ValueError, "A Resource must be passed", decorated, self.sot, resource.Resource, "this-is-not-a-resource", ) def test__check_resource_incorrect_resource(self): class OneType(resource.Resource): pass class AnotherType(resource.Resource): pass value = AnotherType() decorated = 
proxy._check_resource(strict=False)(self.sot.method) self.assertRaisesRegex( ValueError, "Expected OneType but received AnotherType", decorated, self.sot, OneType, value, ) def test__get_uri_attribute_no_parent(self): class Child(resource.Resource): something = resource.Body("something") attr = "something" value = "nothing" child = Child(something=value) result = self.fake_proxy._get_uri_attribute(child, None, attr) self.assertEqual(value, result) def test__get_uri_attribute_with_parent(self): class Parent(resource.Resource): pass value = "nothing" parent = Parent(id=value) result = self.fake_proxy._get_uri_attribute("child", parent, "attr") self.assertEqual(value, result) def test__get_resource_new(self): value = "hello" fake_type = mock.Mock(spec=resource.Resource) fake_type.new = mock.Mock(return_value=value) attrs = {"first": "Brian", "last": "Curtin"} result = self.fake_proxy._get_resource(fake_type, None, **attrs) fake_type.new.assert_called_with(connection=self.cloud, **attrs) self.assertEqual(value, result) def test__get_resource_from_id(self): id = "eye dee" value = "hello" attrs = {"first": "Brian", "last": "Curtin"} # The isinstance check needs to take a type, not an instance, # so the mock.assert_called_with method isn't helpful here since # we can't pass in a mocked object. This class is a crude version # of that same behavior to let us check that `new` gets # called with the expected arguments. 
class Fake: call = {} @classmethod def new(cls, **kwargs): cls.call = kwargs return value result = self.fake_proxy._get_resource(Fake, id, **attrs) self.assertDictEqual( dict(id=id, connection=mock.ANY, **attrs), Fake.call ) self.assertEqual(value, result) def test__get_resource_from_resource(self): res = mock.Mock(spec=resource.Resource) res._update = mock.Mock() attrs = {"first": "Brian", "last": "Curtin"} result = self.fake_proxy._get_resource(resource.Resource, res, **attrs) res._update.assert_called_once_with(**attrs) self.assertEqual(result, res) def test__get_resource_from_munch(self): cls = mock.Mock() res = mock.Mock(spec=resource.Resource) res._update = mock.Mock() cls._from_munch.return_value = res m = utils.Munch(answer=42) attrs = {"first": "Brian", "last": "Curtin"} result = self.fake_proxy._get_resource(cls, m, **attrs) cls._from_munch.assert_called_once_with(m, connection=self.cloud) res._update.assert_called_once_with(**attrs) self.assertEqual(result, res) class TestProxyDelete(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock() self.session._sdk_connection = self.cloud self.fake_id = 1 self.res = mock.Mock(spec=DeleteableResource) self.res.id = self.fake_id self.res.delete = mock.Mock() self.sot = proxy.Proxy(self.session) self.sot._connection = self.cloud DeleteableResource.new = mock.Mock(return_value=self.res) def test_delete(self): self.sot._delete(DeleteableResource, self.res) self.res.delete.assert_called_with(self.sot) self.sot._delete(DeleteableResource, self.fake_id) DeleteableResource.new.assert_called_with( connection=self.cloud, id=self.fake_id ) self.res.delete.assert_called_with(self.sot) # Delete generally doesn't return anything, so we will normally # swallow any return from within a service's proxy, but make sure # we can still return for any cases where values are returned. 
self.res.delete.return_value = self.fake_id rv = self.sot._delete(DeleteableResource, self.fake_id) self.assertEqual(rv, self.fake_id) def test_delete_ignore_missing(self): self.res.delete.side_effect = exceptions.NotFoundException( message="test", http_status=404 ) rv = self.sot._delete(DeleteableResource, self.fake_id) self.assertIsNone(rv) def test_delete_NotFound(self): self.res.delete.side_effect = exceptions.NotFoundException( message="test", http_status=404 ) self.assertRaisesRegex( exceptions.NotFoundException, # TODO(shade) The mocks here are hiding the thing we want to test. "test", self.sot._delete, DeleteableResource, self.res, ignore_missing=False, ) def test_delete_HttpException(self): self.res.delete.side_effect = exceptions.HttpException( message="test", http_status=500 ) self.assertRaises( exceptions.HttpException, self.sot._delete, DeleteableResource, self.res, ignore_missing=False, ) class TestProxyUpdate(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock() self.fake_id = 1 self.fake_result = "fake_result" self.res = mock.Mock(spec=UpdateableResource) self.res.commit = mock.Mock(return_value=self.fake_result) self.sot = proxy.Proxy(self.session) self.sot._connection = self.cloud self.attrs = {"x": 1, "y": 2, "z": 3} UpdateableResource.new = mock.Mock(return_value=self.res) def test_update_resource(self): rv = self.sot._update(UpdateableResource, self.res, **self.attrs) self.assertEqual(rv, self.fake_result) self.res._update.assert_called_once_with(**self.attrs) self.res.commit.assert_called_once_with(self.sot, base_path=None) def test_update_resource_override_base_path(self): base_path = 'dummy' rv = self.sot._update( UpdateableResource, self.res, base_path=base_path, **self.attrs ) self.assertEqual(rv, self.fake_result) self.res._update.assert_called_once_with(**self.attrs) self.res.commit.assert_called_once_with(self.sot, base_path=base_path) def test_update_id(self): rv = self.sot._update(UpdateableResource, 
self.fake_id, **self.attrs) self.assertEqual(rv, self.fake_result) self.res.commit.assert_called_once_with(self.sot, base_path=None) class TestProxyCreate(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock() self.session._sdk_connection = self.cloud self.fake_result = "fake_result" self.res = mock.Mock(spec=CreateableResource) self.res.create = mock.Mock(return_value=self.fake_result) self.sot = proxy.Proxy(self.session) self.sot._connection = self.cloud def test_create_attributes(self): CreateableResource.new = mock.Mock(return_value=self.res) attrs = {"x": 1, "y": 2, "z": 3} rv = self.sot._create(CreateableResource, **attrs) self.assertEqual(rv, self.fake_result) CreateableResource.new.assert_called_once_with( connection=self.cloud, **attrs ) self.res.create.assert_called_once_with(self.sot, base_path=None) def test_create_attributes_override_base_path(self): CreateableResource.new = mock.Mock(return_value=self.res) base_path = 'dummy' attrs = {"x": 1, "y": 2, "z": 3} rv = self.sot._create(CreateableResource, base_path=base_path, **attrs) self.assertEqual(rv, self.fake_result) CreateableResource.new.assert_called_once_with( connection=self.cloud, **attrs ) self.res.create.assert_called_once_with(self.sot, base_path=base_path) class TestProxyBulkCreate(base.TestCase): def setUp(self): super().setUp() class Res(resource.Resource): pass self.session = mock.Mock() self.result = mock.sentinel self.data = mock.Mock() self.sot = proxy.Proxy(self.session) self.cls = Res self.cls.bulk_create = mock.Mock(return_value=self.result) def test_bulk_create_attributes(self): rv = self.sot._bulk_create(self.cls, self.data) self.assertEqual(rv, self.result) self.cls.bulk_create.assert_called_once_with( self.sot, self.data, base_path=None ) def test_bulk_create_attributes_override_base_path(self): base_path = 'dummy' rv = self.sot._bulk_create(self.cls, self.data, base_path=base_path) self.assertEqual(rv, self.result) 
self.cls.bulk_create.assert_called_once_with( self.sot, self.data, base_path=base_path ) class TestProxyGet(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock() self.session._sdk_connection = self.cloud self.fake_id = 1 self.fake_name = "fake_name" self.fake_result = "fake_result" self.res = mock.Mock(spec=RetrieveableResource) self.res.id = self.fake_id self.res.fetch = mock.Mock(return_value=self.fake_result) self.sot = proxy.Proxy(self.session) self.sot._connection = self.cloud RetrieveableResource.new = mock.Mock(return_value=self.res) def test_get_resource(self): rv = self.sot._get(RetrieveableResource, self.res) self.res.fetch.assert_called_with( self.sot, requires_id=True, base_path=None, skip_cache=mock.ANY, error_message=mock.ANY, ) self.assertEqual(rv, self.fake_result) def test_get_resource_with_args(self): args = {"key": "value"} rv = self.sot._get(RetrieveableResource, self.res, **args) self.res._update.assert_called_once_with(**args) self.res.fetch.assert_called_with( self.sot, requires_id=True, base_path=None, skip_cache=mock.ANY, error_message=mock.ANY, ) self.assertEqual(rv, self.fake_result) def test_get_id(self): rv = self.sot._get(RetrieveableResource, self.fake_id) RetrieveableResource.new.assert_called_with( connection=self.cloud, id=self.fake_id ) self.res.fetch.assert_called_with( self.sot, requires_id=True, base_path=None, skip_cache=mock.ANY, error_message=mock.ANY, ) self.assertEqual(rv, self.fake_result) def test_get_base_path(self): base_path = 'dummy' rv = self.sot._get( RetrieveableResource, self.fake_id, base_path=base_path ) RetrieveableResource.new.assert_called_with( connection=self.cloud, id=self.fake_id ) self.res.fetch.assert_called_with( self.sot, requires_id=True, base_path=base_path, skip_cache=mock.ANY, error_message=mock.ANY, ) self.assertEqual(rv, self.fake_result) def test_get_not_found(self): self.res.fetch.side_effect = exceptions.NotFoundException( message="test", http_status=404 ) 
self.assertRaisesRegex( exceptions.NotFoundException, "test", self.sot._get, RetrieveableResource, self.res, ) class TestProxyList(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock() self.args = {"a": "A", "b": "B", "c": "C"} self.fake_response = [resource.Resource()] self.sot = proxy.Proxy(self.session) self.sot._connection = self.cloud ListableResource.list = mock.Mock() ListableResource.list.return_value = self.fake_response def _test_list(self, paginated, base_path=None): rv = self.sot._list( ListableResource, paginated=paginated, base_path=base_path, **self.args, ) self.assertEqual(self.fake_response, rv) ListableResource.list.assert_called_once_with( self.sot, paginated=paginated, base_path=base_path, **self.args ) def test_list_paginated(self): self._test_list(True) def test_list_non_paginated(self): self._test_list(False) def test_list_override_base_path(self): self._test_list(False, base_path='dummy') def test_list_filters_jmespath(self): fake_response = [ FilterableResource(a='a1', b='b1', c='c'), FilterableResource(a='a2', b='b2', c='c'), FilterableResource(a='a3', b='b3', c='c'), ] FilterableResource.list = mock.Mock() FilterableResource.list.return_value = fake_response rv = self.sot._list( FilterableResource, paginated=False, base_path=None, jmespath_filters="[?c=='c']", ) self.assertEqual(3, len(rv)) # Test filtering based on unknown attribute rv = self.sot._list( FilterableResource, paginated=False, base_path=None, jmespath_filters="[?d=='c']", ) self.assertEqual(0, len(rv)) class TestProxyHead(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock() self.session._sdk_connection = self.cloud self.fake_id = 1 self.fake_name = "fake_name" self.fake_result = "fake_result" self.res = mock.Mock(spec=HeadableResource) self.res.id = self.fake_id self.res.head = mock.Mock(return_value=self.fake_result) self.sot = proxy.Proxy(self.session) self.sot._connection = self.cloud HeadableResource.new = 
mock.Mock(return_value=self.res) def test_head_resource(self): rv = self.sot._head(HeadableResource, self.res) self.res.head.assert_called_with(self.sot, base_path=None) self.assertEqual(rv, self.fake_result) def test_head_resource_base_path(self): base_path = 'dummy' rv = self.sot._head(HeadableResource, self.res, base_path=base_path) self.res.head.assert_called_with(self.sot, base_path=base_path) self.assertEqual(rv, self.fake_result) def test_head_id(self): rv = self.sot._head(HeadableResource, self.fake_id) HeadableResource.new.assert_called_with( connection=self.cloud, id=self.fake_id ) self.res.head.assert_called_with(self.sot, base_path=None) self.assertEqual(rv, self.fake_result) class TestExtractName(base.TestCase): scenarios = [ ('slash_servers_bare', dict(url='/servers', parts=['servers'])), ('slash_servers_arg', dict(url='/servers/1', parts=['server'])), ('servers_bare', dict(url='servers', parts=['servers'])), ('servers_arg', dict(url='servers/1', parts=['server'])), ('networks_bare', dict(url='/v2.0/networks', parts=['networks'])), ('networks_arg', dict(url='/v2.0/networks/1', parts=['network'])), ('tokens', dict(url='/v3/tokens', parts=['tokens'])), ('discovery', dict(url='/', parts=['discovery'])), ( 'secgroups', dict( url='/servers/1/os-security-groups', parts=['server', 'os-security-groups'], ), ), ('bm_chassis', dict(url='/v1/chassis/id', parts=['chassis'])), ] def test_extract_name(self): results = proxy.Proxy(mock.Mock())._extract_name(self.url) self.assertEqual(self.parts, results) class TestProxyCache(base.TestCase): class Res(resource.Resource): base_path = 'fake' allow_commit = True allow_fetch = True foo = resource.Body('foo') def setUp(self): super().setUp(cloud_config_fixture='clouds_cache.yaml') self.session = mock.Mock(spec=session.Session) self.session._sdk_connection = self.cloud self.session.get_project_id = mock.Mock(return_value='fake_prj') self.response = mock.Mock() self.response.status_code = 200 self.response.history = [] 
self.response.headers = {} self.response.body = {} self.response.json = mock.Mock(return_value=self.response.body) self.session.request = mock.Mock(return_value=self.response) self.sot = proxy.Proxy(self.session) self.sot._connection = self.cloud self.sot.service_type = 'srv' def _get_key(self, id): return "srv.fake.fake/%s.{'microversion': None, 'params': {}}" % id def test_get_not_in_cache(self): self.cloud._cache_expirations['srv.fake'] = 5 self.sot._get(self.Res, '1') self.session.request.assert_called_with( 'fake/1', 'GET', connect_retries=mock.ANY, raise_exc=mock.ANY, global_request_id=mock.ANY, microversion=mock.ANY, params=mock.ANY, endpoint_filter=mock.ANY, headers=mock.ANY, rate_semaphore=mock.ANY, ) self.assertIn(self._get_key(1), self.cloud._api_cache_keys) def test_get_from_cache(self): key = self._get_key(2) self.cloud._cache.set(key, self.response) # set expiration for the resource to respect cache self.cloud._cache_expirations['srv.fake'] = 5 self.sot._get(self.Res, '2') self.session.request.assert_not_called() def test_modify(self): key = self._get_key(3) self.cloud._cache.set(key, self.response) self.cloud._api_cache_keys.add(key) self.cloud._cache_expirations['srv.fake'] = 5 # Ensure first call gets value from cache self.sot._get(self.Res, '3') self.session.request.assert_not_called() # update call invalidates the cache and triggers API rs = self.Res.existing(id='3') self.sot._update(self.Res, rs, foo='bar') self.session.request.assert_called() self.assertIsNotNone(self.cloud._cache.get(key)) self.assertEqual('NoValue', type(self.cloud._cache.get(key)).__name__) self.assertNotIn(key, self.cloud._api_cache_keys) # next get call again triggers API self.sot._get(self.Res, '3') self.session.request.assert_called() def test_get_bypass_cache(self): key = self._get_key(4) resp = copy.deepcopy(self.response) resp.body = {'foo': 'bar'} self.cloud._api_cache_keys.add(key) self.cloud._cache.set(key, resp) # set expiration for the resource to respect cache 
self.cloud._cache_expirations['srv.fake'] = 5 self.sot._get(self.Res, '4', skip_cache=True) self.session.request.assert_called() # validate we got empty body as expected, and not what is in cache self.assertEqual(dict(), self.response.body) self.assertNotIn(key, self.cloud._api_cache_keys) self.assertEqual('NoValue', type(self.cloud._cache.get(key)).__name__) class TestProxyCleanup(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock() self.session._sdk_connection = self.cloud self.fake_id = 1 self.fake_name = "fake_name" self.fake_result = "fake_result" self.res = mock.Mock(spec=resource.Resource) self.res.id = self.fake_id self.res.created_at = '2020-01-02T03:04:05' self.res.updated_at = '2020-01-03T03:04:05' self.res_no_updated = mock.Mock(spec=resource.Resource) self.res_no_updated.created_at = '2020-01-02T03:04:05' self.sot = proxy.Proxy(self.session) self.sot.service_type = "block-storage" self.delete_mock = mock.Mock() def test_filters_evaluation_created_at(self): self.assertTrue( self.sot._service_cleanup_resource_filters_evaluation( self.res, filters={'created_at': '2020-02-03T00:00:00'} ) ) def test_filters_evaluation_created_at_not(self): self.assertFalse( self.sot._service_cleanup_resource_filters_evaluation( self.res, filters={'created_at': '2020-01-01T00:00:00'} ) ) def test_filters_evaluation_updated_at(self): self.assertTrue( self.sot._service_cleanup_resource_filters_evaluation( self.res, filters={'updated_at': '2020-02-03T00:00:00'} ) ) def test_filters_evaluation_updated_at_not(self): self.assertFalse( self.sot._service_cleanup_resource_filters_evaluation( self.res, filters={'updated_at': '2020-01-01T00:00:00'} ) ) def test_filters_evaluation_updated_at_missing(self): self.assertFalse( self.sot._service_cleanup_resource_filters_evaluation( self.res_no_updated, filters={'updated_at': '2020-01-01T00:00:00'}, ) ) def test_filters_empty(self): self.assertTrue( self.sot._service_cleanup_resource_filters_evaluation( 
self.res_no_updated ) ) def test_service_cleanup_dry_run(self): self.assertTrue( self.sot._service_cleanup_del_res( self.delete_mock, self.res, dry_run=True ) ) self.delete_mock.assert_not_called() def test_service_cleanup_dry_run_default(self): self.assertTrue( self.sot._service_cleanup_del_res(self.delete_mock, self.res) ) self.delete_mock.assert_not_called() def test_service_cleanup_real_run(self): self.assertTrue( self.sot._service_cleanup_del_res( self.delete_mock, self.res, dry_run=False, ) ) self.delete_mock.assert_called_with(self.res) def test_service_cleanup_real_run_identified_resources(self): rd = dict() self.assertTrue( self.sot._service_cleanup_del_res( self.delete_mock, self.res, dry_run=False, identified_resources=rd, ) ) self.delete_mock.assert_called_with(self.res) self.assertEqual(self.res, rd[self.res.id]) def test_service_cleanup_resource_evaluation_false(self): self.assertFalse( self.sot._service_cleanup_del_res( self.delete_mock, self.res, dry_run=False, resource_evaluation_fn=lambda x, y, z: False, ) ) self.delete_mock.assert_not_called() def test_service_cleanup_resource_evaluation_true(self): self.assertTrue( self.sot._service_cleanup_del_res( self.delete_mock, self.res, dry_run=False, resource_evaluation_fn=lambda x, y, z: True, ) ) self.delete_mock.assert_called() def test_service_cleanup_resource_evaluation_override_filters(self): self.assertFalse( self.sot._service_cleanup_del_res( self.delete_mock, self.res, dry_run=False, resource_evaluation_fn=lambda x, y, z: False, filters={'created_at': '2200-01-01'}, ) ) def test_service_cleanup_filters(self): self.assertTrue( self.sot._service_cleanup_del_res( self.delete_mock, self.res, dry_run=False, filters={'created_at': '2200-01-01'}, ) ) self.delete_mock.assert_called() def test_service_cleanup_queue(self): q = queue.Queue() self.assertTrue( self.sot._service_cleanup_del_res( self.delete_mock, self.res, dry_run=False, client_status_queue=q, filters={'created_at': '2200-01-01'}, ) ) 
self.assertEqual(self.res, q.get_nowait()) def test_should_skip_resource_cleanup(self): excluded = ["block_storage.backup"] self.assertTrue( self.sot.should_skip_resource_cleanup("backup", excluded) ) self.assertFalse( self.sot.should_skip_resource_cleanup("volume", excluded) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_proxy_base.py0000664000175000017500000002431100000000000024124 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from openstack.tests.unit import base class TestProxyBase(base.TestCase): def setUp(self): super().setUp() self.session = mock.Mock() def _verify( self, mock_method, test_method, *, method_args=None, method_kwargs=None, method_result=None, expected_args=None, expected_kwargs=None, expected_result=None, ): with mock.patch(mock_method) as mocked: mocked.return_value = expected_result if any( [ method_args, method_kwargs, expected_args, expected_kwargs, ] ): method_args = method_args or () method_kwargs = method_kwargs or {} expected_args = expected_args or () expected_kwargs = expected_kwargs or {} if method_result: self.assertEqual( method_result, test_method(*method_args, **method_kwargs), ) else: self.assertEqual( expected_result, test_method(*method_args, **method_kwargs), ) # Check how the mock was called in detail called_args, called_kwargs = mocked.call_args self.assertEqual(expected_args, list(called_args)) # NOTE(gtema): if base_path is not in expected_kwargs or empty # exclude it from the comparison, since some methods might # still invoke method with None value base_path = expected_kwargs.get('base_path', None) if base_path is None: expected_kwargs.pop('base_path', None) called_kwargs.pop('base_path', None) # ditto for paginated paginated = expected_kwargs.get('paginated', None) if paginated is None: expected_kwargs.pop('paginated', None) called_kwargs.pop('paginated', None) # and ignore_missing ignore_missing = expected_kwargs.get('ignore_missing', None) if ignore_missing is None: expected_kwargs.pop('ignore_missing', None) called_kwargs.pop('ignore_missing', None) self.assertDictEqual(expected_kwargs, called_kwargs) else: self.assertEqual(expected_result, test_method()) mocked.assert_called_with(test_method.__self__) def verify_create( self, test_method, resource_type, base_path=None, *, method_args=None, method_kwargs=None, expected_args=None, expected_kwargs=None, expected_result="result", 
mock_method="openstack.proxy.Proxy._create", ): if method_args is None: method_args = [] if method_kwargs is None: method_kwargs = {"x": 1, "y": 2, "z": 3} if expected_args is None: expected_args = method_args.copy() if expected_kwargs is None: expected_kwargs = method_kwargs.copy() expected_kwargs["base_path"] = base_path self._verify( mock_method, test_method, method_args=method_args, method_kwargs=method_kwargs, expected_args=[resource_type] + expected_args, expected_kwargs=expected_kwargs, expected_result=expected_result, ) def verify_delete( self, test_method, resource_type, ignore_missing=True, *, method_args=None, method_kwargs=None, expected_args=None, expected_kwargs=None, mock_method="openstack.proxy.Proxy._delete", ): if method_args is None: method_args = ['resource_id'] if method_kwargs is None: method_kwargs = {} method_kwargs["ignore_missing"] = ignore_missing if expected_args is None: expected_args = method_args.copy() if expected_kwargs is None: expected_kwargs = method_kwargs.copy() self._verify( mock_method, test_method, method_args=method_args, method_kwargs=method_kwargs, expected_args=[resource_type] + expected_args, expected_kwargs=expected_kwargs, ) def verify_get( self, test_method, resource_type, requires_id=False, base_path=None, *, method_args=None, method_kwargs=None, expected_args=None, expected_kwargs=None, mock_method="openstack.proxy.Proxy._get", ): if method_args is None: method_args = ['resource_id'] if method_kwargs is None: method_kwargs = {} if expected_args is None: expected_args = method_args.copy() if expected_kwargs is None: expected_kwargs = method_kwargs.copy() self._verify( mock_method, test_method, method_args=method_args, method_kwargs=method_kwargs, expected_args=[resource_type] + expected_args, expected_kwargs=expected_kwargs, ) def verify_get_overrided(self, proxy, resource_type, patch_target): with mock.patch(patch_target, autospec=True) as res: proxy._get_resource = mock.Mock(return_value=res) 
proxy._get(resource_type) res.fetch.assert_called_once_with( proxy, requires_id=True, base_path=None, error_message=mock.ANY, skip_cache=False, ) def verify_head( self, test_method, resource_type, base_path=None, *, method_args=None, method_kwargs=None, expected_args=None, expected_kwargs=None, mock_method="openstack.proxy.Proxy._head", ): if method_args is None: method_args = ['resource_id'] if method_kwargs is None: method_kwargs = {} expected_args = expected_args or method_args.copy() expected_kwargs = expected_kwargs or method_kwargs.copy() self._verify( mock_method, test_method, method_args=method_args, method_kwargs=method_kwargs, expected_args=[resource_type] + expected_args, expected_kwargs=expected_kwargs, ) def verify_find( self, test_method, resource_type, name_or_id='resource_name', ignore_missing=True, *, method_args=None, method_kwargs=None, expected_args=None, expected_kwargs=None, mock_method="openstack.proxy.Proxy._find", ): method_args = [name_or_id] + (method_args or []) method_kwargs = method_kwargs or {} method_kwargs["ignore_missing"] = ignore_missing expected_args = expected_args or method_args.copy() expected_kwargs = expected_kwargs or method_kwargs.copy() self._verify( mock_method, test_method, method_args=method_args, method_kwargs=method_kwargs, expected_args=[resource_type] + expected_args, expected_kwargs=expected_kwargs, ) def verify_list( self, test_method, resource_type, paginated=None, base_path=None, *, method_args=None, method_kwargs=None, expected_args=None, expected_kwargs=None, mock_method="openstack.proxy.Proxy._list", ): if method_args is None: method_args = [] if method_kwargs is None: method_kwargs = {} if paginated is not None: method_kwargs["paginated"] = paginated if expected_args is None: expected_args = method_args.copy() if expected_kwargs is None: expected_kwargs = method_kwargs.copy() if base_path is not None: expected_kwargs["base_path"] = base_path self._verify( mock_method, test_method, method_args=method_args, 
method_kwargs=method_kwargs, expected_args=[resource_type] + expected_args, expected_kwargs=expected_kwargs, ) def verify_update( self, test_method, resource_type, base_path=None, *, method_args=None, method_kwargs=None, expected_args=None, expected_kwargs=None, expected_result="result", mock_method="openstack.proxy.Proxy._update", ): if method_args is None: method_args = ['resource_id'] if method_kwargs is None: method_kwargs = {"x": 1, "y": 2, "z": 3} method_kwargs["base_path"] = base_path if expected_args is None: expected_args = method_args.copy() if expected_kwargs is None: expected_kwargs = method_kwargs.copy() self._verify( mock_method, test_method, method_args=method_args, method_kwargs=method_kwargs, expected_args=[resource_type] + expected_args, expected_kwargs=expected_kwargs, ) def verify_wait_for_status( self, test_method, mock_method="openstack.resource.wait_for_status", **kwargs, ): self._verify(mock_method, test_method, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_resource.py0000664000175000017500000036070200000000000023607 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools import json import logging from unittest import mock from keystoneauth1 import adapter import requests from openstack import dns from openstack import exceptions from openstack import format from openstack import resource from openstack.tests.unit import base from openstack import utils class FakeResponse: def __init__(self, response, status_code=200, headers=None): self.body = response self.status_code = status_code headers = headers if headers else {'content-type': 'application/json'} self.headers = requests.structures.CaseInsensitiveDict(headers) def json(self): return self.body class TestComponent(base.TestCase): class ExampleComponent(resource._BaseComponent): key = "_example" # Since we're testing ExampleComponent, which is as isolated as we # can test _BaseComponent due to it's needing to be a data member # of a class that has an attribute on the parent class named `key`, # each test has to implement a class with a name that is the same # as ExampleComponent.key, which should be a dict containing the # keys and values to test against. def test_implementations(self): self.assertEqual("_body", resource.Body.key) self.assertEqual("_header", resource.Header.key) self.assertEqual("_uri", resource.URI.key) def test_creation(self): sot = resource._BaseComponent( "name", type=int, default=1, alternate_id=True, aka="alias" ) self.assertEqual("name", sot.name) self.assertEqual(int, sot.type) self.assertEqual(1, sot.default) self.assertEqual("alias", sot.aka) self.assertTrue(sot.alternate_id) def test_get_no_instance(self): sot = resource._BaseComponent("test") # Test that we short-circuit everything when given no instance. result = sot.__get__(None, None) self.assertIs(sot, result) # NOTE: Some tests will use a default=1 setting when testing result # values that should be None because the default-for-default is also None. 
def test_get_name_None(self): name = "name" class Parent: _example = {name: None} instance = Parent() sot = TestComponent.ExampleComponent(name, default=1) # Test that we short-circuit any typing of a None value. result = sot.__get__(instance, None) self.assertIsNone(result) def test_get_default(self): expected_result = 123 class Parent: _example = {} instance = Parent() # NOTE: type=dict but the default value is an int. If we didn't # short-circuit the typing part of __get__ it would fail. sot = TestComponent.ExampleComponent( "name", type=dict, default=expected_result ) # Test that we directly return any default value. result = sot.__get__(instance, None) self.assertEqual(expected_result, result) def test_get_name_untyped(self): name = "name" expected_result = 123 class Parent: _example = {name: expected_result} instance = Parent() sot = TestComponent.ExampleComponent("name") # Test that we return any the value as it is set. result = sot.__get__(instance, None) self.assertEqual(expected_result, result) # The code path for typing after a raw value has been found is the same. def test_get_name_typed(self): name = "name" value = "123" class Parent: _example = {name: value} instance = Parent() sot = TestComponent.ExampleComponent("name", type=int) # Test that we run the underlying value through type conversion. result = sot.__get__(instance, None) self.assertEqual(int(value), result) def test_get_name_formatter(self): name = "name" value = "123" expected_result = "one hundred twenty three" class Parent: _example = {name: value} class FakeFormatter(format.Formatter): @classmethod def deserialize(cls, value): return expected_result instance = Parent() sot = TestComponent.ExampleComponent("name", type=FakeFormatter) # Mock out issubclass rather than having an actual format.Formatter # This can't be mocked via decorator, isolate it to wrapping the call. 
result = sot.__get__(instance, None) self.assertEqual(expected_result, result) def test_set_name_untyped(self): name = "name" expected_value = "123" class Parent: _example = {} instance = Parent() sot = TestComponent.ExampleComponent("name") # Test that we don't run the value through type conversion. sot.__set__(instance, expected_value) self.assertEqual(expected_value, instance._example[name]) def test_set_name_typed(self): expected_value = "123" class Parent: _example = {} instance = Parent() # The type we give to ExampleComponent has to be an actual type, # not an instance, so we can't get the niceties of a mock.Mock # instance that would allow us to call `assert_called_once_with` to # ensure that we're sending the value through the type. # Instead, we use this tiny version of a similar thing. class FakeType: calls = [] def __init__(self, arg): FakeType.calls.append(arg) sot = TestComponent.ExampleComponent("name", type=FakeType) # Test that we run the value through type conversion. sot.__set__(instance, expected_value) self.assertEqual([expected_value], FakeType.calls) def test_set_name_formatter(self): expected_value = "123" class Parent: _example = {} instance = Parent() # As with test_set_name_typed, create a pseudo-Mock to track what # gets called on the type. class FakeFormatter(format.Formatter): calls = [] @classmethod def deserialize(cls, arg): FakeFormatter.calls.append(arg) sot = TestComponent.ExampleComponent("name", type=FakeFormatter) # Test that we run the value through type conversion. 
sot.__set__(instance, expected_value) self.assertEqual([expected_value], FakeFormatter.calls) def test_delete_name(self): name = "name" expected_value = "123" class Parent: _example = {name: expected_value} instance = Parent() sot = TestComponent.ExampleComponent("name") sot.__delete__(instance) self.assertNotIn(name, instance._example) def test_delete_name_doesnt_exist(self): name = "name" expected_value = "123" class Parent: _example = {"what": expected_value} instance = Parent() sot = TestComponent.ExampleComponent(name) sot.__delete__(instance) self.assertNotIn(name, instance._example) class TestComponentManager(base.TestCase): def test_create_basic(self): sot = resource._ComponentManager() self.assertEqual(dict(), sot.attributes) self.assertEqual(set(), sot._dirty) def test_create_unsynced(self): attrs = {"hey": 1, "hi": 2, "hello": 3} sync = False sot = resource._ComponentManager(attributes=attrs, synchronized=sync) self.assertEqual(attrs, sot.attributes) self.assertEqual(set(attrs.keys()), sot._dirty) def test_create_synced(self): attrs = {"hey": 1, "hi": 2, "hello": 3} sync = True sot = resource._ComponentManager(attributes=attrs, synchronized=sync) self.assertEqual(attrs, sot.attributes) self.assertEqual(set(), sot._dirty) def test_getitem(self): key = "key" value = "value" attrs = {key: value} sot = resource._ComponentManager(attributes=attrs) self.assertEqual(value, sot.__getitem__(key)) def test_setitem_new(self): key = "key" value = "value" sot = resource._ComponentManager() sot.__setitem__(key, value) self.assertIn(key, sot.attributes) self.assertIn(key, sot.dirty) def test_setitem_unchanged(self): key = "key" value = "value" attrs = {key: value} sot = resource._ComponentManager(attributes=attrs, synchronized=True) # This shouldn't end up in the dirty list since we're just re-setting. 
sot.__setitem__(key, value) self.assertEqual(value, sot.attributes[key]) self.assertNotIn(key, sot.dirty) def test_delitem(self): key = "key" value = "value" attrs = {key: value} sot = resource._ComponentManager(attributes=attrs, synchronized=True) sot.__delitem__(key) self.assertIsNone(sot.dirty[key]) def test_iter(self): attrs = {"key": "value"} sot = resource._ComponentManager(attributes=attrs) self.assertCountEqual(iter(attrs), sot.__iter__()) def test_len(self): attrs = {"key": "value"} sot = resource._ComponentManager(attributes=attrs) self.assertEqual(len(attrs), sot.__len__()) def test_dirty(self): key = "key" key2 = "key2" value = "value" attrs = {key: value} sot = resource._ComponentManager(attributes=attrs, synchronized=False) self.assertEqual({key: value}, sot.dirty) sot.__setitem__(key2, value) self.assertEqual({key: value, key2: value}, sot.dirty) def test_clean(self): key = "key" value = "value" attrs = {key: value} sot = resource._ComponentManager(attributes=attrs, synchronized=False) self.assertEqual(attrs, sot.dirty) sot.clean() self.assertEqual(dict(), sot.dirty) class Test_Request(base.TestCase): def test_create(self): uri = 1 body = 2 headers = 3 sot = resource._Request(uri, body, headers) self.assertEqual(uri, sot.url) self.assertEqual(body, sot.body) self.assertEqual(headers, sot.headers) class TestQueryParameters(base.TestCase): def test_create(self): location = "location" mapping = { "first_name": "first-name", "second_name": {"name": "second-name"}, "third_name": {"name": "third", "type": int}, } sot = resource.QueryParameters(location, **mapping) self.assertEqual( { "location": "location", "first_name": "first-name", "second_name": {"name": "second-name"}, "third_name": {"name": "third", "type": int}, "limit": "limit", "marker": "marker", }, sot._mapping, ) def test_transpose_unmapped(self): def _type(value, rtype): self.assertIs(rtype, mock.sentinel.resource_type) return value * 10 location = "location" mapping = { "first_name": 
"first-name", "pet_name": {"name": "pet"}, "answer": {"name": "answer", "type": int}, "complex": {"type": _type}, } sot = resource.QueryParameters(location, **mapping) result = sot._transpose( { "location": "Brooklyn", "first_name": "Brian", "pet_name": "Meow", "answer": "42", "last_name": "Curtin", "complex": 1, }, mock.sentinel.resource_type, ) # last_name isn't mapped and shouldn't be included self.assertEqual( { "location": "Brooklyn", "first-name": "Brian", "pet": "Meow", "answer": 42, "complex": 10, }, result, ) def test_transpose_not_in_query(self): location = "location" mapping = { "first_name": "first-name", "pet_name": {"name": "pet"}, "answer": {"name": "answer", "type": int}, } sot = resource.QueryParameters(location, **mapping) result = sot._transpose( {"location": "Brooklyn"}, mock.sentinel.resource_type ) # first_name not being in the query shouldn't affect results self.assertEqual({"location": "Brooklyn"}, result) class TestResource(base.TestCase): def test_initialize_basic(self): body = {"body": 1} header = {"header": 2, "Location": "somewhere"} uri = {"uri": 3} computed = {"computed": 4} everything = dict( itertools.chain( body.items(), header.items(), uri.items(), computed.items(), ) ) mock_collect = mock.Mock() mock_collect.return_value = body, header, uri, computed with mock.patch.object( resource.Resource, "_collect_attrs", mock_collect ): sot = resource.Resource(_synchronized=False, **everything) mock_collect.assert_called_once_with(everything) self.assertIsNone(sot.location) self.assertIsInstance(sot._body, resource._ComponentManager) self.assertEqual(body, sot._body.dirty) self.assertIsInstance(sot._header, resource._ComponentManager) self.assertEqual(header, sot._header.dirty) self.assertIsInstance(sot._uri, resource._ComponentManager) self.assertEqual(uri, sot._uri.dirty) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) 
self.assertFalse(sot.allow_list) self.assertFalse(sot.allow_head) self.assertEqual('PUT', sot.commit_method) self.assertEqual('POST', sot.create_method) def test_repr(self): a = {"a": 1} b = {"b": 2} c = {"c": 3} d = {"d": 4} class Test(resource.Resource): def __init__(self): self._body = mock.Mock() self._body.attributes.items = mock.Mock(return_value=a.items()) self._header = mock.Mock() self._header.attributes.items = mock.Mock( return_value=b.items() ) self._uri = mock.Mock() self._uri.attributes.items = mock.Mock(return_value=c.items()) self._computed = mock.Mock() self._computed.attributes.items = mock.Mock( return_value=d.items() ) the_repr = repr(Test()) # Don't test the arguments all together since the dictionary order # they're rendered in can't be depended on, nor does it matter. self.assertIn("openstack.tests.unit.test_resource.Test", the_repr) self.assertIn("a=1", the_repr) self.assertIn("b=2", the_repr) self.assertIn("c=3", the_repr) self.assertIn("d=4", the_repr) def test_equality(self): class Example(resource.Resource): x = resource.Body("x") y = resource.Header("y") z = resource.URI("z") e1 = Example(x=1, y=2, z=3) e2 = Example(x=1, y=2, z=3) e3 = Example(x=0, y=0, z=0) self.assertEqual(e1, e2) self.assertNotEqual(e1, e3) self.assertNotEqual(e1, None) def test__update(self): sot = resource.Resource() body = "body" header = "header" uri = "uri" computed = "computed" sot._collect_attrs = mock.Mock( return_value=(body, header, uri, computed) ) sot._body.update = mock.Mock() sot._header.update = mock.Mock() sot._uri.update = mock.Mock() sot._computed.update = mock.Mock() args = {"arg": 1} sot._update(**args) sot._collect_attrs.assert_called_once_with(args) sot._body.update.assert_called_once_with(body) sot._header.update.assert_called_once_with(header) sot._uri.update.assert_called_once_with(uri) sot._computed.update.assert_called_with(computed) def test__consume_attrs(self): serverside_key1 = "someKey1" clientside_key1 = "some_key1" serverside_key2 = 
"someKey2" clientside_key2 = "some_key2" value1 = "value1" value2 = "value2" mapping = { serverside_key1: clientside_key1, serverside_key2: clientside_key2, } other_key = "otherKey" other_value = "other" attrs = { clientside_key1: value1, serverside_key2: value2, other_key: other_value, } sot = resource.Resource() result = sot._consume_attrs(mapping, attrs) # Make sure that the expected key was consumed and we're only # left with the other stuff. self.assertDictEqual({other_key: other_value}, attrs) # Make sure that after we've popped our relevant client-side # key off that we are returning it keyed off of its server-side # name. self.assertDictEqual( {serverside_key1: value1, serverside_key2: value2}, result ) def test__mapping_defaults(self): # Check that even on an empty class, we get the expected # built-in attributes. self.assertIn("location", resource.Resource._computed_mapping()) self.assertIn("name", resource.Resource._body_mapping()) self.assertIn("id", resource.Resource._body_mapping()) def test__mapping_overrides(self): # Iterating through the MRO used to wipe out overrides of mappings # found in base classes. 
new_name = "MyName" new_id = "MyID" class Test(resource.Resource): name = resource.Body(new_name) id = resource.Body(new_id) mapping = Test._body_mapping() self.assertEqual("name", mapping["MyName"]) self.assertEqual("id", mapping["MyID"]) def test__body_mapping(self): class Test(resource.Resource): x = resource.Body("x") y = resource.Body("y") z = resource.Body("z") self.assertIn("x", Test._body_mapping()) self.assertIn("y", Test._body_mapping()) self.assertIn("z", Test._body_mapping()) def test__header_mapping(self): class Test(resource.Resource): x = resource.Header("x") y = resource.Header("y") z = resource.Header("z") self.assertIn("x", Test._header_mapping()) self.assertIn("y", Test._header_mapping()) self.assertIn("z", Test._header_mapping()) def test__uri_mapping(self): class Test(resource.Resource): x = resource.URI("x") y = resource.URI("y") z = resource.URI("z") self.assertIn("x", Test._uri_mapping()) self.assertIn("y", Test._uri_mapping()) self.assertIn("z", Test._uri_mapping()) def test__getattribute__id_in_body(self): id = "lol" sot = resource.Resource(id=id) result = getattr(sot, "id") self.assertEqual(result, id) def test__getattribute__id_with_alternate(self): id = "lol" class Test(resource.Resource): blah = resource.Body("blah", alternate_id=True) sot = Test(blah=id) result = getattr(sot, "id") self.assertEqual(result, id) def test__getattribute__id_without_alternate(self): class Test(resource.Resource): id = None sot = Test() self.assertIsNone(sot.id) def test__alternate_id_None(self): self.assertEqual("", resource.Resource._alternate_id()) def test__alternate_id(self): class Test(resource.Resource): alt = resource.Body("the_alt", alternate_id=True) self.assertEqual("the_alt", Test._alternate_id()) value1 = "lol" sot = Test(alt=value1) self.assertEqual(sot.alt, value1) self.assertEqual(sot.id, value1) value2 = "rofl" sot = Test(the_alt=value2) self.assertEqual(sot.alt, value2) self.assertEqual(sot.id, value2) def 
test__alternate_id_from_other_property(self): class Test(resource.Resource): foo = resource.Body("foo") bar = resource.Body("bar", alternate_id=True) # NOTE(redrobot): My expectation looking at the Test class defined # in this test is that because the alternate_id parameter is # is being set to True on the "bar" property of the Test class, # then the _alternate_id() method should return the name of that "bar" # property. self.assertEqual("bar", Test._alternate_id()) sot = Test(bar='bunnies') self.assertEqual(sot.id, 'bunnies') self.assertEqual(sot.bar, 'bunnies') sot = Test(id='chickens', bar='bunnies') self.assertEqual(sot.id, 'chickens') self.assertEqual(sot.bar, 'bunnies') def test__get_id_instance(self): class Test(resource.Resource): id = resource.Body("id") value = "id" sot = Test(id=value) self.assertEqual(value, sot._get_id(sot)) def test__get_id_instance_alternate(self): class Test(resource.Resource): attr = resource.Body("attr", alternate_id=True) value = "id" sot = Test(attr=value) self.assertEqual(value, sot._get_id(sot)) def test__get_id_value(self): value = "id" self.assertEqual(value, resource.Resource._get_id(value)) def test__attributes(self): class Test(resource.Resource): foo = resource.Header('foo') bar = resource.Body('bar', aka='_bar') bar_local = resource.Body('bar_remote') sot = Test() self.assertEqual( sorted( ['foo', 'bar', '_bar', 'bar_local', 'id', 'name', 'location'] ), sorted(sot._attributes()), ) self.assertEqual( sorted(['foo', 'bar', 'bar_local', 'id', 'name', 'location']), sorted(sot._attributes(include_aliases=False)), ) self.assertEqual( sorted( ['foo', 'bar', '_bar', 'bar_remote', 'id', 'name', 'location'] ), sorted(sot._attributes(remote_names=True)), ) self.assertEqual( sorted(['bar', '_bar', 'bar_local', 'id', 'name', 'location']), sorted( sot._attributes( components=tuple([resource.Body, resource.Computed]) ) ), ) self.assertEqual( ('foo',), tuple(sot._attributes(components=tuple([resource.Header]))), ) def 
test__attributes_iterator(self): class Parent(resource.Resource): foo = resource.Header('foo') bar = resource.Body('bar', aka='_bar') class Child(Parent): foo1 = resource.Header('foo1') bar1 = resource.Body('bar1') sot = Child() expected = ['foo', 'bar', 'foo1', 'bar1'] for attr, component in sot._attributes_iterator(): if attr in expected: expected.remove(attr) self.assertEqual([], expected) expected = ['foo', 'foo1'] # Check we iterate only over headers for attr, component in sot._attributes_iterator( components=tuple([resource.Header]) ): if attr in expected: expected.remove(attr) self.assertEqual([], expected) def test_to_dict(self): class Test(resource.Resource): foo = resource.Header('foo') bar = resource.Body('bar', aka='_bar') res = Test(id='FAKE_ID') expected = { 'id': 'FAKE_ID', 'name': None, 'location': None, 'foo': None, 'bar': None, '_bar': None, } self.assertEqual(expected, res.to_dict()) def test_to_dict_nested(self): class Test(resource.Resource): foo = resource.Header('foo') bar = resource.Body('bar') a_list = resource.Body('a_list') class Sub(resource.Resource): sub = resource.Body('foo') sub = Sub(id='ANOTHER_ID', foo='bar') res = Test(id='FAKE_ID', bar=sub, a_list=[sub]) expected = { 'id': 'FAKE_ID', 'name': None, 'location': None, 'foo': None, 'bar': { 'id': 'ANOTHER_ID', 'name': None, 'sub': 'bar', 'location': None, }, 'a_list': [ { 'id': 'ANOTHER_ID', 'name': None, 'sub': 'bar', 'location': None, } ], } self.assertEqual(expected, res.to_dict()) a_munch = res.to_dict(_to_munch=True) self.assertEqual(a_munch.bar.id, 'ANOTHER_ID') self.assertEqual(a_munch.bar.sub, 'bar') self.assertEqual(a_munch.a_list[0].id, 'ANOTHER_ID') self.assertEqual(a_munch.a_list[0].sub, 'bar') def test_to_dict_no_body(self): class Test(resource.Resource): foo = resource.Header('foo') bar = resource.Body('bar') res = Test(id='FAKE_ID') expected = { 'location': None, 'foo': None, } self.assertEqual(expected, res.to_dict(body=False)) def test_to_dict_no_header(self): class 
Test(resource.Resource): foo = resource.Header('foo') bar = resource.Body('bar') res = Test(id='FAKE_ID') expected = { 'id': 'FAKE_ID', 'name': None, 'bar': None, 'location': None, } self.assertEqual(expected, res.to_dict(headers=False)) def test_to_dict_ignore_none(self): class Test(resource.Resource): foo = resource.Header('foo') bar = resource.Body('bar') res = Test(id='FAKE_ID', bar='BAR') expected = { 'id': 'FAKE_ID', 'bar': 'BAR', } self.assertEqual(expected, res.to_dict(ignore_none=True)) def test_to_dict_with_mro(self): class Parent(resource.Resource): foo = resource.Header('foo') bar = resource.Body('bar', aka='_bar') class Child(Parent): foo_new = resource.Header('foo_baz_server') bar_new = resource.Body('bar_baz_server') res = Child(id='FAKE_ID', bar='test') expected = { 'foo': None, 'bar': 'test', '_bar': 'test', 'foo_new': None, 'bar_new': None, 'id': 'FAKE_ID', 'location': None, 'name': None, } self.assertEqual(expected, res.to_dict()) def test_to_dict_with_unknown_attrs_in_body(self): class Test(resource.Resource): foo = resource.Body('foo') _allow_unknown_attrs_in_body = True res = Test(id='FAKE_ID', foo='FOO', bar='BAR') expected = { 'id': 'FAKE_ID', 'name': None, 'location': None, 'foo': 'FOO', 'bar': 'BAR', } self.assertEqual(expected, res.to_dict()) def test_json_dumps_from_resource(self): class Test(resource.Resource): foo = resource.Body('foo_remote') res = Test(foo='bar') expected = '{"foo": "bar", "id": null, "location": null, "name": null}' actual = json.dumps(res, sort_keys=True) self.assertEqual(expected, actual) response = FakeResponse({'foo': 'new_bar'}) res._translate_response(response) expected = ( '{"foo": "new_bar", "id": null, "location": null, "name": null}' ) actual = json.dumps(res, sort_keys=True) self.assertEqual(expected, actual) def test_items(self): class Test(resource.Resource): foo = resource.Body('foo') bar = resource.Body('bar') foot = resource.Body('foot') data = {'foo': 'bar', 'bar': 'foo\n', 'foot': 'a:b:c:d'} res = 
Test(**data) for k, v in res.items(): expected = data.get(k) if expected: self.assertEqual(v, expected) def test_access_by_aka(self): class Test(resource.Resource): foo = resource.Header('foo_remote', aka='foo_alias') res = Test(foo='bar', name='test') self.assertEqual('bar', res['foo_alias']) self.assertEqual('bar', res.foo_alias) self.assertTrue('foo' in res.keys()) self.assertTrue('foo_alias' in res.keys()) expected = utils.Munch( { 'id': None, 'name': 'test', 'location': None, 'foo': 'bar', 'foo_alias': 'bar', } ) actual = utils.Munch(res) self.assertEqual(expected, actual) self.assertEqual(expected, res.toDict()) self.assertEqual(expected, res.to_dict()) self.assertDictEqual(expected, res) self.assertDictEqual(expected, dict(res)) def test_access_by_resource_name(self): class Test(resource.Resource): blah = resource.Body("blah_resource") sot = Test(blah='dummy') result = sot["blah_resource"] self.assertEqual(result, sot.blah) def test_to_dict_value_error(self): class Test(resource.Resource): foo = resource.Header('foo') bar = resource.Body('bar') res = Test(id='FAKE_ID') err = self.assertRaises( ValueError, res.to_dict, body=False, headers=False, computed=False ) self.assertEqual( 'At least one of `body`, `headers` or `computed` must be True', str(err), ) def test_to_dict_with_mro_no_override(self): class Parent(resource.Resource): header = resource.Header('HEADER') body = resource.Body('BODY') class Child(Parent): # The following two properties are not supposed to be overridden # by the parent class property values. 
header = resource.Header('ANOTHER_HEADER') body = resource.Body('ANOTHER_BODY') res = Child(id='FAKE_ID', body='BODY_VALUE', header='HEADER_VALUE') expected = { 'body': 'BODY_VALUE', 'header': 'HEADER_VALUE', 'id': 'FAKE_ID', 'location': None, 'name': None, } self.assertEqual(expected, res.to_dict()) def test_new(self): class Test(resource.Resource): attr = resource.Body("attr") value = "value" sot = Test.new(attr=value) self.assertIn("attr", sot._body.dirty) self.assertEqual(value, sot.attr) def test_existing(self): class Test(resource.Resource): attr = resource.Body("attr") value = "value" sot = Test.existing(attr=value) self.assertNotIn("attr", sot._body.dirty) self.assertEqual(value, sot.attr) def test_from_munch_new(self): class Test(resource.Resource): attr = resource.Body("body_attr") value = "value" orig = utils.Munch(body_attr=value) sot = Test._from_munch(orig, synchronized=False) self.assertIn("body_attr", sot._body.dirty) self.assertEqual(value, sot.attr) def test_from_munch_existing(self): class Test(resource.Resource): attr = resource.Body("body_attr") value = "value" orig = utils.Munch(body_attr=value) sot = Test._from_munch(orig) self.assertNotIn("body_attr", sot._body.dirty) self.assertEqual(value, sot.attr) def test__prepare_request_with_id(self): class Test(resource.Resource): base_path = "/something" body_attr = resource.Body("x") header_attr = resource.Header("y") the_id = "id" body_value = "body" header_value = "header" sot = Test( id=the_id, body_attr=body_value, header_attr=header_value, _synchronized=False, ) result = sot._prepare_request(requires_id=True) self.assertEqual("something/id", result.url) self.assertEqual({"x": body_value, "id": the_id}, result.body) self.assertEqual({"y": header_value}, result.headers) def test__prepare_request_with_id_marked_clean(self): class Test(resource.Resource): base_path = "/something" body_attr = resource.Body("x") header_attr = resource.Header("y") the_id = "id" body_value = "body" header_value = 
"header" sot = Test( id=the_id, body_attr=body_value, header_attr=header_value, _synchronized=False, ) sot._body._dirty.discard("id") result = sot._prepare_request(requires_id=True) self.assertEqual("something/id", result.url) self.assertEqual({"x": body_value}, result.body) self.assertEqual({"y": header_value}, result.headers) def test__prepare_request_missing_id(self): sot = resource.Resource(id=None) self.assertRaises( exceptions.InvalidRequest, sot._prepare_request, requires_id=True ) def test__prepare_request_with_resource_key(self): key = "key" class Test(resource.Resource): base_path = "/something" resource_key = key body_attr = resource.Body("x") header_attr = resource.Header("y") body_value = "body" header_value = "header" sot = Test( body_attr=body_value, header_attr=header_value, _synchronized=False ) result = sot._prepare_request(requires_id=False, prepend_key=True) self.assertEqual("/something", result.url) self.assertEqual({key: {"x": body_value}}, result.body) self.assertEqual({"y": header_value}, result.headers) def test__prepare_request_with_override_key(self): default_key = "key" override_key = "other_key" class Test(resource.Resource): base_path = "/something" resource_key = default_key body_attr = resource.Body("x") header_attr = resource.Header("y") body_value = "body" header_value = "header" sot = Test( body_attr=body_value, header_attr=header_value, _synchronized=False ) result = sot._prepare_request( requires_id=False, prepend_key=True, resource_request_key=override_key, ) self.assertEqual("/something", result.url) self.assertEqual({override_key: {"x": body_value}}, result.body) self.assertEqual({"y": header_value}, result.headers) def test__prepare_request_with_patch(self): class Test(resource.Resource): commit_jsonpatch = True base_path = "/something" x = resource.Body("x") y = resource.Body("y") the_id = "id" sot = Test.existing(id=the_id, x=1, y=2) sot.x = 3 result = sot._prepare_request(requires_id=True, patch=True) 
self.assertEqual("something/id", result.url) self.assertEqual( [{'op': 'replace', 'path': '/x', 'value': 3}], result.body ) def test__prepare_request_with_patch_not_synchronized(self): class Test(resource.Resource): commit_jsonpatch = True base_path = "/something" x = resource.Body("x") y = resource.Body("y") the_id = "id" sot = Test.new(id=the_id, x=1) result = sot._prepare_request(requires_id=True, patch=True) self.assertEqual("something/id", result.url) self.assertEqual( [{'op': 'add', 'path': '/x', 'value': 1}], result.body ) def test__prepare_request_with_patch_params(self): class Test(resource.Resource): commit_jsonpatch = True base_path = "/something" x = resource.Body("x") y = resource.Body("y") the_id = "id" sot = Test.existing(id=the_id, x=1, y=2) sot.x = 3 params = [('foo', 'bar'), ('life', 42)] result = sot._prepare_request( requires_id=True, patch=True, params=params ) self.assertEqual("something/id?foo=bar&life=42", result.url) self.assertEqual( [{'op': 'replace', 'path': '/x', 'value': 3}], result.body ) def test__translate_response_no_body(self): class Test(resource.Resource): attr = resource.Header("attr") response = FakeResponse({}, headers={"attr": "value"}) sot = Test() sot._translate_response(response, has_body=False) self.assertEqual(dict(), sot._header.dirty) self.assertEqual("value", sot.attr) def test__translate_response_with_body_no_resource_key(self): class Test(resource.Resource): attr = resource.Body("attr") body = {"attr": "value"} response = FakeResponse(body) sot = Test() sot._filter_component = mock.Mock(side_effect=[body, dict()]) sot._translate_response(response, has_body=True) self.assertEqual("value", sot.attr) self.assertEqual(dict(), sot._body.dirty) self.assertEqual(dict(), sot._header.dirty) def test__translate_response_with_body_with_resource_key(self): key = "key" class Test(resource.Resource): resource_key = key attr = resource.Body("attr") body = {"attr": "value"} response = FakeResponse({key: body}) sot = Test() 
sot._filter_component = mock.Mock(side_effect=[body, dict()]) sot._translate_response(response, has_body=True) self.assertEqual("value", sot.attr) self.assertEqual(dict(), sot._body.dirty) self.assertEqual(dict(), sot._header.dirty) def test_cant_do_anything(self): class Test(resource.Resource): allow_create = False allow_fetch = False allow_commit = False allow_delete = False allow_head = False allow_list = False sot = Test() # The first argument to all of these operations is the session, # but we raise before we get to it so just pass anything in. self.assertRaises(exceptions.MethodNotSupported, sot.create, "") self.assertRaises(exceptions.MethodNotSupported, sot.fetch, "") self.assertRaises(exceptions.MethodNotSupported, sot.delete, "") self.assertRaises(exceptions.MethodNotSupported, sot.head, "") # list is a generator so you need to begin consuming # it in order to exercise the failure. the_list = sot.list("") self.assertRaises(exceptions.MethodNotSupported, next, the_list) # Update checks the dirty list first before even trying to see # if the call can be made, so fake a dirty list. 
sot._body = mock.Mock() sot._body.dirty = mock.Mock(return_value={"x": "y"}) self.assertRaises(exceptions.MethodNotSupported, sot.commit, "") def test_unknown_attrs_under_props_create(self): class Test(resource.Resource): properties = resource.Body("properties") _store_unknown_attrs_as_properties = True sot = Test.new( **{ 'dummy': 'value', } ) self.assertDictEqual({'dummy': 'value'}, sot.properties) self.assertDictEqual({'dummy': 'value'}, sot.to_dict()['properties']) self.assertDictEqual({'dummy': 'value'}, sot['properties']) self.assertEqual('value', sot['properties']['dummy']) sot = Test.new(**{'dummy': 'value', 'properties': 'a,b,c'}) self.assertDictEqual( {'dummy': 'value', 'properties': 'a,b,c'}, sot.properties ) self.assertDictEqual( {'dummy': 'value', 'properties': 'a,b,c'}, sot.to_dict()['properties'], ) sot = Test.new(**{'properties': None}) self.assertIsNone(sot.properties) self.assertIsNone(sot.to_dict()['properties']) def test_unknown_attrs_not_stored(self): class Test(resource.Resource): properties = resource.Body("properties") sot = Test.new( **{ 'dummy': 'value', } ) self.assertIsNone(sot.properties) def test_unknown_attrs_not_stored1(self): class Test(resource.Resource): _store_unknown_attrs_as_properties = True sot = Test.new( **{ 'dummy': 'value', } ) self.assertRaises(KeyError, sot.__getitem__, 'properties') def test_unknown_attrs_under_props_set(self): class Test(resource.Resource): properties = resource.Body("properties") _store_unknown_attrs_as_properties = True sot = Test.new( **{ 'dummy': 'value', } ) sot['properties'] = {'dummy': 'new_value'} self.assertEqual('new_value', sot['properties']['dummy']) sot.properties = {'dummy': 'new_value1'} self.assertEqual('new_value1', sot['properties']['dummy']) def test_unknown_attrs_prepare_request_unpacked(self): class Test(resource.Resource): properties = resource.Body("properties") _store_unknown_attrs_as_properties = True # Unknown attribute given as root attribute sot = Test.new(**{'dummy': 
'value', 'properties': 'a,b,c'}) request_body = sot._prepare_request(requires_id=False).body self.assertEqual('value', request_body['dummy']) self.assertEqual('a,b,c', request_body['properties']) # properties are already a dict sot = Test.new( **{'properties': {'properties': 'a,b,c', 'dummy': 'value'}} ) request_body = sot._prepare_request(requires_id=False).body self.assertEqual('value', request_body['dummy']) self.assertEqual('a,b,c', request_body['properties']) def test_unknown_attrs_prepare_request_no_unpack_dict(self): # if props type is not None - ensure no unpacking is done class Test(resource.Resource): properties = resource.Body("properties", type=dict) sot = Test.new( **{'properties': {'properties': 'a,b,c', 'dummy': 'value'}} ) request_body = sot._prepare_request(requires_id=False).body self.assertDictEqual( {'dummy': 'value', 'properties': 'a,b,c'}, request_body['properties'], ) def test_unknown_attrs_prepare_request_patch_unpacked(self): class Test(resource.Resource): properties = resource.Body("properties") _store_unknown_attrs_as_properties = True commit_jsonpatch = True sot = Test.existing(**{'dummy': 'value', 'properties': 'a,b,c'}) sot._update(**{'properties': {'dummy': 'new_value'}}) request_body = sot._prepare_request(requires_id=False, patch=True).body self.assertDictEqual( {'path': '/dummy', 'value': 'new_value', 'op': 'replace'}, request_body[0], ) def test_unknown_attrs_under_props_translate_response(self): class Test(resource.Resource): properties = resource.Body("properties") _store_unknown_attrs_as_properties = True body = {'dummy': 'value', 'properties': 'a,b,c'} response = FakeResponse(body) sot = Test() sot._translate_response(response, has_body=True) self.assertDictEqual( {'dummy': 'value', 'properties': 'a,b,c'}, sot.properties ) def test_unknown_attrs_in_body_create(self): class Test(resource.Resource): known_param = resource.Body("known_param") _allow_unknown_attrs_in_body = True sot = Test.new(**{'known_param': 'v1', 
'unknown_param': 'v2'}) self.assertEqual('v1', sot.known_param) self.assertEqual('v2', sot.unknown_param) def test_unknown_attrs_in_body_not_stored(self): class Test(resource.Resource): known_param = resource.Body("known_param") properties = resource.Body("properties") sot = Test.new(**{'known_param': 'v1', 'unknown_param': 'v2'}) self.assertEqual('v1', sot.known_param) self.assertNotIn('unknown_param', sot) def test_unknown_attrs_in_body_set(self): class Test(resource.Resource): known_param = resource.Body("known_param") _allow_unknown_attrs_in_body = True sot = Test.new( **{ 'known_param': 'v1', } ) sot['unknown_param'] = 'v2' self.assertEqual('v1', sot.known_param) self.assertEqual('v2', sot.unknown_param) def test_unknown_attrs_in_body_not_allowed_to_set(self): class Test(resource.Resource): known_param = resource.Body("known_param") _allow_unknown_attrs_in_body = False sot = Test.new( **{ 'known_param': 'v1', } ) try: sot['unknown_param'] = 'v2' except KeyError: self.assertEqual('v1', sot.known_param) self.assertNotIn('unknown_param', sot) return self.fail( "Parameter 'unknown_param' unexpectedly set through the " "dict interface" ) def test_unknown_attrs_in_body_translate_response(self): class Test(resource.Resource): known_param = resource.Body("known_param") _allow_unknown_attrs_in_body = True body = {'known_param': 'v1', 'unknown_param': 'v2'} response = FakeResponse(body) sot = Test() sot._translate_response(response, has_body=True) self.assertEqual('v1', sot.known_param) self.assertEqual('v2', sot.unknown_param) def test_unknown_attrs_not_in_body_translate_response(self): class Test(resource.Resource): known_param = resource.Body("known_param") _allow_unknown_attrs_in_body = False body = {'known_param': 'v1', 'unknown_param': 'v2'} response = FakeResponse(body) sot = Test() sot._translate_response(response, has_body=True) self.assertEqual('v1', sot.known_param) self.assertNotIn('unknown_param', sot) class TestResourceActions(base.TestCase): def 
setUp(self): super().setUp() self.service_name = "service" self.base_path = "base_path" class Test(resource.Resource): service = self.service_name base_path = self.base_path resources_key = 'resources' allow_create = True allow_fetch = True allow_head = True allow_commit = True allow_delete = True allow_list = True self.test_class = Test self.request = mock.Mock(spec=resource._Request) self.request.url = "uri" self.request.body = "body" self.request.headers = "headers" self.response = FakeResponse({}) self.sot = Test(id="id") self.sot._prepare_request = mock.Mock(return_value=self.request) self.sot._translate_response = mock.Mock() self.session = mock.Mock(spec=adapter.Adapter) self.session.create = mock.Mock(return_value=self.response) self.session.get = mock.Mock(return_value=self.response) self.session.put = mock.Mock(return_value=self.response) self.session.patch = mock.Mock(return_value=self.response) self.session.post = mock.Mock(return_value=self.response) self.session.delete = mock.Mock(return_value=self.response) self.session.head = mock.Mock(return_value=self.response) self.session.session = self.session self.session._get_connection = mock.Mock(return_value=self.cloud) self.session.default_microversion = None self.session.retriable_status_codes = None self.endpoint_data = mock.Mock( max_microversion='1.99', min_microversion=None ) self.session.get_endpoint_data.return_value = self.endpoint_data def _test_create( self, cls, requires_id=False, prepend_key=False, microversion=None, base_path=None, params=None, id_marked_dirty=True, explicit_microversion=None, resource_request_key=None, resource_response_key=None, ): id = "id" if requires_id else None sot = cls(id=id) sot._prepare_request = mock.Mock(return_value=self.request) sot._translate_response = mock.Mock() params = params or {} kwargs = params.copy() if explicit_microversion is not None: kwargs['microversion'] = explicit_microversion microversion = explicit_microversion result = sot.create( 
self.session, prepend_key=prepend_key, base_path=base_path, resource_request_key=resource_request_key, resource_response_key=resource_response_key, **kwargs, ) id_is_dirty = 'id' in sot._body._dirty self.assertEqual(id_marked_dirty, id_is_dirty) prepare_kwargs = {} if resource_request_key is not None: prepare_kwargs['resource_request_key'] = resource_request_key sot._prepare_request.assert_called_once_with( requires_id=requires_id, prepend_key=prepend_key, base_path=base_path, **prepare_kwargs, ) if requires_id: self.session.put.assert_called_once_with( self.request.url, json=self.request.body, headers=self.request.headers, microversion=microversion, params=params, ) else: self.session.post.assert_called_once_with( self.request.url, json=self.request.body, headers=self.request.headers, microversion=microversion, params=params, ) self.assertEqual(sot.microversion, microversion) res_kwargs = {} if resource_response_key is not None: res_kwargs['resource_response_key'] = resource_response_key sot._translate_response.assert_called_once_with( self.response, has_body=sot.has_body, **res_kwargs ) self.assertEqual(result, sot) def test_put_create(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_create = True create_method = 'PUT' self._test_create(Test, requires_id=True, prepend_key=True) def test_put_create_exclude_id(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_create = True create_method = 'PUT' create_exclude_id_from_body = True self._test_create( Test, requires_id=True, prepend_key=True, id_marked_dirty=False ) def test_put_create_with_microversion(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_create = True create_method = 'PUT' _max_microversion = '1.42' self._test_create( Test, requires_id=True, prepend_key=True, microversion='1.42' ) def test_put_create_with_explicit_microversion(self): class 
Test(resource.Resource): service = self.service_name base_path = self.base_path allow_create = True create_method = 'PUT' _max_microversion = '1.99' self._test_create( Test, requires_id=True, prepend_key=True, explicit_microversion='1.42', ) def test_put_create_with_params(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_create = True create_method = 'PUT' self._test_create( Test, requires_id=True, prepend_key=True, params={'answer': 42} ) def test_post_create(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_create = True create_method = 'POST' self._test_create(Test, requires_id=False, prepend_key=True) def test_post_create_override_request_key(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_create = True create_method = 'POST' resource_key = 'SomeKey' self._test_create( Test, requires_id=False, prepend_key=True, resource_request_key="OtherKey", ) def test_post_create_override_response_key(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_create = True create_method = 'POST' resource_key = 'SomeKey' self._test_create( Test, requires_id=False, prepend_key=True, resource_response_key="OtherKey", ) def test_post_create_override_key_both(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_create = True create_method = 'POST' resource_key = 'SomeKey' self._test_create( Test, requires_id=False, prepend_key=True, resource_request_key="OtherKey", resource_response_key="SomeOtherKey", ) def test_post_create_base_path(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_create = True create_method = 'POST' self._test_create( Test, requires_id=False, prepend_key=True, base_path='dummy' ) def test_post_create_with_params(self): class Test(resource.Resource): service = self.service_name 
base_path = self.base_path allow_create = True create_method = 'POST' self._test_create( Test, requires_id=False, prepend_key=True, params={'answer': 42} ) def test_fetch(self): result = self.sot.fetch(self.session) self.sot._prepare_request.assert_called_once_with( requires_id=True, base_path=None ) self.session.get.assert_called_once_with( self.request.url, microversion=None, params={}, skip_cache=False ) self.assertIsNone(self.sot.microversion) self.sot._translate_response.assert_called_once_with(self.response) self.assertEqual(result, self.sot) def test_fetch_with_override_key(self): result = self.sot.fetch(self.session, resource_response_key="SomeKey") self.sot._prepare_request.assert_called_once_with( requires_id=True, base_path=None ) self.session.get.assert_called_once_with( self.request.url, microversion=None, params={}, skip_cache=False ) self.assertIsNone(self.sot.microversion) self.sot._translate_response.assert_called_once_with( self.response, resource_response_key="SomeKey" ) self.assertEqual(result, self.sot) def test_fetch_with_params(self): result = self.sot.fetch(self.session, fields='a,b') self.sot._prepare_request.assert_called_once_with( requires_id=True, base_path=None ) self.session.get.assert_called_once_with( self.request.url, microversion=None, params={'fields': 'a,b'}, skip_cache=False, ) self.assertIsNone(self.sot.microversion) self.sot._translate_response.assert_called_once_with(self.response) self.assertEqual(result, self.sot) def test_fetch_with_microversion(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_fetch = True _max_microversion = '1.42' sot = Test(id='id') sot._prepare_request = mock.Mock(return_value=self.request) sot._translate_response = mock.Mock() result = sot.fetch(self.session) sot._prepare_request.assert_called_once_with( requires_id=True, base_path=None ) self.session.get.assert_called_once_with( self.request.url, microversion='1.42', params={}, skip_cache=False ) 
self.assertEqual(sot.microversion, '1.42') sot._translate_response.assert_called_once_with(self.response) self.assertEqual(result, sot) def test_fetch_with_explicit_microversion(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_fetch = True _max_microversion = '1.99' sot = Test(id='id') sot._prepare_request = mock.Mock(return_value=self.request) sot._translate_response = mock.Mock() result = sot.fetch(self.session, microversion='1.42') sot._prepare_request.assert_called_once_with( requires_id=True, base_path=None ) self.session.get.assert_called_once_with( self.request.url, microversion='1.42', params={}, skip_cache=False ) self.assertEqual(sot.microversion, '1.42') sot._translate_response.assert_called_once_with(self.response) self.assertEqual(result, sot) def test_fetch_not_requires_id(self): result = self.sot.fetch(self.session, False) self.sot._prepare_request.assert_called_once_with( requires_id=False, base_path=None ) self.session.get.assert_called_once_with( self.request.url, microversion=None, params={}, skip_cache=False ) self.sot._translate_response.assert_called_once_with(self.response) self.assertEqual(result, self.sot) def test_fetch_base_path(self): result = self.sot.fetch(self.session, False, base_path='dummy') self.sot._prepare_request.assert_called_once_with( requires_id=False, base_path='dummy' ) self.session.get.assert_called_once_with( self.request.url, microversion=None, params={}, skip_cache=False ) self.sot._translate_response.assert_called_once_with(self.response) self.assertEqual(result, self.sot) def test_head(self): result = self.sot.head(self.session) self.sot._prepare_request.assert_called_once_with(base_path=None) self.session.head.assert_called_once_with( self.request.url, microversion=None ) self.assertIsNone(self.sot.microversion) self.sot._translate_response.assert_called_once_with( self.response, has_body=False ) self.assertEqual(result, self.sot) def test_head_base_path(self): 
result = self.sot.head(self.session, base_path='dummy') self.sot._prepare_request.assert_called_once_with(base_path='dummy') self.session.head.assert_called_once_with( self.request.url, microversion=None ) self.assertIsNone(self.sot.microversion) self.sot._translate_response.assert_called_once_with( self.response, has_body=False ) self.assertEqual(result, self.sot) def test_head_with_microversion(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path allow_head = True _max_microversion = '1.42' sot = Test(id='id') sot._prepare_request = mock.Mock(return_value=self.request) sot._translate_response = mock.Mock() result = sot.head(self.session) sot._prepare_request.assert_called_once_with(base_path=None) self.session.head.assert_called_once_with( self.request.url, microversion='1.42' ) self.assertEqual(sot.microversion, '1.42') sot._translate_response.assert_called_once_with( self.response, has_body=False ) self.assertEqual(result, sot) def _test_commit( self, commit_method='PUT', prepend_key=True, has_body=True, microversion=None, commit_args=None, expected_args=None, base_path=None, explicit_microversion=None, ): self.sot.commit_method = commit_method # Need to make sot look dirty so we can attempt an update self.sot._body = mock.Mock() self.sot._body.dirty = mock.Mock(return_value={"x": "y"}) commit_args = commit_args or {} if explicit_microversion is not None: commit_args['microversion'] = explicit_microversion microversion = explicit_microversion self.sot.commit( self.session, prepend_key=prepend_key, has_body=has_body, base_path=base_path, **commit_args, ) self.sot._prepare_request.assert_called_once_with( prepend_key=prepend_key, base_path=base_path ) if commit_method == 'PATCH': self.session.patch.assert_called_once_with( self.request.url, json=self.request.body, headers=self.request.headers, microversion=microversion, **(expected_args or {}), ) elif commit_method == 'POST': self.session.post.assert_called_once_with( 
self.request.url, json=self.request.body, headers=self.request.headers, microversion=microversion, **(expected_args or {}), ) elif commit_method == 'PUT': self.session.put.assert_called_once_with( self.request.url, json=self.request.body, headers=self.request.headers, microversion=microversion, **(expected_args or {}), ) self.assertEqual(self.sot.microversion, microversion) self.sot._translate_response.assert_called_once_with( self.response, has_body=has_body ) def test_commit_put(self): self._test_commit(commit_method='PUT', prepend_key=True, has_body=True) def test_commit_patch(self): self._test_commit( commit_method='PATCH', prepend_key=False, has_body=False ) def test_commit_base_path(self): self._test_commit( commit_method='PUT', prepend_key=True, has_body=True, base_path='dummy', ) def test_commit_patch_retry_on_conflict(self): self._test_commit( commit_method='PATCH', commit_args={'retry_on_conflict': True}, expected_args={'retriable_status_codes': {409}}, ) def test_commit_put_retry_on_conflict(self): self._test_commit( commit_method='PUT', commit_args={'retry_on_conflict': True}, expected_args={'retriable_status_codes': {409}}, ) def test_commit_patch_no_retry_on_conflict(self): self.session.retriable_status_codes = {409, 503} self._test_commit( commit_method='PATCH', commit_args={'retry_on_conflict': False}, expected_args={'retriable_status_codes': {503}}, ) def test_commit_put_no_retry_on_conflict(self): self.session.retriable_status_codes = {409, 503} self._test_commit( commit_method='PATCH', commit_args={'retry_on_conflict': False}, expected_args={'retriable_status_codes': {503}}, ) def test_commit_put_explicit_microversion(self): self._test_commit( commit_method='PUT', prepend_key=True, has_body=True, explicit_microversion='1.42', ) def test_commit_not_dirty(self): self.sot._body = mock.Mock() self.sot._body.dirty = dict() self.sot._header = mock.Mock() self.sot._header.dirty = dict() self.sot.commit(self.session) self.session.put.assert_not_called() 
    def test_patch_with_sdk_names(self):
        # Patch paths written with SDK-side attribute names ('nested') must
        # be translated to the server-side names ('renamed') before the
        # JSON-patch document is sent.
        class Test(resource.Resource):
            allow_patch = True
            id = resource.Body('id')
            attr = resource.Body('attr')
            nested = resource.Body('renamed')
            other = resource.Body('other')

        test_patch = [
            {'path': '/attr', 'op': 'replace', 'value': 'new'},
            {'path': '/nested/dog', 'op': 'remove'},
            {'path': '/nested/cat', 'op': 'add', 'value': 'meow'},
        ]
        expected = [
            {'path': '/attr', 'op': 'replace', 'value': 'new'},
            {'path': '/renamed/dog', 'op': 'remove'},
            {'path': '/renamed/cat', 'op': 'add', 'value': 'meow'},
        ]

        sot = Test.existing(id=1, attr=42, nested={'dog': 'bark'})
        sot.patch(self.session, test_patch)

        self.session.patch.assert_called_once_with(
            '/1', json=expected, headers=mock.ANY, microversion=None
        )

    def test_patch_with_server_names(self):
        # Paths already using server-side names are passed through as-is.
        class Test(resource.Resource):
            allow_patch = True
            id = resource.Body('id')
            attr = resource.Body('attr')
            nested = resource.Body('renamed')
            other = resource.Body('other')

        test_patch = [
            {'path': '/attr', 'op': 'replace', 'value': 'new'},
            {'path': '/renamed/dog', 'op': 'remove'},
            {'path': '/renamed/cat', 'op': 'add', 'value': 'meow'},
        ]

        sot = Test.existing(id=1, attr=42, nested={'dog': 'bark'})
        sot.patch(self.session, test_patch)

        self.session.patch.assert_called_once_with(
            '/1', json=test_patch, headers=mock.ANY, microversion=None
        )

    def test_patch_with_changed_fields(self):
        # Dirty attributes set on the resource are merged into the
        # caller-provided patch document (dirty field first).
        class Test(resource.Resource):
            allow_patch = True
            attr = resource.Body('attr')
            nested = resource.Body('renamed')
            other = resource.Body('other')

        sot = Test.existing(id=1, attr=42, nested={'dog': 'bark'})
        sot.attr = 'new'
        sot.patch(self.session, {'path': '/renamed/dog', 'op': 'remove'})

        expected = [
            {'path': '/attr', 'op': 'replace', 'value': 'new'},
            {'path': '/renamed/dog', 'op': 'remove'},
        ]
        self.session.patch.assert_called_once_with(
            '/1', json=expected, headers=mock.ANY, microversion=None
        )

    def test_delete(self):
        # delete() issues a DELETE, translates the response with no body,
        # and returns the resource itself.
        result = self.sot.delete(self.session)

        self.sot._prepare_request.assert_called_once_with()
        self.session.delete.assert_called_once_with(
            self.request.url, headers='headers', microversion=None
        )

        self.sot._translate_response.assert_called_once_with(
            self.response, has_body=False
        )
        self.assertEqual(result, self.sot)

    def test_delete_with_microversion(self):
        # With a _max_microversion declared, delete() negotiates and sends
        # that microversion by default.
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_delete = True
            _max_microversion = '1.42'

        sot = Test(id='id')
        sot._prepare_request = mock.Mock(return_value=self.request)
        sot._translate_response = mock.Mock()

        result = sot.delete(self.session)

        sot._prepare_request.assert_called_once_with()
        self.session.delete.assert_called_once_with(
            self.request.url, headers='headers', microversion='1.42'
        )

        sot._translate_response.assert_called_once_with(
            self.response, has_body=False
        )
        self.assertEqual(result, sot)

    def test_delete_with_explicit_microversion(self):
        # An explicit microversion argument overrides the negotiated one.
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_delete = True
            _max_microversion = '1.99'

        sot = Test(id='id')
        sot._prepare_request = mock.Mock(return_value=self.request)
        sot._translate_response = mock.Mock()

        result = sot.delete(self.session, microversion='1.42')

        sot._prepare_request.assert_called_once_with()
        self.session.delete.assert_called_once_with(
            self.request.url, headers='headers', microversion='1.42'
        )

        sot._translate_response.assert_called_once_with(
            self.response, has_body=False
        )
        self.assertEqual(result, sot)

    # NOTE: As list returns a generator, testing it requires consuming
    # the generator. Wrap calls to self.sot.list in a `list`
    # and then test the results as a list of responses.
def test_list_empty_response(self): mock_response = mock.Mock() mock_response.status_code = 200 mock_response.json.return_value = {"resources": []} self.session.get.return_value = mock_response result = list(self.sot.list(self.session)) self.session.get.assert_called_once_with( self.base_path, headers={"Accept": "application/json"}, params={}, microversion=None, ) self.assertEqual([], result) def test_list_one_page_response_paginated(self): id_value = 1 mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.return_value = {"resources": [{"id": id_value}]} self.session.get.return_value = mock_response # Ensure that we break out of the loop on a paginated call # that still only results in one page of data. results = list(self.sot.list(self.session, paginated=True)) self.assertEqual(1, len(results)) self.assertEqual(1, len(self.session.get.call_args_list)) self.assertEqual(id_value, results[0].id) self.assertIsInstance(results[0], self.test_class) def test_list_one_page_response_not_paginated(self): id_value = 1 mock_response = mock.Mock() mock_response.status_code = 200 mock_response.json.return_value = {"resources": [{"id": id_value}]} self.session.get.return_value = mock_response results = list(self.sot.list(self.session, paginated=False)) self.session.get.assert_called_once_with( self.base_path, headers={"Accept": "application/json"}, params={}, microversion=None, ) self.assertEqual(1, len(results)) self.assertEqual(id_value, results[0].id) self.assertIsInstance(results[0], self.test_class) def test_list_one_page_response_resources_key(self): key = "resources" class Test(self.test_class): resources_key = key id_value = 1 mock_response = mock.Mock() mock_response.status_code = 200 mock_response.json.return_value = {key: [{"id": id_value}]} mock_response.links = [] self.session.get.return_value = mock_response sot = Test() results = list(sot.list(self.session)) self.session.get.assert_called_once_with( self.base_path, 
headers={"Accept": "application/json"}, params={}, microversion=None, ) self.assertEqual(1, len(results)) self.assertEqual(id_value, results[0].id) self.assertIsInstance(results[0], self.test_class) def test_list_response_paginated_without_links(self): ids = [1, 2] mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.return_value = { "resources": [{"id": ids[0]}], "resources_links": [ { "href": "https://example.com/next-url", "rel": "next", } ], } mock_response2 = mock.Mock() mock_response2.status_code = 200 mock_response2.links = {} mock_response2.json.return_value = { "resources": [{"id": ids[1]}], } self.session.get.side_effect = [mock_response, mock_response2] results = list(self.sot.list(self.session, paginated=True)) self.assertEqual(2, len(results)) self.assertEqual(ids[0], results[0].id) self.assertEqual(ids[1], results[1].id) self.assertEqual( mock.call( 'base_path', headers={'Accept': 'application/json'}, params={}, microversion=None, ), self.session.get.mock_calls[0], ) self.assertEqual( mock.call( 'https://example.com/next-url', headers={'Accept': 'application/json'}, params={}, microversion=None, ), self.session.get.mock_calls[1], ) self.assertEqual(2, len(self.session.get.call_args_list)) self.assertIsInstance(results[0], self.test_class) def test_list_response_paginated_with_links(self): ids = [1, 2] mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.side_effect = [ { "resources": [{"id": ids[0]}], "resources_links": [ { "href": "https://example.com/next-url", "rel": "next", } ], }, { "resources": [{"id": ids[1]}], }, ] self.session.get.return_value = mock_response results = list(self.sot.list(self.session, paginated=True)) self.assertEqual(2, len(results)) self.assertEqual(ids[0], results[0].id) self.assertEqual(ids[1], results[1].id) self.assertEqual( mock.call( 'base_path', headers={'Accept': 'application/json'}, params={}, microversion=None, ), 
self.session.get.mock_calls[0], ) self.assertEqual( mock.call( 'https://example.com/next-url', headers={'Accept': 'application/json'}, params={}, microversion=None, ), self.session.get.mock_calls[2], ) self.assertEqual(2, len(self.session.get.call_args_list)) self.assertIsInstance(results[0], self.test_class) def test_list_response_paginated_with_links_and_query(self): q_limit = 1 ids = [1, 2] mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.side_effect = [ { "resources": [{"id": ids[0]}], "resources_links": [ { "href": "https://example.com/next-url?limit=%d" % q_limit, "rel": "next", } ], }, { "resources": [{"id": ids[1]}], }, { "resources": [], }, ] self.session.get.return_value = mock_response class Test(self.test_class): _query_mapping = resource.QueryParameters("limit") results = list(Test.list(self.session, paginated=True, limit=q_limit)) self.assertEqual(2, len(results)) self.assertEqual(ids[0], results[0].id) self.assertEqual(ids[1], results[1].id) self.assertEqual( mock.call( 'base_path', headers={'Accept': 'application/json'}, params={ 'limit': q_limit, }, microversion=None, ), self.session.get.mock_calls[0], ) self.assertEqual( mock.call( 'https://example.com/next-url', headers={'Accept': 'application/json'}, params={ 'limit': [str(q_limit)], }, microversion=None, ), self.session.get.mock_calls[2], ) self.assertEqual(3, len(self.session.get.call_args_list)) self.assertIsInstance(results[0], self.test_class) def test_list_response_paginated_with_next_field(self): """Test pagination with a 'next' field in the response. Glance doesn't return a 'links' field in the response. Instead, it returns a 'first' field and, if there are more pages, a 'next' field in the response body. Ensure we correctly parse these. 
""" class Test(resource.Resource): service = self.service_name base_path = '/foos/bars' resources_key = 'bars' allow_list = True _query_mapping = resource.QueryParameters("wow") ids = [1, 2] mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.side_effect = [ { "bars": [{"id": ids[0]}], "first": "/v2/foos/bars?wow=cool", "next": "/v2/foos/bars?marker=baz&wow=cool", }, { "bars": [{"id": ids[1]}], "first": "/v2/foos/bars?wow=cool", }, ] self.session.get.return_value = mock_response results = list(Test.list(self.session, paginated=True, wow="cool")) self.assertEqual(2, len(results)) self.assertEqual(ids[0], results[0].id) self.assertEqual(ids[1], results[1].id) self.assertEqual( mock.call( Test.base_path, headers={'Accept': 'application/json'}, params={'wow': 'cool'}, microversion=None, ), self.session.get.mock_calls[0], ) self.assertEqual( mock.call( '/foos/bars', headers={'Accept': 'application/json'}, params={'wow': ['cool'], 'marker': ['baz']}, microversion=None, ), self.session.get.mock_calls[2], ) self.assertEqual(2, len(self.session.get.call_args_list)) self.assertIsInstance(results[0], Test) def test_list_response_paginated_with_microversions(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path resources_key = 'resources' allow_list = True _max_microversion = '1.42' ids = [1, 2] mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.return_value = { "resources": [{"id": ids[0]}], "resources_links": [ { "href": "https://example.com/next-url", "rel": "next", } ], } mock_response2 = mock.Mock() mock_response2.status_code = 200 mock_response2.links = {} mock_response2.json.return_value = { "resources": [{"id": ids[1]}], } self.session.get.side_effect = [mock_response, mock_response2] results = list(Test.list(self.session, paginated=True)) self.assertEqual(2, len(results)) self.assertEqual(ids[0], results[0].id) 
self.assertEqual(ids[1], results[1].id) self.assertEqual( mock.call( 'base_path', headers={'Accept': 'application/json'}, params={}, microversion='1.42', ), self.session.get.mock_calls[0], ) self.assertEqual( mock.call( 'https://example.com/next-url', headers={'Accept': 'application/json'}, params={}, microversion='1.42', ), self.session.get.mock_calls[1], ) self.assertEqual(2, len(self.session.get.call_args_list)) self.assertIsInstance(results[0], Test) self.assertEqual('1.42', results[0].microversion) def test_list_multi_page_response_not_paginated(self): ids = [1, 2] mock_response = mock.Mock() mock_response.status_code = 200 mock_response.json.side_effect = [ {"resources": [{"id": ids[0]}]}, {"resources": [{"id": ids[1]}]}, ] self.session.get.return_value = mock_response results = list(self.sot.list(self.session, paginated=False)) self.assertEqual(1, len(results)) self.assertEqual(ids[0], results[0].id) self.assertIsInstance(results[0], self.test_class) def test_list_paginated_infinite_loop(self): q_limit = 1 mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.side_effect = [ { "resources": [{"id": 1}], }, { "resources": [{"id": 1}], }, ] self.session.get.return_value = mock_response class Test(self.test_class): _query_mapping = resource.QueryParameters("limit") res = Test.list(self.session, paginated=True, limit=q_limit) self.assertRaises(exceptions.SDKException, list, res) def test_list_query_params(self): id = 1 qp = "query param!" qp_name = "query-param" uri_param = "uri param!" 
mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.return_value = {"resources": [{"id": id}]} mock_empty = mock.Mock() mock_empty.status_code = 200 mock_empty.links = {} mock_empty.json.return_value = {"resources": []} self.session.get.side_effect = [mock_response, mock_empty] class Test(self.test_class): _query_mapping = resource.QueryParameters(query_param=qp_name) base_path = "/%(something)s/blah" something = resource.URI("something") results = list( Test.list( self.session, paginated=True, query_param=qp, something=uri_param, ) ) self.assertEqual(1, len(results)) # Verify URI attribute is set on the resource self.assertEqual(results[0].something, uri_param) # Look at the `params` argument to each of the get calls that # were made. self.assertEqual( self.session.get.call_args_list[0][1]["params"], {qp_name: qp} ) self.assertEqual( self.session.get.call_args_list[0][0][0], Test.base_path % {"something": uri_param}, ) def test_list_with_injected_headers(self): mock_empty = mock.Mock() mock_empty.status_code = 200 mock_empty.json.return_value = {"resources": []} self.session.get.side_effect = [mock_empty] _ = list( self.test_class.list(self.session, headers={'X-Test': 'value'}) ) expected = {'Accept': 'application/json', 'X-Test': 'value'} self.assertEqual( expected, self.session.get.call_args.kwargs['headers'] ) @mock.patch.object(resource.Resource, 'list') def test_list_dns_with_headers(self, mock_resource_list): dns.v2._base.Resource.list( self.session, project_id='1234', all_projects=True, ) expected = { 'x-auth-sudo-project-id': '1234', 'x-auth-all-projects': 'True', } self.assertEqual( expected, mock_resource_list.call_args.kwargs['headers'] ) def test_allow_invalid_list_params(self): qp = "query param!" qp_name = "query-param" uri_param = "uri param!" 
mock_empty = mock.Mock() mock_empty.status_code = 200 mock_empty.links = {} mock_empty.json.return_value = {"resources": []} self.session.get.side_effect = [mock_empty] class Test(self.test_class): _query_mapping = resource.QueryParameters(query_param=qp_name) base_path = "/%(something)s/blah" something = resource.URI("something") list( Test.list( self.session, paginated=True, query_param=qp, allow_unknown_params=True, something=uri_param, something_wrong=True, ) ) self.session.get.assert_called_once_with( f"/{uri_param}/blah", headers={'Accept': 'application/json'}, microversion=None, params={qp_name: qp}, ) def test_list_client_filters(self): qp = "query param!" uri_param = "uri param!" mock_empty = mock.Mock() mock_empty.status_code = 200 mock_empty.links = {} mock_empty.json.return_value = { "resources": [ {"a": "1", "b": "1"}, {"a": "1", "b": "2"}, ] } self.session.get.side_effect = [mock_empty] class Test(self.test_class): _query_mapping = resource.QueryParameters('a') base_path = "/%(something)s/blah" something = resource.URI("something") a = resource.Body("a") b = resource.Body("b") res = list( Test.list( self.session, paginated=True, query_param=qp, allow_unknown_params=True, something=uri_param, a='1', b='2', ) ) self.session.get.assert_called_once_with( f"/{uri_param}/blah", headers={'Accept': 'application/json'}, microversion=None, params={'a': '1'}, ) self.assertEqual(1, len(res)) self.assertEqual("2", res[0].b) def test_values_as_list_params(self): id = 1 qp = "query param!" qp_name = "query-param" uri_param = "uri param!" 
mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.return_value = {"resources": [{"id": id}]} mock_empty = mock.Mock() mock_empty.status_code = 200 mock_empty.links = {} mock_empty.json.return_value = {"resources": []} self.session.get.side_effect = [mock_response, mock_empty] class Test(self.test_class): _query_mapping = resource.QueryParameters(query_param=qp_name) base_path = "/%(something)s/blah" something = resource.URI("something") results = list( Test.list( self.session, paginated=True, something=uri_param, **{qp_name: qp}, ) ) self.assertEqual(1, len(results)) # Look at the `params` argument to each of the get calls that # were made. self.assertEqual( self.session.get.call_args_list[0][1]["params"], {qp_name: qp} ) self.assertEqual( self.session.get.call_args_list[0][0][0], Test.base_path % {"something": uri_param}, ) def test_values_as_list_params_precedence(self): id = 1 qp = "query param!" qp2 = "query param!!!!!" qp_name = "query-param" uri_param = "uri param!" mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.return_value = {"resources": [{"id": id}]} mock_empty = mock.Mock() mock_empty.status_code = 200 mock_empty.links = {} mock_empty.json.return_value = {"resources": []} self.session.get.side_effect = [mock_response, mock_empty] class Test(self.test_class): _query_mapping = resource.QueryParameters(query_param=qp_name) base_path = "/%(something)s/blah" something = resource.URI("something") results = list( Test.list( self.session, paginated=True, query_param=qp2, something=uri_param, **{qp_name: qp}, ) ) self.assertEqual(1, len(results)) # Look at the `params` argument to each of the get calls that # were made. 
self.assertEqual( self.session.get.call_args_list[0][1]["params"], {qp_name: qp2} ) self.assertEqual( self.session.get.call_args_list[0][0][0], Test.base_path % {"something": uri_param}, ) def test_list_multi_page_response_paginated(self): ids = [1, 2] resp1 = mock.Mock() resp1.status_code = 200 resp1.links = {} resp1.json.return_value = { "resources": [{"id": ids[0]}], "resources_links": [ { "href": "https://example.com/next-url", "rel": "next", } ], } resp2 = mock.Mock() resp2.status_code = 200 resp2.links = {} resp2.json.return_value = { "resources": [{"id": ids[1]}], "resources_links": [ { "href": "https://example.com/next-url", "rel": "next", } ], } resp3 = mock.Mock() resp3.status_code = 200 resp3.links = {} resp3.json.return_value = {"resources": []} self.session.get.side_effect = [resp1, resp2, resp3] results = self.sot.list(self.session, paginated=True) result0 = next(results) self.assertEqual(result0.id, ids[0]) self.session.get.assert_called_with( self.base_path, headers={"Accept": "application/json"}, params={}, microversion=None, ) result1 = next(results) self.assertEqual(result1.id, ids[1]) self.session.get.assert_called_with( 'https://example.com/next-url', headers={"Accept": "application/json"}, params={}, microversion=None, ) self.assertRaises(StopIteration, next, results) self.session.get.assert_called_with( 'https://example.com/next-url', headers={"Accept": "application/json"}, params={}, microversion=None, ) def test_list_multi_page_no_early_termination(self): # This tests verifies that multipages are not early terminated. # APIs can set max_limit to the number of items returned in each # query. If that max_limit is smaller than the limit given by the # user, the return value would contain less items than the limit, # but that doesn't stand to reason that there are no more records, # we should keep trying to get more results. 
ids = [1, 2, 3, 4] resp1 = mock.Mock() resp1.status_code = 200 resp1.links = {} resp1.json.return_value = { # API's max_limit is set to 2. "resources": [{"id": ids[0]}, {"id": ids[1]}], } resp2 = mock.Mock() resp2.status_code = 200 resp2.links = {} resp2.json.return_value = { # API's max_limit is set to 2. "resources": [{"id": ids[2]}, {"id": ids[3]}], } resp3 = mock.Mock() resp3.status_code = 200 resp3.json.return_value = { "resources": [], } self.session.get.side_effect = [resp1, resp2, resp3] results = self.sot.list(self.session, limit=3, paginated=True) # First page constains only two items, less than the limit given result0 = next(results) self.assertEqual(result0.id, ids[0]) result1 = next(results) self.assertEqual(result1.id, ids[1]) self.session.get.assert_called_with( self.base_path, headers={"Accept": "application/json"}, params={"limit": 3}, microversion=None, ) # Second page contains another two items result2 = next(results) self.assertEqual(result2.id, ids[2]) result3 = next(results) self.assertEqual(result3.id, ids[3]) self.session.get.assert_called_with( self.base_path, headers={"Accept": "application/json"}, params={"limit": 3, "marker": 2}, microversion=None, ) # Ensure we're done after those four items self.assertRaises(StopIteration, next, results) # Ensure we've given the last try to get more results self.session.get.assert_called_with( self.base_path, headers={"Accept": "application/json"}, params={"limit": 3, "marker": 4}, microversion=None, ) # Ensure we made three calls to get this done self.assertEqual(3, len(self.session.get.call_args_list)) def test_list_multi_page_inferred_additional(self): # If we explicitly request a limit and we receive EXACTLY that # amount of results and there is no next link, we make one additional # call to check to see if there are more records and the service is # just sad. # NOTE(mordred) In a perfect world we would not do this. 
But it's 2018
        # and I don't think anyone has any illusions that we live in a perfect
        # world anymore.
        ids = [1, 2, 3]

        resp1 = mock.Mock()
        resp1.status_code = 200
        resp1.links = {}
        resp1.json.return_value = {
            "resources": [{"id": ids[0]}, {"id": ids[1]}],
        }
        resp2 = mock.Mock()
        resp2.status_code = 200
        resp2.links = {}
        resp2.json.return_value = {"resources": [{"id": ids[2]}]}

        self.session.get.side_effect = [resp1, resp2]

        results = self.sot.list(self.session, limit=2, paginated=True)

        # Get the first page's two items
        result0 = next(results)
        self.assertEqual(result0.id, ids[0])
        result1 = next(results)
        self.assertEqual(result1.id, ids[1])
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={"limit": 2},
            microversion=None,
        )

        result2 = next(results)
        self.assertEqual(result2.id, ids[2])
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={'limit': 2, 'marker': 2},
            microversion=None,
        )

        # Ensure we're done after those three items
        # In python3.7, PEP 479 is enabled for all code, and StopIteration
        # raised directly from code is turned into a RuntimeError.
        # Something about how mock is implemented triggers that here.
        self.assertRaises((StopIteration, RuntimeError), next, results)

        # Three calls total: the final "are there more records?" probe is
        # recorded by the mock even though its exhausted side_effect list
        # then raises StopIteration.
        self.assertEqual(3, len(self.session.get.call_args_list))

    def test_list_multi_page_header_count(self):
        # Exercise header-driven pagination (pagination_key) rather than
        # link-driven pagination.
        class Test(self.test_class):
            resources_key = None
            pagination_key = 'X-Container-Object-Count'

        self.sot = Test()

        # Swift returns a total number of objects in a header and we compare
        # that against the total number returned to know if we need to fetch
        # more objects.
ids = [1, 2, 3] resp1 = mock.Mock() resp1.status_code = 200 resp1.links = {} resp1.headers = {'X-Container-Object-Count': 3} resp1.json.return_value = [{"id": ids[0]}, {"id": ids[1]}] resp2 = mock.Mock() resp2.status_code = 200 resp2.links = {} resp2.headers = {'X-Container-Object-Count': 3} resp2.json.return_value = [{"id": ids[2]}] self.session.get.side_effect = [resp1, resp2] results = self.sot.list(self.session, paginated=True) # Get the first page's two items result0 = next(results) self.assertEqual(result0.id, ids[0]) result1 = next(results) self.assertEqual(result1.id, ids[1]) self.session.get.assert_called_with( self.base_path, headers={"Accept": "application/json"}, params={}, microversion=None, ) result2 = next(results) self.assertEqual(result2.id, ids[2]) self.session.get.assert_called_with( self.base_path, headers={"Accept": "application/json"}, params={'marker': 2}, microversion=None, ) # Ensure we're done after those three items self.assertRaises(StopIteration, next, results) # Ensure we only made two calls to get this done self.assertEqual(2, len(self.session.get.call_args_list)) def test_list_multi_page_link_header(self): # Swift returns a total number of objects in a header and we compare # that against the total number returned to know if we need to fetch # more objects. 
ids = [1, 2, 3] resp1 = mock.Mock() resp1.status_code = 200 resp1.links = { 'next': {'uri': 'https://example.com/next-url', 'rel': 'next'} } resp1.headers = {} resp1.json.return_value = { "resources": [{"id": ids[0]}, {"id": ids[1]}], } resp2 = mock.Mock() resp2.status_code = 200 resp2.links = {} resp2.headers = {} resp2.json.return_value = {"resources": [{"id": ids[2]}]} self.session.get.side_effect = [resp1, resp2] results = self.sot.list(self.session, paginated=True) # Get the first page's two items result0 = next(results) self.assertEqual(result0.id, ids[0]) result1 = next(results) self.assertEqual(result1.id, ids[1]) self.session.get.assert_called_with( self.base_path, headers={"Accept": "application/json"}, params={}, microversion=None, ) result2 = next(results) self.assertEqual(result2.id, ids[2]) self.session.get.assert_called_with( 'https://example.com/next-url', headers={"Accept": "application/json"}, params={}, microversion=None, ) # Ensure we're done after those three items self.assertRaises(StopIteration, next, results) # Ensure we only made two calls to get this done self.assertEqual(2, len(self.session.get.call_args_list)) def test_bulk_create_invalid_data_passed(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path create_method = 'POST' allow_create = True Test._prepare_request = mock.Mock() self.assertRaises(ValueError, Test.bulk_create, self.session, []) self.assertRaises(ValueError, Test.bulk_create, self.session, None) self.assertRaises(ValueError, Test.bulk_create, self.session, object) self.assertRaises(ValueError, Test.bulk_create, self.session, {}) self.assertRaises(ValueError, Test.bulk_create, self.session, "hi!") self.assertRaises(ValueError, Test.bulk_create, self.session, ["hi!"]) def _test_bulk_create( self, cls, http_method, microversion=None, base_path=None, **params ): req1 = mock.Mock() req2 = mock.Mock() req1.body = {'name': 'resource1'} req2.body = {'name': 'resource2'} req1.url = 'uri' 
req2.url = 'uri' req1.headers = 'headers' req2.headers = 'headers' request_body = { "tests": [ {'name': 'resource1', 'id': 'id1'}, {'name': 'resource2', 'id': 'id2'}, ] } cls._prepare_request = mock.Mock(side_effect=[req1, req2]) mock_response = mock.Mock() mock_response.status_code = 200 mock_response.links = {} mock_response.json.return_value = request_body http_method.return_value = mock_response res = list( cls.bulk_create( self.session, [{'name': 'resource1'}, {'name': 'resource2'}], base_path=base_path, **params, ) ) self.assertEqual(len(res), 2) self.assertEqual(res[0].id, 'id1') self.assertEqual(res[1].id, 'id2') http_method.assert_called_once_with( self.request.url, json={'tests': [req1.body, req2.body]}, headers=self.request.headers, microversion=microversion, params=params, ) def test_bulk_create_post(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path create_method = 'POST' allow_create = True resources_key = 'tests' self._test_bulk_create(Test, self.session.post) def test_bulk_create_put(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path create_method = 'PUT' allow_create = True resources_key = 'tests' self._test_bulk_create(Test, self.session.put) def test_bulk_create_with_params(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path create_method = 'POST' allow_create = True resources_key = 'tests' self._test_bulk_create(Test, self.session.post, answer=42) def test_bulk_create_with_microversion(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path create_method = 'POST' allow_create = True resources_key = 'tests' _max_microversion = '1.42' self._test_bulk_create(Test, self.session.post, microversion='1.42') def test_bulk_create_with_base_path(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path create_method = 'POST' allow_create = True resources_key = 
'tests' self._test_bulk_create(Test, self.session.post, base_path='dummy') def test_bulk_create_fail(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path create_method = 'POST' allow_create = False resources_key = 'tests' self.assertRaises( exceptions.MethodNotSupported, Test.bulk_create, self.session, [{'name': 'name'}], ) def test_bulk_create_fail_on_request(self): class Test(resource.Resource): service = self.service_name base_path = self.base_path create_method = 'POST' allow_create = True resources_key = 'tests' response = FakeResponse({}, status_code=409) response.content = ( '{"TestError": {"message": "Failed to parse ' 'request. Required attribute \'foo\' not ' 'specified", "type": "HTTPBadRequest", ' '"detail": ""}}' ) response.reason = 'Bad Request' self.session.post.return_value = response self.assertRaises( exceptions.ConflictException, Test.bulk_create, self.session, [{'name': 'name'}], ) class TestResourceFind(base.TestCase): result = 1 class Base(resource.Resource): @classmethod def existing(cls, **kwargs): response = mock.Mock() response.status_code = 404 raise exceptions.NotFoundException('Not Found', response=response) @classmethod def list(cls, session, **params): return [] class OneResult(Base): @classmethod def _get_one_match(cls, *args): return TestResourceFind.result class NoResults(Base): @classmethod def _get_one_match(cls, *args): return None class OneResultWithQueryParams(OneResult): _query_mapping = resource.QueryParameters('name') def setUp(self): super().setUp() self.no_results = self.NoResults self.one_result = self.OneResult self.one_result_with_qparams = self.OneResultWithQueryParams def test_find_short_circuit(self): value = 1 class Test(resource.Resource): @classmethod def existing(cls, **kwargs): mock_match = mock.Mock() mock_match.fetch.return_value = value return mock_match result = Test.find(self.cloud.compute, "name") self.assertEqual(result, value) def test_no_match_raise(self): 
        self.assertRaises(
            exceptions.NotFoundException,
            self.no_results.find,
            self.cloud.compute,
            "name",
            ignore_missing=False,
        )

    def test_no_match_return(self):
        # With ignore_missing=True a miss yields None instead of raising.
        self.assertIsNone(
            self.no_results.find(
                self.cloud.compute, "name", ignore_missing=True
            )
        )

    def test_find_result_name_not_in_query_parameters(self):
        # When 'name' is not a declared query parameter, find() first tries
        # existing(id=...) and then falls back to an unfiltered list().
        with mock.patch.object(
            self.one_result, 'existing', side_effect=self.OneResult.existing
        ) as mock_existing, mock.patch.object(
            self.one_result, 'list', side_effect=self.OneResult.list
        ) as mock_list:
            self.assertEqual(
                self.result, self.one_result.find(self.cloud.compute, "name")
            )
            mock_existing.assert_called_once_with(
                id='name', connection=mock.ANY
            )
            mock_list.assert_called_once_with(mock.ANY)

    def test_find_result_name_in_query_parameters(self):
        self.assertEqual(
            self.result,
            self.one_result_with_qparams.find(self.cloud.compute, "name"),
        )

    def test_match_empty_results(self):
        # No candidates -> no match.
        self.assertIsNone(resource.Resource._get_one_match("name", []))

    def test_no_match_by_name(self):
        the_name = "Brian"

        match = mock.Mock(spec=resource.Resource)
        match.name = the_name

        result = resource.Resource._get_one_match("Richard", [match])

        # NOTE(review): the second argument to assertIsNone is the failure
        # *message*, not a comparison operand — only `result is None` is
        # actually asserted here.
        self.assertIsNone(result, match)

    def test_single_match_by_name(self):
        the_name = "Brian"

        match = mock.Mock(spec=resource.Resource)
        match.name = the_name

        result = resource.Resource._get_one_match(the_name, [match])

        self.assertIs(result, match)

    def test_single_match_by_id(self):
        the_id = "Brian"

        match = mock.Mock(spec=resource.Resource)
        match.id = the_id

        result = resource.Resource._get_one_match(the_id, [match])

        self.assertIs(result, match)

    def test_single_match_by_alternate_id(self):
        # A Body field flagged alternate_id=True also participates in
        # matching by identifier.
        the_id = "Richard"

        class Test(resource.Resource):
            other_id = resource.Body("other_id", alternate_id=True)

        match = Test(other_id=the_id)
        result = Test._get_one_match(the_id, [match])

        self.assertIs(result, match)

    def test_multiple_matches(self):
        # Two candidates matching the same id must raise DuplicateResource.
        the_id = "Brian"

        match = mock.Mock(spec=resource.Resource)
        match.id = the_id

        self.assertRaises(
            exceptions.DuplicateResource,
resource.Resource._get_one_match, the_id, [match, match], ) def test_list_no_base_path(self): with mock.patch.object(self.Base, "list") as list_mock: self.Base.find(self.cloud.compute, "name") list_mock.assert_called_with(self.cloud.compute) def test_list_base_path(self): with mock.patch.object(self.Base, "list") as list_mock: self.Base.find( self.cloud.compute, "name", list_base_path='/dummy/list' ) list_mock.assert_called_with( self.cloud.compute, base_path='/dummy/list' ) class TestWait(base.TestCase): def setUp(self): super().setUp() handler = logging.StreamHandler(self._log_stream) formatter = logging.Formatter('%(asctime)s %(name)-32s %(message)s') handler.setFormatter(formatter) logger = logging.getLogger('openstack.iterate_timeout') logger.setLevel(logging.DEBUG) logger.addHandler(handler) @staticmethod def _fake_resource(statuses=None, progresses=None, *, attribute='status'): if statuses is None: statuses = ['building', 'building', 'building', 'active'] def fetch(*args, **kwargs): # when we get to the last status, keep returning that if statuses: setattr(fake_resource, attribute, statuses.pop(0)) if progresses: fake_resource.progress = progresses.pop(0) return fake_resource spec = ['id', attribute, 'fetch'] if progresses: spec.append('progress') fake_resource = mock.Mock(spec=spec) setattr(fake_resource, attribute, statuses.pop(0)) fake_resource.fetch.side_effect = fetch return fake_resource class TestWaitForStatus(TestWait): def test_immediate_status(self): status = "loling" res = mock.Mock(spec=['id', 'status']) res.status = status result = resource.wait_for_status( self.cloud.compute, res, status, None, interval=1, wait=1, ) self.assertEqual(res, result) def test_immediate_status_case(self): status = "LOLing" res = mock.Mock(spec=['id', 'status']) res.status = status result = resource.wait_for_status( self.cloud.compute, res, 'lOling', None, interval=1, wait=1, ) self.assertEqual(res, result) def test_immediate_status_different_attribute(self): status = 
"loling" res = mock.Mock(spec=['id', 'mood']) res.mood = status result = resource.wait_for_status( self.cloud.compute, res, status, None, interval=1, wait=1, attribute='mood', ) self.assertEqual(res, result) def test_status_match(self): status = "loling" # other gets past the first check, two anothers gets through # the sleep loop, and the third matches statuses = ["first", "other", "another", "another", status] res = self._fake_resource(statuses) result = resource.wait_for_status( mock.Mock(), res, status, None, interval=1, wait=5, ) self.assertEqual(result, res) def test_status_match_with_none(self): status = "loling" # apparently, None is a correct state in some cases statuses = [None, "other", None, "another", status] res = self._fake_resource(statuses) result = resource.wait_for_status( mock.Mock(), res, status, None, interval=1, wait=5, ) self.assertEqual(result, res) def test_status_match_none(self): status = None # apparently, None can be expected status in some cases statuses = ["first", "other", "another", "another", status] res = self._fake_resource(statuses) result = resource.wait_for_status( mock.Mock(), res, status, None, interval=1, wait=5, ) self.assertEqual(result, res) def test_status_match_different_attribute(self): status = "loling" statuses = ["first", "other", "another", "another", status] res = self._fake_resource(statuses, attribute='mood') result = resource.wait_for_status( mock.Mock(), res, status, None, interval=1, wait=5, attribute='mood', ) self.assertEqual(result, res) def test_status_fails(self): failure = "crying" statuses = ["success", "other", failure] res = self._fake_resource(statuses) self.assertRaises( exceptions.ResourceFailure, resource.wait_for_status, mock.Mock(), res, "loling", [failure], interval=1, wait=5, ) def test_status_fails_different_attribute(self): failure = "crying" statuses = ["success", "other", failure] res = self._fake_resource(statuses, attribute='mood') self.assertRaises( exceptions.ResourceFailure, 
resource.wait_for_status, mock.Mock(), res, "loling", [failure.upper()], interval=1, wait=5, attribute='mood', ) def test_timeout(self): status = "loling" # The first "other" gets past the first check, and then three # pairs of "other" statuses run through the sleep counter loop, # after which time should be up. This is because we have a # one second interval and three second waiting period. statuses = ["other"] * 7 res = self._fake_resource(statuses) self.assertRaises( exceptions.ResourceTimeout, resource.wait_for_status, self.cloud.compute, res, status, None, 0.01, 0.1, ) def test_no_sleep(self): statuses = ["other"] res = self._fake_resource(statuses) self.assertRaises( exceptions.ResourceTimeout, resource.wait_for_status, self.cloud.compute, res, "status", None, interval=0, wait=-1, ) def test_callback(self): """Callback is called with 'progress' attribute.""" statuses = ['building', 'building', 'building', 'building', 'active'] progresses = [0, 25, 50, 100] res = self._fake_resource(statuses=statuses, progresses=progresses) callback = mock.Mock() result = resource.wait_for_status( mock.Mock(), res, 'active', None, interval=0.1, wait=1, callback=callback, ) self.assertEqual(result, res) callback.assert_has_calls([mock.call(x) for x in progresses]) def test_callback_without_progress(self): """Callback is called with 0 if 'progress' attribute is missing.""" statuses = ['building', 'building', 'building', 'building', 'active'] res = self._fake_resource(statuses=statuses) callback = mock.Mock() result = resource.wait_for_status( mock.Mock(), res, 'active', None, interval=0.1, wait=1, callback=callback, ) self.assertEqual(result, res) # there are 5 statuses but only 3 callback calls since the initial # status and final status don't result in calls callback.assert_has_calls([mock.call(0)] * 3) class TestWaitForDelete(TestWait): def test_success_not_found(self): response = mock.Mock() response.headers = {} response.status_code = 404 res = mock.Mock() 
res.fetch.side_effect = [ res, res, exceptions.NotFoundException('Not Found', response), ] result = resource.wait_for_delete(self.cloud.compute, res, 1, 3) self.assertEqual(result, res) def test_status(self): """Successful deletion indicated by status.""" statuses = ['active', 'deleting', 'deleting', 'deleting', 'deleted'] res = self._fake_resource(statuses=statuses) result = resource.wait_for_delete( mock.Mock(), res, interval=0.1, wait=1, ) self.assertEqual(result, res) def test_callback(self): """Callback is called with 'progress' attribute.""" statuses = ['active', 'deleting', 'deleting', 'deleting', 'deleted'] progresses = [0, 25, 50, 100] res = self._fake_resource(statuses=statuses, progresses=progresses) callback = mock.Mock() result = resource.wait_for_delete( mock.Mock(), res, interval=1, wait=5, callback=callback, ) self.assertEqual(result, res) callback.assert_has_calls([mock.call(x) for x in progresses]) def test_callback_without_progress(self): """Callback is called with 0 if 'progress' attribute is missing.""" statuses = ['active', 'deleting', 'deleting', 'deleting', 'deleted'] res = self._fake_resource(statuses=statuses) callback = mock.Mock() result = resource.wait_for_delete( mock.Mock(), res, interval=1, wait=5, callback=callback, ) self.assertEqual(result, res) # there are 5 statuses but only 3 callback calls since the initial # status and final status don't result in calls callback.assert_has_calls([mock.call(0)] * 3) def test_timeout(self): res = mock.Mock() res.status = 'ACTIVE' res.fetch.return_value = res self.assertRaises( exceptions.ResourceTimeout, resource.wait_for_delete, self.cloud.compute, res, 0.1, 0.3, ) @mock.patch.object(resource.Resource, '_get_microversion', autospec=True) class TestAssertMicroversionFor(base.TestCase): session = mock.Mock() res = resource.Resource() def test_compatible(self, mock_get_ver): mock_get_ver.return_value = '1.42' self.assertEqual( '1.42', self.res._assert_microversion_for(self.session, 'fetch', 
'1.6'), ) mock_get_ver.assert_called_once_with(self.session, action='fetch') def test_incompatible(self, mock_get_ver): mock_get_ver.return_value = '1.1' self.assertRaisesRegex( exceptions.NotSupported, '1.6 is required, but 1.1 will be used', self.res._assert_microversion_for, self.session, 'fetch', '1.6', ) mock_get_ver.assert_called_once_with(self.session, action='fetch') def test_custom_message(self, mock_get_ver): mock_get_ver.return_value = '1.1' self.assertRaisesRegex( exceptions.NotSupported, 'boom.*1.6 is required, but 1.1 will be used', self.res._assert_microversion_for, self.session, 'fetch', '1.6', error_message='boom', ) mock_get_ver.assert_called_once_with(self.session, action='fetch') def test_none(self, mock_get_ver): mock_get_ver.return_value = None self.assertRaisesRegex( exceptions.NotSupported, '1.6 is required, but the default version', self.res._assert_microversion_for, self.session, 'fetch', '1.6', ) mock_get_ver.assert_called_once_with(self.session, action='fetch') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_stats.py0000664000175000017500000002744600000000000023123 0ustar00zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # Copyright 2014 OpenStack Foundation # Copyright 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools import os import pprint import select import socket import threading import time import fixtures from keystoneauth1 import exceptions import prometheus_client from requests import exceptions as rexceptions import testtools.content from openstack.tests.unit import base class StatsdFixture(fixtures.Fixture): def _setUp(self): self.running = True self.thread = threading.Thread(target=self.run) self.thread.daemon = True self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.sock.bind(('', 0)) self.port = self.sock.getsockname()[1] self.wake_read, self.wake_write = os.pipe() self.stats = [] self.thread.start() self.addCleanup(self._cleanup) def run(self): while self.running: poll = select.poll() poll.register(self.sock, select.POLLIN) poll.register(self.wake_read, select.POLLIN) ret = poll.poll() for fd, event in ret: if fd == self.sock.fileno(): data = self.sock.recvfrom(1024) if not data: return self.stats.append(data[0]) if fd == self.wake_read: return def _cleanup(self): self.running = False os.write(self.wake_write, b'1\n') self.thread.join() class TestStats(base.TestCase): def setUp(self): self.statsd = StatsdFixture() self.useFixture(self.statsd) # note, use 127.0.0.1 rather than localhost to avoid getting ipv6 # see: https://github.com/jsocol/pystatsd/issues/61 self.useFixture( fixtures.EnvironmentVariable('STATSD_HOST', '127.0.0.1') ) self.useFixture( fixtures.EnvironmentVariable('STATSD_PORT', str(self.statsd.port)) ) self.add_info_on_exception('statsd_content', self.statsd.stats) # Set up the above things before the super setup so that we have the # environment variables set when the Connection is created. 
super().setUp() self._registry = prometheus_client.CollectorRegistry() self.cloud.config._collector_registry = self._registry self.addOnException(self._add_prometheus_samples) def _add_prometheus_samples(self, exc_info): samples = [] for metric in self._registry.collect(): for s in metric.samples: samples.append(s) self.addDetail( 'prometheus_samples', testtools.content.text_content(pprint.pformat(samples)), ) def assert_reported_stat(self, key, value=None, kind=None): """Check statsd output Check statsd return values. A ``value`` should specify a ``kind``, however a ``kind`` may be specified without a ``value`` for a generic match. Leave both empy to just check for key presence. :arg str key: The statsd key :arg str value: The expected value of the metric ``key`` :arg str kind: The expected type of the metric ``key`` For example - ``c`` counter - ``g`` gauge - ``ms`` timing - ``s`` set """ self.assertIsNotNone(self.statsd) if value: self.assertNotEqual(kind, None) start = time.time() while time.time() < (start + 1): # Note our fake statsd just queues up results in a queue. # We just keep going through them until we find one that # matches, or fail out. If statsd pipelines are used, # large single packets are sent with stats separated by # newlines; thus we first flatten the stats out into # single entries. stats = itertools.chain.from_iterable( [s.decode('utf-8').split('\n') for s in self.statsd.stats] ) for stat in stats: k, v = stat.split(':') if key == k: if kind is None: # key with no qualifiers is found return True s_value, s_kind = v.split('|') # if no kind match, look for other keys if kind != s_kind: continue if value: # special-case value|ms because statsd can turn # timing results into float of indeterminate # length, hence foiling string matching. 
if kind == 'ms': if float(value) == float(s_value): return True if value == s_value: return True # otherwise keep looking for other matches continue # this key matches return True time.sleep(0.1) raise Exception("Key %s not found in reported stats" % key) def assert_prometheus_stat(self, name, value, labels=None): sample_value = self._registry.get_sample_value(name, labels) self.assertEqual(sample_value, value) def test_list_projects(self): mock_uri = self.get_mock_url( service_type='identity', resource='projects', base_url_append='v3' ) self.register_uris( [ dict( method='GET', uri=mock_uri, status_code=200, json={'projects': []}, ) ] ) self.cloud.list_projects() self.assert_calls() self.assert_reported_stat( 'openstack.api.identity.GET.projects.200', value='1', kind='c' ) self.assert_prometheus_stat( 'openstack_http_requests_total', 1, dict( service_type='identity', endpoint=mock_uri, method='GET', status_code='200', ), ) def test_projects(self): mock_uri = self.get_mock_url( service_type='identity', resource='projects', base_url_append='v3' ) self.register_uris( [ dict( method='GET', uri=mock_uri, status_code=200, json={'projects': []}, ) ] ) list(self.cloud.identity.projects()) self.assert_calls() self.assert_reported_stat( 'openstack.api.identity.GET.projects.200', value='1', kind='c' ) self.assert_prometheus_stat( 'openstack_http_requests_total', 1, dict( service_type='identity', endpoint=mock_uri, method='GET', status_code='200', ), ) def test_servers(self): mock_uri = 'https://compute.example.com/v2.1/servers/detail' self.register_uris( [ self.get_nova_discovery_mock_dict(), dict( method='GET', uri=mock_uri, status_code=200, json={'servers': []}, ), ] ) list(self.cloud.compute.servers()) self.assert_calls() self.assert_reported_stat( 'openstack.api.compute.GET.servers_detail.200', value='1', kind='c' ) self.assert_reported_stat( 'openstack.api.compute.GET.servers_detail.200', value='0', kind='ms', ) self.assert_prometheus_stat( 
'openstack_http_requests_total', 1, dict( service_type='compute', endpoint=mock_uri, method='GET', status_code='200', ), ) def test_servers_no_detail(self): mock_uri = 'https://compute.example.com/v2.1/servers' self.register_uris( [ dict( method='GET', uri=mock_uri, status_code=200, json={'servers': []}, ) ] ) self.cloud.compute.get('/servers') self.assert_calls() self.assert_reported_stat( 'openstack.api.compute.GET.servers.200', value='1', kind='c' ) self.assert_reported_stat( 'openstack.api.compute.GET.servers.200', value='0', kind='ms' ) self.assert_reported_stat( 'openstack.api.compute.GET.servers.attempted', value='1', kind='c' ) self.assert_prometheus_stat( 'openstack_http_requests_total', 1, dict( service_type='compute', endpoint=mock_uri, method='GET', status_code='200', ), ) def test_servers_error(self): mock_uri = 'https://compute.example.com/v2.1/servers' self.register_uris( [dict(method='GET', uri=mock_uri, status_code=500, json={})] ) self.cloud.compute.get('/servers') self.assert_calls() self.assert_reported_stat( 'openstack.api.compute.GET.servers.500', value='1', kind='c' ) self.assert_reported_stat( 'openstack.api.compute.GET.servers.500', value='0', kind='ms' ) self.assert_reported_stat( 'openstack.api.compute.GET.servers.attempted', value='1', kind='c' ) self.assert_prometheus_stat( 'openstack_http_requests_total', 1, dict( service_type='compute', endpoint=mock_uri, method='GET', status_code='500', ), ) def test_timeout(self): mock_uri = 'https://compute.example.com/v2.1/servers' self.register_uris( [dict(method='GET', uri=mock_uri, exc=rexceptions.ConnectTimeout)] ) try: self.cloud.compute.get('/servers') except exceptions.ConnectTimeout: pass self.assert_reported_stat( 'openstack.api.compute.GET.servers.failed', value='1', kind='c' ) self.assert_reported_stat( 'openstack.api.compute.GET.servers.attempted', value='1', kind='c' ) class TestNoStats(base.TestCase): def setUp(self): super().setUp() self.statsd = StatsdFixture() 
self.useFixture(self.statsd) def test_no_stats(self): mock_uri = self.get_mock_url( service_type='identity', resource='projects', base_url_append='v3' ) self.register_uris( [ dict( method='GET', uri=mock_uri, status_code=200, json={'projects': []}, ) ] ) self.cloud.identity._statsd_client = None list(self.cloud.identity.projects()) self.assert_calls() self.assertEqual([], self.statsd.stats) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/test_utils.py0000664000175000017500000003253200000000000023115 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import concurrent.futures import hashlib import logging import sys from unittest import mock import fixtures import os_service_types import testtools import openstack from openstack import exceptions from openstack.tests.unit import base from openstack import utils class Test_enable_logging(base.TestCase): def setUp(self): super().setUp() self.openstack_logger = mock.Mock() self.openstack_logger.handlers = [] self.ksa_logger_root = mock.Mock() self.ksa_logger_root.handlers = [] self.ksa_logger_1 = mock.Mock() self.ksa_logger_1.handlers = [] self.ksa_logger_2 = mock.Mock() self.ksa_logger_2.handlers = [] self.ksa_logger_3 = mock.Mock() self.ksa_logger_3.handlers = [] self.urllib3_logger = mock.Mock() self.urllib3_logger.handlers = [] self.stevedore_logger = mock.Mock() self.stevedore_logger.handlers = [] self.fake_get_logger = mock.Mock() self.fake_get_logger.side_effect = [ self.openstack_logger, self.ksa_logger_root, self.urllib3_logger, self.stevedore_logger, self.ksa_logger_1, self.ksa_logger_2, self.ksa_logger_3, ] self.useFixture( fixtures.MonkeyPatch('logging.getLogger', self.fake_get_logger) ) def _console_tests(self, level, debug, stream): openstack.enable_logging(debug=debug, stream=stream) self.assertEqual(self.openstack_logger.addHandler.call_count, 1) self.openstack_logger.setLevel.assert_called_with(level) def _file_tests(self, level, debug): file_handler = mock.Mock() self.useFixture( fixtures.MonkeyPatch('logging.FileHandler', file_handler) ) fake_path = "fake/path.log" openstack.enable_logging(debug=debug, path=fake_path) file_handler.assert_called_with(fake_path) self.assertEqual(self.openstack_logger.addHandler.call_count, 1) self.openstack_logger.setLevel.assert_called_with(level) def test_none(self): openstack.enable_logging(debug=True) self.fake_get_logger.assert_has_calls([]) self.openstack_logger.setLevel.assert_called_with(logging.DEBUG) self.assertEqual(self.openstack_logger.addHandler.call_count, 1) self.assertIsInstance( 
self.openstack_logger.addHandler.call_args_list[0][0][0], logging.StreamHandler, ) def test_debug_console_stderr(self): self._console_tests(logging.DEBUG, True, sys.stderr) def test_warning_console_stderr(self): self._console_tests(logging.INFO, False, sys.stderr) def test_debug_console_stdout(self): self._console_tests(logging.DEBUG, True, sys.stdout) def test_warning_console_stdout(self): self._console_tests(logging.INFO, False, sys.stdout) def test_debug_file(self): self._file_tests(logging.DEBUG, True) def test_warning_file(self): self._file_tests(logging.INFO, False) class Test_urljoin(base.TestCase): def test_strings(self): root = "http://www.example.com" leaves = "foo", "bar" result = utils.urljoin(root, *leaves) self.assertEqual(result, "http://www.example.com/foo/bar") def test_with_none(self): root = "http://www.example.com" leaves = "foo", None result = utils.urljoin(root, *leaves) self.assertEqual(result, "http://www.example.com/foo/") def test_unicode_strings(self): root = "http://www.example.com" leaves = "ascii", "extra_chars-™" try: result = utils.urljoin(root, *leaves) except Exception: self.fail("urljoin failed on unicode strings") self.assertEqual(result, "http://www.example.com/ascii/extra_chars-™") class TestSupportsMicroversion(base.TestCase): def setUp(self): super().setUp() self.adapter = mock.Mock(spec=['get_endpoint_data']) self.endpoint_data = mock.Mock( spec=['min_microversion', 'max_microversion'], min_microversion='1.1', max_microversion='1.99', ) self.adapter.get_endpoint_data.return_value = self.endpoint_data def test_requested_supported_no_default(self): self.adapter.default_microversion = None self.assertTrue(utils.supports_microversion(self.adapter, '1.2')) def test_requested_not_supported_no_default(self): self.adapter.default_microversion = None self.assertFalse(utils.supports_microversion(self.adapter, '2.2')) def test_requested_not_supported_no_default_exception(self): self.adapter.default_microversion = None 
self.assertRaises( exceptions.SDKException, utils.supports_microversion, self.adapter, '2.2', True, ) def test_requested_supported_higher_default(self): self.adapter.default_microversion = '1.8' self.assertTrue(utils.supports_microversion(self.adapter, '1.6')) def test_requested_supported_equal_default(self): self.adapter.default_microversion = '1.8' self.assertTrue(utils.supports_microversion(self.adapter, '1.8')) def test_requested_supported_lower_default(self): self.adapter.default_microversion = '1.2' self.assertFalse(utils.supports_microversion(self.adapter, '1.8')) def test_requested_supported_lower_default_exception(self): self.adapter.default_microversion = '1.2' self.assertRaises( exceptions.SDKException, utils.supports_microversion, self.adapter, '1.8', True, ) @mock.patch('openstack.utils.supports_microversion') def test_require_microversion(self, sm_mock): utils.require_microversion(self.adapter, '1.2') sm_mock.assert_called_with(self.adapter, '1.2', raise_exception=True) class TestMaximumSupportedMicroversion(base.TestCase): def setUp(self): super().setUp() self.adapter = mock.Mock(spec=['get_endpoint_data']) self.endpoint_data = mock.Mock( spec=['min_microversion', 'max_microversion'], min_microversion=None, max_microversion='1.99', ) self.adapter.get_endpoint_data.return_value = self.endpoint_data def test_with_none(self): self.assertIsNone( utils.maximum_supported_microversion(self.adapter, None) ) def test_with_value(self): self.assertEqual( '1.42', utils.maximum_supported_microversion(self.adapter, '1.42') ) def test_value_more_than_max(self): self.assertEqual( '1.99', utils.maximum_supported_microversion(self.adapter, '1.100') ) def test_value_less_than_min(self): self.endpoint_data.min_microversion = '1.42' self.assertIsNone( utils.maximum_supported_microversion(self.adapter, '1.2') ) class TestOsServiceTypesVersion(base.TestCase): def test_ost_version(self): ost_version = '2019-05-01T19:53:21.498745' self.assertEqual( ost_version, 
os_service_types.ServiceTypes().version, "This project must be pinned to the latest version of " "os-service-types. Please bump requirements.txt and " "lower-constraints.txt accordingly.", ) class TestTinyDAG(base.TestCase): test_graph = { 'a': ['b', 'd', 'f'], 'b': ['c', 'd'], 'c': ['d'], 'd': ['e'], 'e': [], 'f': ['e'], 'g': ['e'], } def _verify_order(self, test_graph, test_list): for k, v in test_graph.items(): for dep in v: self.assertTrue(test_list.index(k) < test_list.index(dep)) def test_from_dict(self): sot = utils.TinyDAG() sot.from_dict(self.test_graph) def test_topological_sort(self): sot = utils.TinyDAG() sot.from_dict(self.test_graph) sorted_list = sot.topological_sort() self._verify_order(sot.graph, sorted_list) self.assertEqual(len(self.test_graph.keys()), len(sorted_list)) def test_walk(self): sot = utils.TinyDAG() sot.from_dict(self.test_graph) sorted_list = [] for node in sot.walk(): sorted_list.append(node) sot.node_done(node) self._verify_order(sot.graph, sorted_list) self.assertEqual(len(self.test_graph.keys()), len(sorted_list)) def test_walk_parallel(self): sot = utils.TinyDAG() sot.from_dict(self.test_graph) sorted_list = [] with concurrent.futures.ThreadPoolExecutor(max_workers=15) as executor: for node in sot.walk(timeout=1): executor.submit(test_walker_fn, sot, node, sorted_list) self._verify_order(sot.graph, sorted_list) self.assertEqual(len(self.test_graph.keys()), len(sorted_list)) def test_walk_raise(self): sot = utils.TinyDAG() sot.from_dict(self.test_graph) bad_node = 'f' with testtools.ExpectedException(exceptions.SDKException): for node in sot.walk(timeout=1): if node != bad_node: sot.node_done(node) def test_add_node_after_edge(self): sot = utils.TinyDAG() sot.add_node('a') sot.add_edge('a', 'b') sot.add_node('a') self.assertEqual(sot._graph['a'], set('b')) def test_walker_fn(graph, node, lst): lst.append(node) graph.node_done(node) class Test_md5(base.TestCase): def setUp(self): super().setUp() self.md5_test_data = b"Openstack 
forever" try: self.md5_digest = hashlib.md5( # nosec self.md5_test_data ).hexdigest() self.fips_enabled = False except ValueError: self.md5_digest = '0d6dc3c588ae71a04ce9a6beebbbba06' self.fips_enabled = True def test_md5_with_data(self): if not self.fips_enabled: digest = utils.md5(self.md5_test_data).hexdigest() self.assertEqual(digest, self.md5_digest) else: # on a FIPS enabled system, this throws a ValueError: # [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS self.assertRaises(ValueError, utils.md5, self.md5_test_data) if not self.fips_enabled: digest = utils.md5( self.md5_test_data, usedforsecurity=True ).hexdigest() self.assertEqual(digest, self.md5_digest) else: self.assertRaises( ValueError, utils.md5, self.md5_test_data, usedforsecurity=True ) digest = utils.md5( self.md5_test_data, usedforsecurity=False ).hexdigest() self.assertEqual(digest, self.md5_digest) def test_md5_without_data(self): if not self.fips_enabled: test_md5 = utils.md5() test_md5.update(self.md5_test_data) digest = test_md5.hexdigest() self.assertEqual(digest, self.md5_digest) else: self.assertRaises(ValueError, utils.md5) if not self.fips_enabled: test_md5 = utils.md5(usedforsecurity=True) test_md5.update(self.md5_test_data) digest = test_md5.hexdigest() self.assertEqual(digest, self.md5_digest) else: self.assertRaises(ValueError, utils.md5, usedforsecurity=True) test_md5 = utils.md5(usedforsecurity=False) test_md5.update(self.md5_test_data) digest = test_md5.hexdigest() self.assertEqual(digest, self.md5_digest) def test_string_data_raises_type_error(self): if not self.fips_enabled: self.assertRaises(TypeError, hashlib.md5, 'foo') self.assertRaises(TypeError, utils.md5, 'foo') self.assertRaises( TypeError, utils.md5, 'foo', usedforsecurity=True ) else: self.assertRaises(ValueError, hashlib.md5, 'foo') self.assertRaises(ValueError, utils.md5, 'foo') self.assertRaises( ValueError, utils.md5, 'foo', usedforsecurity=True ) self.assertRaises(TypeError, utils.md5, 'foo', 
usedforsecurity=False) def test_none_data_raises_type_error(self): if not self.fips_enabled: self.assertRaises(TypeError, hashlib.md5, None) self.assertRaises(TypeError, utils.md5, None) self.assertRaises(TypeError, utils.md5, None, usedforsecurity=True) else: self.assertRaises(ValueError, hashlib.md5, None) self.assertRaises(ValueError, utils.md5, None) self.assertRaises( ValueError, utils.md5, None, usedforsecurity=True ) self.assertRaises(TypeError, utils.md5, None, usedforsecurity=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.5054388 openstacksdk-4.0.0/openstack/tests/unit/workflow/0000775000175000017500000000000000000000000022211 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/workflow/__init__.py0000664000175000017500000000000000000000000024310 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/workflow/test_cron_trigger.py0000664000175000017500000000645500000000000026320 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.unit import base from openstack.workflow.v2 import cron_trigger FAKE_INPUT = { 'cluster_id': '8c74607c-5a74-4490-9414-a3475b1926c2', 'node_id': 'fba2cc5d-706f-4631-9577-3956048d13a2', 'flavor_id': '1', } FAKE_PARAMS = {} FAKE = { 'id': 'ffaed25e-46f5-4089-8e20-b3b4722fd597', 'pattern': '0 * * * *', 'remaining_executions': 14, 'first_execution_time': '1970-01-01T01:00:00.000000', 'next_execution_time': '1970-01-01T02:00:00.000000', 'workflow_name': 'cluster-coldmigration', 'workflow_id': '1995cf40-c22d-4968-b6e8-558942830642', 'workflow_input': FAKE_INPUT, 'workflow_params': FAKE_PARAMS, } class TestCronTrigger(base.TestCase): def test_basic(self): sot = cron_trigger.CronTrigger() self.assertEqual('cron_trigger', sot.resource_key) self.assertEqual('cron_triggers', sot.resources_key) self.assertEqual('/cron_triggers', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_delete) self.assertDictEqual( { 'marker': 'marker', 'limit': 'limit', 'sort_keys': 'sort_keys', 'sort_dirs': 'sort_dirs', 'fields': 'fields', 'name': 'name', 'workflow_name': 'workflow_name', 'workflow_id': 'workflow_id', 'workflow_input': 'workflow_input', 'workflow_params': 'workflow_params', 'scope': 'scope', 'pattern': 'pattern', 'remaining_executions': 'remaining_executions', 'project_id': 'project_id', 'first_execution_time': 'first_execution_time', 'next_execution_time': 'next_execution_time', 'created_at': 'created_at', 'updated_at': 'updated_at', 'all_projects': 'all_projects', }, sot._query_mapping._mapping, ) def test_make_it(self): sot = cron_trigger.CronTrigger(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['pattern'], sot.pattern) self.assertEqual( FAKE['remaining_executions'], sot.remaining_executions ) self.assertEqual( FAKE['first_execution_time'], sot.first_execution_time ) self.assertEqual(FAKE['next_execution_time'], sot.next_execution_time) 
self.assertEqual(FAKE['workflow_name'], sot.workflow_name) self.assertEqual(FAKE['workflow_id'], sot.workflow_id) self.assertEqual(FAKE['workflow_input'], sot.workflow_input) self.assertEqual(FAKE['workflow_params'], sot.workflow_params) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/workflow/test_execution.py0000664000175000017500000000314100000000000025624 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.unit import base from openstack.workflow.v2 import execution FAKE_INPUT = { 'cluster_id': '8c74607c-5a74-4490-9414-a3475b1926c2', 'node_id': 'fba2cc5d-706f-4631-9577-3956048d13a2', 'flavor_id': '1', } FAKE = { 'id': 'ffaed25e-46f5-4089-8e20-b3b4722fd597', 'workflow_name': 'cluster-coldmigration', 'input': FAKE_INPUT, } class TestExecution(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = execution.Execution() self.assertEqual('execution', sot.resource_key) self.assertEqual('executions', sot.resources_key) self.assertEqual('/executions', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_delete) def test_instantiate(self): sot = execution.Execution(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['workflow_name'], sot.workflow_name) self.assertEqual(FAKE['input'], sot.input) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/workflow/test_version.py0000664000175000017500000000260400000000000025311 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.unit import base from openstack.workflow import version IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'status': '3', } class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_fetch) self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/workflow/test_workflow.py0000664000175000017500000000272600000000000025503 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack.tests.unit import base from openstack.workflow.v2 import workflow FAKE = { 'scope': 'private', 'id': 'ffaed25e-46f5-4089-8e20-b3b4722fd597', 'definition': 'workflow_def', } class TestWorkflow(base.TestCase): def setUp(self): super().setUp() def test_basic(self): sot = workflow.Workflow() self.assertEqual('workflow', sot.resource_key) self.assertEqual('workflows', sot.resources_key) self.assertEqual('/workflows', sot.base_path) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) def test_instantiate(self): sot = workflow.Workflow(**FAKE) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['scope'], sot.scope) self.assertEqual(FAKE['definition'], sot.definition) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.5054388 openstacksdk-4.0.0/openstack/tests/unit/workflow/v2/0000775000175000017500000000000000000000000022540 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/workflow/v2/__init__.py0000664000175000017500000000000000000000000024637 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/tests/unit/workflow/v2/test_proxy.py0000664000175000017500000000572700000000000025345 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from openstack.tests.unit import test_proxy_base from openstack.workflow.v2 import _proxy from openstack.workflow.v2 import cron_trigger from openstack.workflow.v2 import execution from openstack.workflow.v2 import workflow class TestWorkflowProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_workflows(self): self.verify_list(self.proxy.workflows, workflow.Workflow) def test_executions(self): self.verify_list(self.proxy.executions, execution.Execution) def test_workflow_get(self): self.verify_get(self.proxy.get_workflow, workflow.Workflow) def test_execution_get(self): self.verify_get(self.proxy.get_execution, execution.Execution) def test_workflow_create(self): self.verify_create(self.proxy.create_workflow, workflow.Workflow) def test_workflow_update(self): self.verify_update(self.proxy.update_workflow, workflow.Workflow) def test_execution_create(self): self.verify_create(self.proxy.create_execution, execution.Execution) def test_workflow_delete(self): self.verify_delete(self.proxy.delete_workflow, workflow.Workflow, True) def test_execution_delete(self): self.verify_delete( self.proxy.delete_execution, execution.Execution, True ) def test_workflow_find(self): self.verify_find(self.proxy.find_workflow, workflow.Workflow) def test_execution_find(self): self.verify_find(self.proxy.find_execution, execution.Execution) class TestCronTriggerProxy(test_proxy_base.TestProxyBase): def setUp(self): super().setUp() self.proxy = _proxy.Proxy(self.session) def test_cron_triggers(self): self.verify_list(self.proxy.cron_triggers, cron_trigger.CronTrigger) def test_cron_trigger_get(self): self.verify_get(self.proxy.get_cron_trigger, cron_trigger.CronTrigger) def test_cron_trigger_create(self): self.verify_create( self.proxy.create_cron_trigger, cron_trigger.CronTrigger ) def test_cron_trigger_delete(self): 
self.verify_delete( self.proxy.delete_cron_trigger, cron_trigger.CronTrigger, True ) def test_cron_trigger_find(self): self.verify_find( self.proxy.find_cron_trigger, cron_trigger.CronTrigger, expected_kwargs={'all_projects': False}, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/utils.py0000664000175000017500000005053200000000000017735 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections.abc import Mapping import hashlib import queue import string import threading import time import typing as ty import keystoneauth1 from keystoneauth1 import adapter as ks_adapter from keystoneauth1 import discover from openstack import _log from openstack import exceptions def urljoin(*args): """A custom version of urljoin that simply joins strings into a path. The real urljoin takes into account web semantics like when joining a url like /path this should be joined to http://host/path as it is an anchored link. We generally won't care about that in client. """ return '/'.join(str(a or '').strip('/') for a in args) def iterate_timeout(timeout, message, wait=2): """Iterate and raise an exception on timeout. This is a generator that will continually yield and sleep for wait seconds, and if the timeout is reached, will raise an exception with . 
""" log = _log.setup_logging('openstack.iterate_timeout') try: # None as a wait winds up flowing well in the per-resource cache # flow. We could spread this logic around to all of the calling # points, but just having this treat None as "I don't have a value" # seems friendlier if wait is None: wait = 2 elif wait == 0: # wait should be < timeout, unless timeout is None wait = 0.1 if timeout is None else min(0.1, timeout) wait = float(wait) except ValueError: raise exceptions.SDKException( "Wait value must be an int or float value. {wait} given" " instead".format(wait=wait) ) start = time.time() count = 0 while (timeout is None) or (time.time() < start + timeout): count += 1 yield count log.debug('Waiting %s seconds', wait) time.sleep(wait) raise exceptions.ResourceTimeout(message) def get_string_format_keys(fmt_string, old_style=True): """Gets a list of required keys from a format string Required mostly for parsing base_path urls for required keys, which use the old style string formatting. """ if old_style: class AccessSaver: def __init__(self): self.keys = [] def __getitem__(self, key): self.keys.append(key) a = AccessSaver() fmt_string % a return a.keys else: keys = [] for t in string.Formatter().parse(fmt_string): if t[1] is not None: keys.append(t[1]) return keys def supports_version( adapter: ks_adapter.Adapter, version: str, raise_exception: bool = False, ) -> bool: """Determine if the given adapter supports the given version. Checks the version asserted by the service and ensures this matches the provided version. ``version`` can be a major version or a major-minor version :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance. :param version: String containing the desired version. :param raise_exception: Raise exception when requested version is not supported by the server. :returns: ``True`` if the service supports the version, else ``False``. 
:raises: :class:`~openstack.exceptions.SDKException` when ``raise_exception`` is ``True`` and requested version is not supported. """ required = discover.normalize_version_number(version) if discover.version_match(required, adapter.get_api_major_version()): return True if raise_exception: raise exceptions.SDKException( f'Required version {version} is not supported by the server' ) return False def supports_microversion(adapter, microversion, raise_exception=False): """Determine if the given adapter supports the given microversion. Checks the min and max microversion asserted by the service and ensures ``min <= microversion <= max``. If set, the current default microversion is taken into consideration to ensure ``microversion <= default``. :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance. :param str microversion: String containing the desired microversion. :param bool raise_exception: Raise exception when requested microversion is not supported by the server or is higher than the current default microversion. :returns: True if the service supports the microversion, else False. :rtype: bool :raises: :class:`~openstack.exceptions.SDKException` when ``raise_exception`` is ``True`` and requested microversion is not supported. 
""" endpoint_data = adapter.get_endpoint_data() if ( endpoint_data.min_microversion and endpoint_data.max_microversion and discover.version_between( endpoint_data.min_microversion, endpoint_data.max_microversion, microversion, ) ): if adapter.default_microversion is not None: # If default_microversion is set - evaluate # whether it match the expectation candidate = discover.normalize_version_number( adapter.default_microversion ) required = discover.normalize_version_number(microversion) supports = discover.version_match(required, candidate) if raise_exception and not supports: raise exceptions.SDKException( 'Required microversion {ver} is higher than currently ' 'selected {curr}'.format( ver=microversion, curr=adapter.default_microversion ) ) return supports return True if raise_exception: raise exceptions.SDKException( 'Required microversion {ver} is not supported ' 'by the server side'.format(ver=microversion) ) return False def require_microversion(adapter, required): """Require microversion. :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance. :param str microversion: String containing the desired microversion. :raises: :class:`~openstack.exceptions.SDKException` when requested microversion is not supported """ supports_microversion(adapter, required, raise_exception=True) def pick_microversion(session, required): """Get a new microversion if it is higher than session's default. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param required: Minimum version that is required for an action. :type required: String or tuple or None. :return: ``required`` as a string if the ``session``'s default is too low, otherwise the ``session``'s default. Returns ``None`` if both are ``None``. :raises: TypeError if ``required`` is invalid. :raises: :class:`~openstack.exceptions.SDKException` if requested microversion is not supported. 
""" if required is not None: required = discover.normalize_version_number(required) if session.default_microversion is not None: default = discover.normalize_version_number( session.default_microversion ) if required is None: required = default else: required = ( default if discover.version_match(required, default) else required ) if required is not None: if not supports_microversion(session, required): raise exceptions.SDKException( 'Requested microversion is not supported by the server side ' 'or the default microversion is too low' ) return discover.version_to_string(required) def maximum_supported_microversion(adapter, client_maximum): """Determine the maximum microversion supported by both client and server. :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance. :param client_maximum: Maximum microversion supported by the client. If ``None``, ``None`` is returned. :returns: the maximum supported microversion as string or ``None``. """ if client_maximum is None: return None # NOTE(dtantsur): if we cannot determine supported microversions, fall back # to the default one. try: endpoint_data = adapter.get_endpoint_data() except keystoneauth1.exceptions.discovery.DiscoveryFailure: endpoint_data = None if endpoint_data is None: log = _log.setup_logging('openstack') log.warning( 'Cannot determine endpoint data for service %s', adapter.service_type or adapter.service_name, ) return None if not endpoint_data.max_microversion: return None client_max = discover.normalize_version_number(client_maximum) server_max = discover.normalize_version_number( endpoint_data.max_microversion ) if endpoint_data.min_microversion: server_min = discover.normalize_version_number( endpoint_data.min_microversion ) if client_max < server_min: # NOTE(dtantsur): we may want to raise in this case, but this keeps # the current behavior intact. 
return None result = min(client_max, server_max) return discover.version_to_string(result) def _hashes_up_to_date(md5, sha256, md5_key, sha256_key): '''Compare md5 and sha256 hashes for being up to date md5 and sha256 are the current values. md5_key and sha256_key are the previous values. ''' up_to_date = False if md5 and md5_key == md5: up_to_date = True if sha256 and sha256_key == sha256: up_to_date = True if md5 and md5_key != md5: up_to_date = False if sha256 and sha256_key != sha256: up_to_date = False return up_to_date try: _test_md5 = hashlib.md5(usedforsecurity=False) # nosec # Python distributions that support a hashlib.md5 with the usedforsecurity # keyword can just use that md5 definition as-is # See https://bugs.python.org/issue9216 # # TODO(alee) Remove this wrapper when the minimum python version is bumped # to 3.9 (which is the first upstream version to support this keyword) # See https://docs.python.org/3.9/library/hashlib.html md5 = hashlib.md5 except TypeError: def md5(string=b'', usedforsecurity=True): """Return an md5 hashlib object without usedforsecurity parameter For python distributions that do not yet support this keyword parameter, we drop the parameter """ return hashlib.md5(string) # nosec def _calculate_data_hashes(data): _md5 = md5(usedforsecurity=False) _sha256 = hashlib.sha256() if hasattr(data, 'read'): for chunk in iter(lambda: data.read(8192), b''): _md5.update(chunk) _sha256.update(chunk) else: _md5.update(data) _sha256.update(data) return (_md5.hexdigest(), _sha256.hexdigest()) def _get_file_hashes(filename): (_md5, _sha256) = (None, None) with open(filename, 'rb') as file_obj: (_md5, _sha256) = _calculate_data_hashes(file_obj) return (_md5, _sha256) class TinyDAG: """Tiny DAG Bases on the Kahn's algorithm, and enables parallel visiting of the nodes (parallel execution of the workflow items). 
""" def __init__(self, data=None): self._reset() self._lock = threading.Lock() if data and isinstance(data, dict): self.from_dict(data) def _reset(self): self._graph = dict() self._wait_timeout = 120 @property def graph(self): """Get graph as adjacency dict""" return self._graph def add_node(self, node): self._graph.setdefault(node, set()) def add_edge(self, u, v): self._graph[u].add(v) def from_dict(self, data): self._reset() for k, v in data.items(): self.add_node(k) for dep in v: self.add_edge(k, dep) def walk(self, timeout=None): """Start the walking from the beginning.""" if timeout: self._wait_timeout = timeout return self def __iter__(self): self._start_traverse() return self def __next__(self): # Start waiting if it is expected to get something # (counting down from graph length to 0). if self._it_cnt > 0: self._it_cnt -= 1 try: res = self._queue.get(block=True, timeout=self._wait_timeout) return res except queue.Empty: raise exceptions.SDKException( 'Timeout waiting for cleanup task to complete' ) else: raise StopIteration def node_done(self, node): """Mark node as "processed" and put following items into the queue""" self._done.add(node) for v in self._graph[node]: self._run_in_degree[v] -= 1 if self._run_in_degree[v] == 0: self._queue.put(v) def _start_traverse(self): """Initialize graph traversing""" self._run_in_degree = self._get_in_degree() self._queue: queue.Queue[str] = queue.Queue() self._done = set() self._it_cnt = len(self._graph) for k, v in self._run_in_degree.items(): if v == 0: self._queue.put(k) def _get_in_degree(self): """Calculate the in_degree (count incoming) for nodes""" _in_degree: ty.Dict[str, int] = {u: 0 for u in self._graph.keys()} for u in self._graph: for v in self._graph[u]: _in_degree[v] += 1 return _in_degree def topological_sort(self): """Return the graph nodes in the topological order""" result = [] for node in self: result.append(node) self.node_done(node) return result def size(self): return len(self._graph.keys()) def 
is_complete(self): return len(self._done) == self.size() # Importing Munch is a relatively expensive operation (0.3s) while we do not # really even need much of it. Before we can rework all places where we rely on # it we can have a reduced version. class Munch(dict): """A slightly stripped version of munch.Munch class""" def __init__(self, *args, **kwargs): self.update(*args, **kwargs) # only called if k not found in normal places def __getattr__(self, k): """Gets key if it exists, otherwise throws AttributeError.""" try: return object.__getattribute__(self, k) except AttributeError: try: return self[k] except KeyError: raise AttributeError(k) def __setattr__(self, k, v): """Sets attribute k if it exists, otherwise sets key k. A KeyError raised by set-item (only likely if you subclass Munch) will propagate as an AttributeError instead. """ try: # Throws exception if not in prototype chain object.__getattribute__(self, k) except AttributeError: try: self[k] = v except Exception: raise AttributeError(k) else: object.__setattr__(self, k, v) def __delattr__(self, k): """Deletes attribute k if it exists, otherwise deletes key k. A KeyError raised by deleting the key - such as when the key is missing - will propagate as an AttributeError instead. """ try: # Throws exception if not in prototype chain object.__getattribute__(self, k) except AttributeError: try: del self[k] except KeyError: raise AttributeError(k) else: object.__delattr__(self, k) def toDict(self): """Recursively converts a munch back into a dictionary.""" return unmunchify(self) @property def __dict__(self): return self.toDict() def __repr__(self): """Invertible* string-form of a Munch.""" return f'{self.__class__.__name__}({dict.__repr__(self)})' def __dir__(self): return list(self.keys()) def __getstate__(self): """Implement a serializable interface used for pickling. See https://docs.python.org/3.6/library/pickle.html. 
""" return {k: v for k, v in self.items()} def __setstate__(self, state): """Implement a serializable interface used for pickling. See https://docs.python.org/3.6/library/pickle.html. """ self.clear() self.update(state) @classmethod def fromDict(cls, d): """Recursively transforms a dictionary into a Munch via copy.""" return munchify(d, cls) def copy(self): return type(self).fromDict(self) def update(self, *args, **kwargs): """ Override built-in method to call custom __setitem__ method that may be defined in subclasses. """ for k, v in dict(*args, **kwargs).items(): self[k] = v def get(self, k, d=None): """ D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None. """ if k not in self: return d return self[k] def setdefault(self, k, d=None): """ D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D """ if k not in self: self[k] = d return self[k] def munchify(x, factory=Munch): """Recursively transforms a dictionary into a Munch via copy.""" # Munchify x, using `seen` to track object cycles seen: ty.Dict[int, ty.Any] = dict() def munchify_cycles(obj): try: return seen[id(obj)] except KeyError: pass seen[id(obj)] = partial = pre_munchify(obj) return post_munchify(partial, obj) def pre_munchify(obj): if isinstance(obj, Mapping): return factory({}) elif isinstance(obj, list): return type(obj)() elif isinstance(obj, tuple): type_factory = getattr(obj, "_make", type(obj)) return type_factory(munchify_cycles(item) for item in obj) else: return obj def post_munchify(partial, obj): if isinstance(obj, Mapping): partial.update((k, munchify_cycles(obj[k])) for k in obj.keys()) elif isinstance(obj, list): partial.extend(munchify_cycles(item) for item in obj) elif isinstance(obj, tuple): for item_partial, item in zip(partial, obj): post_munchify(item_partial, item) return partial return munchify_cycles(x) def unmunchify(x): """Recursively converts a Munch into a dictionary.""" # Munchify x, using `seen` to track object cycles seen: ty.Dict[int, ty.Any] = dict() def 
unmunchify_cycles(obj): try: return seen[id(obj)] except KeyError: pass seen[id(obj)] = partial = pre_unmunchify(obj) return post_unmunchify(partial, obj) def pre_unmunchify(obj): if isinstance(obj, Mapping): return dict() elif isinstance(obj, list): return type(obj)() elif isinstance(obj, tuple): type_factory = getattr(obj, "_make", type(obj)) return type_factory(unmunchify_cycles(item) for item in obj) else: return obj def post_unmunchify(partial, obj): if isinstance(obj, Mapping): partial.update((k, unmunchify_cycles(obj[k])) for k in obj.keys()) elif isinstance(obj, list): partial.extend(unmunchify_cycles(v) for v in obj) elif isinstance(obj, tuple): for value_partial, value in zip(partial, obj): post_unmunchify(value_partial, value) return partial return unmunchify_cycles(x) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/version.py0000664000175000017500000000120100000000000020247 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pbr.version __version__ = pbr.version.VersionInfo('openstacksdk').version_string() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/warnings.py0000664000175000017500000000401500000000000020420 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # API deprecation warnings # # These are for service-related deprecations, such as the removal of an API or # API field due to a microversion. class OpenStackDeprecationWarning(DeprecationWarning): """Base class for warnings about deprecated features in openstacksdk.""" class RemovedResourceWarning(OpenStackDeprecationWarning): """Indicates that a resource has been removed in newer API versions and should not be used. """ class RemovedFieldWarning(OpenStackDeprecationWarning): """Indicates that a field has been removed in newer API versions and should not be used. """ class LegacyAPIWarning(OpenStackDeprecationWarning): """Indicates an API that is in 'legacy' status, a long term deprecation.""" # Package deprecation warnings # # These are for SDK-specific deprecations, such as removed functions or # function parameters. 
class RemovedInSDK40Warning(DeprecationWarning): """Indicates an argument that is deprecated for removal in SDK 4.0.""" class RemovedInSDK50Warning(PendingDeprecationWarning): """Indicates an argument that is deprecated for removal in SDK 5.0.""" # General warnings # # These are usually related to misconfigurations. class OpenStackWarning(Warning): """Base class for general warnings in openstacksdk.""" class ConfigurationWarning(OpenStackWarning): """Indicates an issue with configuration.""" class UnsupportedServiceVersion(OpenStackWarning): """Indicates a major version that SDK doesn't understand.""" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.5054388 openstacksdk-4.0.0/openstack/workflow/0000775000175000017500000000000000000000000020070 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/workflow/__init__.py0000664000175000017500000000000000000000000022167 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.5054388 openstacksdk-4.0.0/openstack/workflow/v2/0000775000175000017500000000000000000000000020417 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/workflow/v2/__init__.py0000664000175000017500000000000000000000000022516 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/workflow/v2/_proxy.py0000664000175000017500000002757600000000000022332 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import proxy from openstack.workflow.v2 import cron_trigger as _cron_trigger from openstack.workflow.v2 import execution as _execution from openstack.workflow.v2 import workflow as _workflow class Proxy(proxy.Proxy): _resource_registry = { "execution": _execution.Execution, "workflow": _workflow.Workflow, } def create_workflow(self, **attrs): """Create a new workflow from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.workflow.v2.workflow.Workflow`, comprised of the properties on the Workflow class. :returns: The results of workflow creation :rtype: :class:`~openstack.workflow.v2.workflow.Workflow` """ return self._create(_workflow.Workflow, **attrs) def update_workflow(self, workflow, **attrs): """Update workflow from attributes :param workflow: The value can be either the name of a workflow or a :class:`~openstack.workflow.v2.workflow.Workflow` instance. :param dict attrs: Keyword arguments which will be used to update a :class:`~openstack.workflow.v2.workflow.Workflow`, comprised of the properties on the Workflow class. :returns: The results of workflow update :rtype: :class:`~openstack.workflow.v2.workflow.Workflow` """ return self._update(_workflow.Workflow, workflow, **attrs) def get_workflow(self, *attrs): """Get a workflow :param workflow: The value can be the name of a workflow or :class:`~openstack.workflow.v2.workflow.Workflow` instance. 
:returns: One :class:`~openstack.workflow.v2.workflow.Workflow` :raises: :class:`~openstack.exceptions.NotFoundException` when no workflow matching the name could be found. """ return self._get(_workflow.Workflow, *attrs) def workflows(self, **query): """Retrieve a generator of workflows :param kwargs query: Optional query parameters to be sent to restrict the workflows to be returned. Available parameters include: * limit: Requests at most the specified number of items be returned from the query. * marker: Specifies the ID of the last-seen workflow. Use the limit parameter to make an initial limited request and use the ID of the last-seen workflow from the response as the marker parameter value in a subsequent limited request. :returns: A generator of workflow instances. """ return self._list(_workflow.Workflow, **query) def delete_workflow(self, value, ignore_missing=True): """Delete a workflow :param value: The value can be either the name of a workflow or a :class:`~openstack.workflow.v2.workflow.Workflow` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the workflow does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent workflow. :returns: ``None`` """ return self._delete( _workflow.Workflow, value, ignore_missing=ignore_missing ) def find_workflow(self, name_or_id, ignore_missing=True): """Find a single workflow :param name_or_id: The name or ID of an workflow. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.compute.v2.workflow.Extension` or None """ return self._find( _workflow.Workflow, name_or_id, ignore_missing=ignore_missing ) def create_execution(self, **attrs): """Create a new execution from attributes :param workflow_name: The name of target workflow to execute. :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.workflow.v2.execution.Execution`, comprised of the properties on the Execution class. :returns: The results of execution creation :rtype: :class:`~openstack.workflow.v2.execution.Execution` """ return self._create(_execution.Execution, **attrs) def get_execution(self, *attrs): """Get a execution :param workflow_name: The name of target workflow to execute. :param execution: The value can be either the ID of a execution or a :class:`~openstack.workflow.v2.execution.Execution` instance. :returns: One :class:`~openstack.workflow.v2.execution.Execution` :raises: :class:`~openstack.exceptions.NotFoundException` when no execution matching the criteria could be found. """ return self._get(_execution.Execution, *attrs) def executions(self, **query): """Retrieve a generator of executions :param kwargs query: Optional query parameters to be sent to restrict the executions to be returned. Available parameters include: * limit: Requests at most the specified number of items be returned from the query. * marker: Specifies the ID of the last-seen execution. Use the limit parameter to make an initial limited request and use the ID of the last-seen execution from the response as the marker parameter value in a subsequent limited request. :returns: A generator of execution instances. """ return self._list(_execution.Execution, **query) def delete_execution(self, value, ignore_missing=True): """Delete an execution :param value: The value can be either the name of a execution or a :class:`~openstack.workflow.v2.execute.Execution` instance. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the execution does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent execution. :returns: ``None`` """ return self._delete( _execution.Execution, value, ignore_missing=ignore_missing ) def find_execution(self, name_or_id, ignore_missing=True): """Find a single execution :param name_or_id: The name or ID of an execution. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :returns: One :class:`~openstack.compute.v2.execution.Execution` or None """ return self._find( _execution.Execution, name_or_id, ignore_missing=ignore_missing ) def create_cron_trigger(self, **attrs): """Create a new cron trigger from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~openstack.workflow.v2.cron_trigger.CronTrigger`, comprised of the properties on the CronTrigger class. :returns: The results of cron trigger creation :rtype: :class:`~openstack.workflow.v2.cron_trigger.CronTrigger` """ return self._create(_cron_trigger.CronTrigger, **attrs) def get_cron_trigger(self, cron_trigger): """Get a cron trigger :param cron_trigger: The value can be the name of a cron_trigger or :class:`~openstack.workflow.v2.cron_trigger.CronTrigger` instance. :returns: One :class:`~openstack.workflow.v2.cron_trigger.CronTrigger` :raises: :class:`~openstack.exceptions.NotFoundException` when no cron triggers matching the criteria could be found. """ return self._get(_cron_trigger.CronTrigger, cron_trigger) def cron_triggers(self, *, all_projects=False, **query): """Retrieve a generator of cron triggers :param bool all_projects: When set to ``True``, list cron triggers from all projects. Admin-only by default. 
:param kwargs query: Optional query parameters to be sent to restrict the cron triggers to be returned. Available parameters include: * limit: Requests at most the specified number of items be returned from the query. * marker: Specifies the ID of the last-seen cron trigger. Use the limit parameter to make an initial limited request and use the ID of the last-seen cron trigger from the response as the marker parameter value in a subsequent limited request. :returns: A generator of CronTrigger instances. """ if all_projects: query['all_projects'] = True return self._list(_cron_trigger.CronTrigger, **query) def delete_cron_trigger(self, value, ignore_missing=True): """Delete a cron trigger :param value: The value can be either the name of a cron trigger or a :class:`~openstack.workflow.v2.cron_trigger.CronTrigger` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the cron trigger does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent cron trigger. :returns: ``None`` """ return self._delete( _cron_trigger.CronTrigger, value, ignore_missing=ignore_missing ) # TODO(stephenfin): Drop 'query' parameter or apply it consistently def find_cron_trigger( self, name_or_id, ignore_missing=True, *, all_projects=False, **query, ): """Find a single cron trigger :param name_or_id: The name or ID of a cron trigger. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param bool all_projects: When set to ``True``, search for cron triggers by name across all projects. Note that this will likely result in a higher chance of duplicates. :param kwargs query: Optional query parameters to be sent to limit the cron triggers being returned. 
:returns: One :class:`~openstack.compute.v2.cron_trigger.CronTrigger` or None :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple resources are found. """ return self._find( _cron_trigger.CronTrigger, name_or_id, ignore_missing=ignore_missing, all_projects=all_projects, **query, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/workflow/v2/cron_trigger.py0000664000175000017500000000466000000000000023463 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class CronTrigger(resource.Resource): resource_key = 'cron_trigger' resources_key = 'cron_triggers' base_path = '/cron_triggers' # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True _query_mapping = resource.QueryParameters( 'marker', 'limit', 'sort_keys', 'sort_dirs', 'fields', 'name', 'workflow_name', 'workflow_id', 'workflow_input', 'workflow_params', 'scope', 'pattern', 'remaining_executions', 'project_id', 'first_execution_time', 'next_execution_time', 'created_at', 'updated_at', 'all_projects', ) #: The name of this Cron Trigger name = resource.Body("name") #: The pattern for this Cron Trigger pattern = resource.Body("pattern") #: Count of remaining exectuions remaining_executions = resource.Body("remaining_executions") #: Time of the first execution first_execution_time = resource.Body("first_execution_time") #: Time of the next execution next_execution_time = resource.Body("next_execution_time") #: Workflow name workflow_name = resource.Body("workflow_name") #: Workflow ID workflow_id = resource.Body("workflow_id") #: The inputs for Workflow workflow_input = resource.Body("workflow_input") #: Workflow params workflow_params = resource.Body("workflow_params") #: The ID of the associated project project_id = resource.Body("project_id") #: The time at which the cron trigger was created created_at = resource.Body("created_at") #: The time at which the cron trigger was created updated_at = resource.Body("updated_at") def create(self, session, base_path=None): return super().create(session, prepend_key=False, base_path=base_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/workflow/v2/execution.py0000664000175000017500000000473300000000000023003 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Execution(resource.Resource): resource_key = 'execution' resources_key = 'executions' base_path = '/executions' # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True _query_mapping = resource.QueryParameters( 'marker', 'limit', 'sort_keys', 'sort_dirs', 'fields', 'params', 'include_output', ) #: The name of the workflow workflow_name = resource.Body("workflow_name") #: The ID of the workflow workflow_id = resource.Body("workflow_id") #: A description of the workflow execution description = resource.Body("description") #: A reference to the parent task execution task_execution_id = resource.Body("task_execution_id") #: Status can be one of: IDLE, RUNNING, SUCCESS, ERROR, or PAUSED status = resource.Body("state") #: An optional information string about the status status_info = resource.Body("state_info") #: A JSON structure containing workflow input values # TODO(briancurtin): type=dict input = resource.Body("input") #: An optional JSON structure containing workflow type specific parameters params = resource.Body("params") #: The output of the workflow output = resource.Body("output") #: The time at which the Execution was created created_at = resource.Body("created_at") #: The time at which the Execution was updated updated_at = resource.Body("updated_at") def create(self, session, prepend_key=True, base_path=None): request = self._prepare_request( requires_id=False, prepend_key=prepend_key, base_path=base_path ) request_body = request.body["execution"] response = session.post( 
request.url, json=request_body, headers=request.headers ) self._translate_response(response, has_body=True) return self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/workflow/v2/workflow.py0000664000175000017500000000552300000000000022650 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource class Workflow(resource.Resource): resource_key = 'workflow' resources_key = 'workflows' base_path = '/workflows' # capabilities allow_create = True allow_commit = True allow_list = True allow_fetch = True allow_delete = True _query_mapping = resource.QueryParameters( 'marker', 'limit', 'sort_keys', 'sort_dirs', 'fields' ) #: The name of this Workflow name = resource.Body("name") #: The inputs for this Workflow input = resource.Body("input") #: A Workflow definition using the Mistral v2 DSL definition = resource.Body("definition") #: A list of values associated with a workflow that users can use #: to group workflows by some criteria # TODO(briancurtin): type=list tags = resource.Body("tags") #: Can be either "private" or "public" scope = resource.Body("scope") #: The ID of the associated project project_id = resource.Body("project_id") #: The time at which the workflow was created created_at = resource.Body("created_at") #: The time at which the workflow was created updated_at = resource.Body("updated_at") def _request_kwargs(self, prepend_key=True, 
base_path=None): request = self._prepare_request( requires_id=False, prepend_key=prepend_key, base_path=base_path ) headers = {"Content-Type": 'text/plain'} kwargs = { "data": self.definition, } scope = "?scope=%s" % self.scope uri = request.url + scope request.headers.update(headers) return dict(url=uri, json=None, headers=request.headers, **kwargs) def create(self, session, prepend_key=True, base_path=None): kwargs = self._request_kwargs( prepend_key=prepend_key, base_path=base_path ) response = session.post(**kwargs) self._translate_response(response, has_body=False) return self def update(self, session, prepend_key=True, base_path=None): kwargs = self._request_kwargs( prepend_key=prepend_key, base_path=base_path ) response = session.put(**kwargs) self._translate_response(response, has_body=False) return self def commit(self, *args, **kwargs): return self.update(*args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/workflow/version.py0000664000175000017500000000147500000000000022136 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import resource class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' # capabilities allow_list = True # Properties links = resource.Body('links') status = resource.Body('status') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/openstack/workflow/workflow_service.py0000664000175000017500000000142300000000000024034 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from openstack import service_description from openstack.workflow.v2 import _proxy class WorkflowService(service_description.ServiceDescription): """The workflow service.""" supported_versions = { '2': _proxy.Proxy, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.5094407 openstacksdk-4.0.0/openstacksdk.egg-info/0000775000175000017500000000000000000000000020412 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296384.0 openstacksdk-4.0.0/openstacksdk.egg-info/PKG-INFO0000664000175000017500000003473700000000000021525 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: openstacksdk Version: 4.0.0 Summary: An SDK for building applications to work with OpenStack Home-page: https://docs.openstack.org/openstacksdk/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ============ openstacksdk ============ openstacksdk is a client library for building applications to work with OpenStack clouds. The project aims to provide a consistent and complete set of interactions with OpenStack's many services, along with complete documentation, examples, and tools. It also contains an abstraction interface layer. Clouds can do many things, but there are probably only about 10 of them that most people care about with any regularity. If you want to do complicated things, the per-service oriented portions of the SDK are for you. However, if what you want is to be able to write an application that talks to any OpenStack cloud regardless of configuration, then the Cloud Abstraction layer is for you. More information about the history of openstacksdk can be found at https://docs.openstack.org/openstacksdk/latest/contributor/history.html Getting started --------------- .. rubric:: Authentication and connection management openstacksdk aims to talk to any OpenStack cloud. 
To do this, it requires a configuration file. openstacksdk favours ``clouds.yaml`` files, but can also use environment variables. The ``clouds.yaml`` file should be provided by your cloud provider or deployment tooling. An example: .. code-block:: yaml clouds: mordred: region_name: Dallas auth: username: 'mordred' password: XXXXXXX project_name: 'demo' auth_url: 'https://identity.example.com' openstacksdk will look for ``clouds.yaml`` files in the following locations: * If set, the path indicated by the ``OS_CLIENT_CONFIG_FILE`` environment variable * ``.`` (the current directory) * ``$HOME/.config/openstack`` * ``/etc/openstack`` You can create a connection using the ``openstack.connect`` function. The cloud name can be either passed directly to this function or specified using the ``OS_CLOUD`` environment variable. If you don't have a ``clouds.yaml`` file and instead use environment variables for configuration then you can use the special ``envvars`` cloud name to load configuration from the environment. For example: .. code-block:: python import openstack # Initialize connection from a clouds.yaml by passing a cloud name conn_from_cloud_name = openstack.connect(cloud='mordred') # Initialize connection from a clouds.yaml using the OS_CLOUD envvar conn_from_os_cloud = openstack.connect() # Initialize connection from environment variables conn_from_env_vars = openstack.connect(cloud='envvars') .. note:: How this is all achieved is described in more detail `below `__. .. rubric:: The cloud layer openstacksdk consists of four layers which all build on top of each other. The highest level layer is the *cloud* layer. Cloud layer methods are available via the top level ``Connection`` object returned by ``openstack.connect``. For example: .. 
code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in conn.list_servers(): print(server.to_dict()) The cloud layer is based on logical operations that can potentially touch multiple services. The benefit of this layer is mostly seen in more complicated operations that take multiple steps and where the steps vary across providers. For example: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # Upload an image to the cloud image = conn.create_image( 'ubuntu-trusty', filename='ubuntu-trusty.qcow2', wait=True) # Find a flavor with at least 512M of RAM flavor = conn.get_flavor_by_ram(512) # Boot a server, wait for it to boot, and then do whatever is needed # to get a public IP address for it. conn.create_server( 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) .. rubric:: The proxy layer The next layer is the *proxy* layer. Most users will make use of this layer. The proxy layer is service-specific, so methods will be available under service-specific connection attributes of the ``Connection`` object such as ``compute``, ``block_storage``, ``image`` etc. For example: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in conn.compute.servers(): print(server.to_dict()) .. note:: A list of supported services is given `below `__. .. rubric:: The resource layer Below this there is the *resource* layer. This provides support for the basic CRUD operations supported by REST APIs and is the base building block for the other layers. 
You typically will not need to use this directly but it can be helpful for operations where you already have a ``Resource`` object to hand. For example: .. code-block:: python import openstack import openstack.config.loader import openstack.compute.v2.server # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List the servers for server in openstack.compute.v2.server.Server.list(session=conn.compute): print(server.to_dict()) .. rubric:: The raw HTTP layer Finally, there is the *raw HTTP* layer. This exposes raw HTTP semantics and is effectively a wrapper around the `requests`__ API with added smarts to handle stuff like authentication and version management. As such, you can use the ``requests`` API methods you know and love, like ``get``, ``post`` and ``put``, and expect to receive a ``requests.Response`` object in response (unlike the other layers, which mostly all return objects that subclass ``openstack.resource.Resource``). Like the *resource* layer, you will typically not need to use this directly but it can be helpful to interact with APIs that have not or will not be supported by openstacksdk. For example: .. code-block:: python import openstack # Initialize and turn on debug logging openstack.enable_logging(debug=True) # Initialize connection conn = openstack.connect(cloud='mordred') # List servers for server in openstack.compute.get('/servers').json(): print(server) .. __: https://requests.readthedocs.io/en/latest/ .. _openstack.config: Configuration ------------- openstacksdk uses the ``openstack.config`` module to parse configuration. ``openstack.config`` will find cloud configuration for as few as one cloud and as many as you want to put in a config file. 
It will read environment variables and config files, and it also contains some vendor specific default values so that you don't have to know extra info to use OpenStack * If you have a config file, you will get the clouds listed in it * If you have environment variables, you will get a cloud named `envvars` * If you have neither, you will get a cloud named `defaults` with base defaults You can view the configuration identified by openstacksdk in your current environment by running ``openstack.config.loader``. For example: .. code-block:: bash $ python -m openstack.config.loader More information at https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html .. _supported-services: Supported services ------------------ The following services are currently supported. A full list of all available OpenStack service can be found in the `Project Navigator`__. .. note:: Support here does not guarantee full-support for all APIs. It simply means some aspect of the project is supported. .. 
list-table:: Supported services :widths: 15 25 10 40 :header-rows: 1 * - Service - Description - Cloud Layer - Proxy & Resource Layer * - **Compute** - - - * - Nova - Compute - ✔ - ✔ (``openstack.compute``) * - **Hardware Lifecycle** - - - * - Ironic - Bare metal provisioning - ✔ - ✔ (``openstack.baremetal``, ``openstack.baremetal_introspection``) * - Cyborg - Lifecycle management of accelerators - ✔ - ✔ (``openstack.accelerator``) * - **Storage** - - - * - Cinder - Block storage - ✔ - ✔ (``openstack.block_storage``) * - Swift - Object store - ✔ - ✔ (``openstack.object_store``) * - Cinder - Shared filesystems - ✔ - ✔ (``openstack.shared_file_system``) * - **Networking** - - - * - Neutron - Networking - ✔ - ✔ (``openstack.network``) * - Octavia - Load balancing - ✔ - ✔ (``openstack.load_balancer``) * - Designate - DNS - ✔ - ✔ (``openstack.dns``) * - **Shared services** - - - * - Keystone - Identity - ✔ - ✔ (``openstack.identity``) * - Placement - Placement - ✔ - ✔ (``openstack.placement``) * - Glance - Image storage - ✔ - ✔ (``openstack.image``) * - Barbican - Key management - ✔ - ✔ (``openstack.key_manager``) * - **Workload provisioning** - - - * - Magnum - Container orchestration engine provisioning - ✔ - ✔ (``openstack.container_infrastructure_management``) * - **Orchestration** - - - * - Heat - Orchestration - ✔ - ✔ (``openstack.orchestration``) * - Senlin - Clustering - ✔ - ✔ (``openstack.clustering``) * - Mistral - Workflow - ✔ - ✔ (``openstack.workflow``) * - Zaqar - Messaging - ✔ - ✔ (``openstack.message``) * - **Application lifecycle** - - - * - Masakari - Instances high availability service - ✔ - ✔ (``openstack.instance_ha``) .. 
__: https://www.openstack.org/software/project-navigator/openstack-components#openstack-services Links ----- * `Issue Tracker `_ * `Code Review `_ * `Documentation `_ * `PyPI `_ * `Mailing list `_ * `Release Notes `_ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296385.0 openstacksdk-4.0.0/openstacksdk.egg-info/SOURCES.txt0000664000175000017500000031467000000000000022311 0ustar00zuulzuul00000000000000.coveragerc .git-blame-ignore-revs .mailmap .pre-commit-config.yaml .stestr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MANIFEST.in README.rst SHADE-MERGE-TODO.rst babel.cfg bindep.txt docs-requirements.txt include-acceptance-regular-user.txt post_test_hook.sh requirements.txt setup.cfg setup.py test-requirements.txt tox.ini devstack/plugin.sh doc/requirements.txt doc/source/conf.py doc/source/glossary.rst doc/source/index.rst doc/source/releasenotes.rst doc/source/contributor/clouds.yaml doc/source/contributor/coding.rst doc/source/contributor/contributing.rst doc/source/contributor/history.rst doc/source/contributor/index.rst doc/source/contributor/layout.rst doc/source/contributor/layout.txt doc/source/contributor/setup.rst doc/source/contributor/testing.rst doc/source/contributor/create/resource.rst doc/source/contributor/create/examples/resource/fake.py 
doc/source/contributor/create/examples/resource/fake_service.py doc/source/install/index.rst doc/source/user/connection.rst doc/source/user/examples doc/source/user/exceptions.rst doc/source/user/index.rst doc/source/user/microversions.rst doc/source/user/model.rst doc/source/user/multi-cloud-demo.rst doc/source/user/resource.rst doc/source/user/service_description.rst doc/source/user/transition_from_profile.rst doc/source/user/utils.rst doc/source/user/warnings.rst doc/source/user/config/configuration.rst doc/source/user/config/index.rst doc/source/user/config/network-config.rst doc/source/user/config/reference.rst doc/source/user/config/using.rst doc/source/user/config/vendor-support.rst doc/source/user/guides/baremetal.rst doc/source/user/guides/block_storage.rst doc/source/user/guides/clustering.rst doc/source/user/guides/compute.rst doc/source/user/guides/connect.rst doc/source/user/guides/connect_from_config.rst doc/source/user/guides/database.rst doc/source/user/guides/dns.rst doc/source/user/guides/identity.rst doc/source/user/guides/image.rst doc/source/user/guides/intro.rst doc/source/user/guides/key_manager.rst doc/source/user/guides/logging.rst doc/source/user/guides/message.rst doc/source/user/guides/network.rst doc/source/user/guides/object_store.rst doc/source/user/guides/orchestration.rst doc/source/user/guides/shared_file_system.rst doc/source/user/guides/stats.rst doc/source/user/guides/clustering/action.rst doc/source/user/guides/clustering/cluster.rst doc/source/user/guides/clustering/event.rst doc/source/user/guides/clustering/node.rst doc/source/user/guides/clustering/policy.rst doc/source/user/guides/clustering/policy_type.rst doc/source/user/guides/clustering/profile.rst doc/source/user/guides/clustering/profile_type.rst doc/source/user/guides/clustering/receiver.rst doc/source/user/proxies/accelerator.rst doc/source/user/proxies/baremetal.rst doc/source/user/proxies/baremetal_introspection.rst doc/source/user/proxies/block_storage_v2.rst 
doc/source/user/proxies/block_storage_v3.rst doc/source/user/proxies/clustering.rst doc/source/user/proxies/compute.rst doc/source/user/proxies/container_infrastructure_management.rst doc/source/user/proxies/database.rst doc/source/user/proxies/dns.rst doc/source/user/proxies/identity_v2.rst doc/source/user/proxies/identity_v3.rst doc/source/user/proxies/image_v1.rst doc/source/user/proxies/image_v2.rst doc/source/user/proxies/key_manager.rst doc/source/user/proxies/load_balancer_v2.rst doc/source/user/proxies/message_v2.rst doc/source/user/proxies/network.rst doc/source/user/proxies/object_store.rst doc/source/user/proxies/orchestration.rst doc/source/user/proxies/placement.rst doc/source/user/proxies/shared_file_system.rst doc/source/user/proxies/workflow.rst doc/source/user/resources/accelerator/index.rst doc/source/user/resources/accelerator/v2/accelerator_request.rst doc/source/user/resources/accelerator/v2/deployable.rst doc/source/user/resources/accelerator/v2/device.rst doc/source/user/resources/accelerator/v2/device_profile.rst doc/source/user/resources/baremetal/index.rst doc/source/user/resources/baremetal/v1/allocation.rst doc/source/user/resources/baremetal/v1/chassis.rst doc/source/user/resources/baremetal/v1/conductor.rst doc/source/user/resources/baremetal/v1/deploy_templates.rst doc/source/user/resources/baremetal/v1/driver.rst doc/source/user/resources/baremetal/v1/node.rst doc/source/user/resources/baremetal/v1/port.rst doc/source/user/resources/baremetal/v1/port_group.rst doc/source/user/resources/baremetal/v1/volume_connector.rst doc/source/user/resources/baremetal/v1/volume_target.rst doc/source/user/resources/baremetal_introspection/index.rst doc/source/user/resources/baremetal_introspection/v1/introspection.rst doc/source/user/resources/baremetal_introspection/v1/introspection_rule.rst doc/source/user/resources/block_storage/index.rst doc/source/user/resources/block_storage/v2/backup.rst 
doc/source/user/resources/block_storage/v2/capabilities.rst doc/source/user/resources/block_storage/v2/limits.rst doc/source/user/resources/block_storage/v2/quota_set.rst doc/source/user/resources/block_storage/v2/snapshot.rst doc/source/user/resources/block_storage/v2/stats.rst doc/source/user/resources/block_storage/v2/type.rst doc/source/user/resources/block_storage/v2/volume.rst doc/source/user/resources/block_storage/v3/attachment.rst doc/source/user/resources/block_storage/v3/availability_zone.rst doc/source/user/resources/block_storage/v3/backup.rst doc/source/user/resources/block_storage/v3/block_storage_summary.rst doc/source/user/resources/block_storage/v3/capabilities.rst doc/source/user/resources/block_storage/v3/extension.rst doc/source/user/resources/block_storage/v3/group.rst doc/source/user/resources/block_storage/v3/group_snapshot.rst doc/source/user/resources/block_storage/v3/group_type.rst doc/source/user/resources/block_storage/v3/limits.rst doc/source/user/resources/block_storage/v3/quota_set.rst doc/source/user/resources/block_storage/v3/resource_filter.rst doc/source/user/resources/block_storage/v3/service.rst doc/source/user/resources/block_storage/v3/snapshot.rst doc/source/user/resources/block_storage/v3/stats.rst doc/source/user/resources/block_storage/v3/transfer.rst doc/source/user/resources/block_storage/v3/type.rst doc/source/user/resources/block_storage/v3/volume.rst doc/source/user/resources/clustering/index.rst doc/source/user/resources/clustering/v1/action.rst doc/source/user/resources/clustering/v1/build_info.rst doc/source/user/resources/clustering/v1/cluster.rst doc/source/user/resources/clustering/v1/cluster_policy.rst doc/source/user/resources/clustering/v1/event.rst doc/source/user/resources/clustering/v1/node.rst doc/source/user/resources/clustering/v1/policy.rst doc/source/user/resources/clustering/v1/policy_type.rst doc/source/user/resources/clustering/v1/profile.rst 
doc/source/user/resources/clustering/v1/profile_type.rst doc/source/user/resources/clustering/v1/receiver.rst doc/source/user/resources/compute/index.rst doc/source/user/resources/compute/version.rst doc/source/user/resources/compute/v2/aggregate.rst doc/source/user/resources/compute/v2/availability_zone.rst doc/source/user/resources/compute/v2/extension.rst doc/source/user/resources/compute/v2/flavor.rst doc/source/user/resources/compute/v2/hypervisor.rst doc/source/user/resources/compute/v2/image.rst doc/source/user/resources/compute/v2/keypair.rst doc/source/user/resources/compute/v2/limits.rst doc/source/user/resources/compute/v2/migration.rst doc/source/user/resources/compute/v2/quota_set.rst doc/source/user/resources/compute/v2/server.rst doc/source/user/resources/compute/v2/server_action.rst doc/source/user/resources/compute/v2/server_diagnostics.rst doc/source/user/resources/compute/v2/server_group.rst doc/source/user/resources/compute/v2/server_interface.rst doc/source/user/resources/compute/v2/server_ip.rst doc/source/user/resources/compute/v2/server_migration.rst doc/source/user/resources/compute/v2/server_remote_console.rst doc/source/user/resources/compute/v2/service.rst doc/source/user/resources/compute/v2/usage.rst doc/source/user/resources/compute/v2/volume_attachment.rst doc/source/user/resources/container_infrastructure_management/cluster.rst doc/source/user/resources/container_infrastructure_management/cluster_certificate.rst doc/source/user/resources/container_infrastructure_management/cluster_template.rst doc/source/user/resources/container_infrastructure_management/index.rst doc/source/user/resources/container_infrastructure_management/service.rst doc/source/user/resources/database/index.rst doc/source/user/resources/database/v1/database.rst doc/source/user/resources/database/v1/flavor.rst doc/source/user/resources/database/v1/instance.rst doc/source/user/resources/database/v1/user.rst doc/source/user/resources/dns/index.rst 
doc/source/user/resources/dns/v2/floating_ip.rst doc/source/user/resources/dns/v2/recordset.rst doc/source/user/resources/dns/v2/zone.rst doc/source/user/resources/dns/v2/zone_export.rst doc/source/user/resources/dns/v2/zone_import.rst doc/source/user/resources/dns/v2/zone_share.rst doc/source/user/resources/dns/v2/zone_transfer.rst doc/source/user/resources/identity/index.rst doc/source/user/resources/identity/version.rst doc/source/user/resources/identity/v2/extension.rst doc/source/user/resources/identity/v2/role.rst doc/source/user/resources/identity/v2/tenant.rst doc/source/user/resources/identity/v2/user.rst doc/source/user/resources/identity/v3/application_credential.rst doc/source/user/resources/identity/v3/credential.rst doc/source/user/resources/identity/v3/domain.rst doc/source/user/resources/identity/v3/domain_config.rst doc/source/user/resources/identity/v3/endpoint.rst doc/source/user/resources/identity/v3/federation_protocol.rst doc/source/user/resources/identity/v3/group.rst doc/source/user/resources/identity/v3/identity_provider.rst doc/source/user/resources/identity/v3/limit.rst doc/source/user/resources/identity/v3/mapping.rst doc/source/user/resources/identity/v3/policy.rst doc/source/user/resources/identity/v3/project.rst doc/source/user/resources/identity/v3/region.rst doc/source/user/resources/identity/v3/registered_limit.rst doc/source/user/resources/identity/v3/role.rst doc/source/user/resources/identity/v3/role_assignment.rst doc/source/user/resources/identity/v3/role_domain_group_assignment.rst doc/source/user/resources/identity/v3/role_domain_user_assignment.rst doc/source/user/resources/identity/v3/role_project_group_assignment.rst doc/source/user/resources/identity/v3/role_project_user_assignment.rst doc/source/user/resources/identity/v3/role_system_group_assignment.rst doc/source/user/resources/identity/v3/role_system_user_assignment.rst doc/source/user/resources/identity/v3/service.rst doc/source/user/resources/identity/v3/system.rst 
doc/source/user/resources/identity/v3/trust.rst doc/source/user/resources/identity/v3/user.rst doc/source/user/resources/image/index.rst doc/source/user/resources/image/v1/image.rst doc/source/user/resources/image/v2/image.rst doc/source/user/resources/image/v2/member.rst doc/source/user/resources/image/v2/metadef_namespace.rst doc/source/user/resources/image/v2/metadef_object.rst doc/source/user/resources/image/v2/metadef_property.rst doc/source/user/resources/image/v2/metadef_resource_type.rst doc/source/user/resources/image/v2/metadef_schema.rst doc/source/user/resources/image/v2/service_info.rst doc/source/user/resources/image/v2/task.rst doc/source/user/resources/key_manager/index.rst doc/source/user/resources/key_manager/v1/container.rst doc/source/user/resources/key_manager/v1/order.rst doc/source/user/resources/key_manager/v1/secret.rst doc/source/user/resources/load_balancer/index.rst doc/source/user/resources/load_balancer/v2/amphora.rst doc/source/user/resources/load_balancer/v2/availability_zone.rst doc/source/user/resources/load_balancer/v2/availability_zone_profile.rst doc/source/user/resources/load_balancer/v2/flavor.rst doc/source/user/resources/load_balancer/v2/flavor_profile.rst doc/source/user/resources/load_balancer/v2/health_monitor.rst doc/source/user/resources/load_balancer/v2/l7_policy.rst doc/source/user/resources/load_balancer/v2/l7_rule.rst doc/source/user/resources/load_balancer/v2/listener.rst doc/source/user/resources/load_balancer/v2/load_balancer.rst doc/source/user/resources/load_balancer/v2/member.rst doc/source/user/resources/load_balancer/v2/pool.rst doc/source/user/resources/load_balancer/v2/provider.rst doc/source/user/resources/load_balancer/v2/quota.rst doc/source/user/resources/network/index.rst doc/source/user/resources/network/v2/address_group.rst doc/source/user/resources/network/v2/address_scope.rst doc/source/user/resources/network/v2/agent.rst doc/source/user/resources/network/v2/auto_allocated_topology.rst 
doc/source/user/resources/network/v2/availability_zone.rst doc/source/user/resources/network/v2/bgp_peer.rst doc/source/user/resources/network/v2/bgp_speaker.rst doc/source/user/resources/network/v2/bgpvpn.rst doc/source/user/resources/network/v2/bgpvpn_network_association.rst doc/source/user/resources/network/v2/bgpvpn_port_association.rst doc/source/user/resources/network/v2/bgpvpn_router_association.rst doc/source/user/resources/network/v2/extension.rst doc/source/user/resources/network/v2/flavor.rst doc/source/user/resources/network/v2/floating_ip.rst doc/source/user/resources/network/v2/health_monitor.rst doc/source/user/resources/network/v2/listener.rst doc/source/user/resources/network/v2/load_balancer.rst doc/source/user/resources/network/v2/local_ip.rst doc/source/user/resources/network/v2/local_ip_association.rst doc/source/user/resources/network/v2/metering_label.rst doc/source/user/resources/network/v2/metering_label_rule.rst doc/source/user/resources/network/v2/ndp_proxy.rst doc/source/user/resources/network/v2/network.rst doc/source/user/resources/network/v2/network_ip_availability.rst doc/source/user/resources/network/v2/network_segment_range.rst doc/source/user/resources/network/v2/pool.rst doc/source/user/resources/network/v2/pool_member.rst doc/source/user/resources/network/v2/port.rst doc/source/user/resources/network/v2/qos_bandwidth_limit_rule.rst doc/source/user/resources/network/v2/qos_dscp_marking_rule.rst doc/source/user/resources/network/v2/qos_minimum_bandwidth_rule.rst doc/source/user/resources/network/v2/qos_minimum_packet_rate_rule.rst doc/source/user/resources/network/v2/qos_policy.rst doc/source/user/resources/network/v2/qos_rule_type.rst doc/source/user/resources/network/v2/quota.rst doc/source/user/resources/network/v2/rbac_policy.rst doc/source/user/resources/network/v2/router.rst doc/source/user/resources/network/v2/security_group.rst doc/source/user/resources/network/v2/security_group_rule.rst 
doc/source/user/resources/network/v2/segment.rst doc/source/user/resources/network/v2/service_profile.rst doc/source/user/resources/network/v2/service_provider.rst doc/source/user/resources/network/v2/sfc_flow_classifier.rst doc/source/user/resources/network/v2/sfc_port_chain.rst doc/source/user/resources/network/v2/sfc_port_pair.rst doc/source/user/resources/network/v2/sfc_port_pair_group.rst doc/source/user/resources/network/v2/sfc_service_graph.rst doc/source/user/resources/network/v2/subnet.rst doc/source/user/resources/network/v2/subnet_pool.rst doc/source/user/resources/network/v2/tap_flow.rst doc/source/user/resources/network/v2/tap_mirror.rst doc/source/user/resources/network/v2/tap_service.rst doc/source/user/resources/network/v2/vpn/endpoint_group.rst doc/source/user/resources/network/v2/vpn/ike_policy.rst doc/source/user/resources/network/v2/vpn/index.rst doc/source/user/resources/network/v2/vpn/ipsec_policy.rst doc/source/user/resources/network/v2/vpn/ipsec_site_connection.rst doc/source/user/resources/network/v2/vpn/service.rst doc/source/user/resources/object_store/index.rst doc/source/user/resources/object_store/v1/account.rst doc/source/user/resources/object_store/v1/container.rst doc/source/user/resources/object_store/v1/obj.rst doc/source/user/resources/orchestration/index.rst doc/source/user/resources/orchestration/v1/resource.rst doc/source/user/resources/orchestration/v1/software_config.rst doc/source/user/resources/orchestration/v1/software_deployment.rst doc/source/user/resources/orchestration/v1/stack.rst doc/source/user/resources/orchestration/v1/stack_environment.rst doc/source/user/resources/orchestration/v1/stack_event.rst doc/source/user/resources/orchestration/v1/stack_files.rst doc/source/user/resources/orchestration/v1/stack_template.rst doc/source/user/resources/orchestration/v1/template.rst doc/source/user/resources/placement/index.rst doc/source/user/resources/placement/v1/resource_class.rst 
doc/source/user/resources/placement/v1/resource_provider.rst doc/source/user/resources/placement/v1/resource_provider_inventory.rst doc/source/user/resources/placement/v1/trait.rst doc/source/user/resources/shared_file_system/index.rst doc/source/user/resources/shared_file_system/v2/availability_zone.rst doc/source/user/resources/shared_file_system/v2/limit.rst doc/source/user/resources/shared_file_system/v2/quota_class_set.rst doc/source/user/resources/shared_file_system/v2/resource_locks.rst doc/source/user/resources/shared_file_system/v2/share.rst doc/source/user/resources/shared_file_system/v2/share_access_rule.rst doc/source/user/resources/shared_file_system/v2/share_group.rst doc/source/user/resources/shared_file_system/v2/share_group_snapshot.rst doc/source/user/resources/shared_file_system/v2/share_instance.rst doc/source/user/resources/shared_file_system/v2/share_network.rst doc/source/user/resources/shared_file_system/v2/share_network_subnet.rst doc/source/user/resources/shared_file_system/v2/share_snapshot.rst doc/source/user/resources/shared_file_system/v2/share_snapshot_instance.rst doc/source/user/resources/shared_file_system/v2/storage_pool.rst doc/source/user/resources/shared_file_system/v2/user_message.rst doc/source/user/resources/workflow/index.rst doc/source/user/resources/workflow/v2/crontrigger.rst doc/source/user/resources/workflow/v2/execution.rst doc/source/user/resources/workflow/v2/workflow.rst doc/source/user/testing/fakes.rst doc/source/user/testing/index.rst examples/__init__.py examples/connect.py examples/baremetal/list.py examples/baremetal/provisioning.py examples/cloud/cleanup-servers.py examples/cloud/create-server-dict.py examples/cloud/create-server-name-or-id.py examples/cloud/debug-logging.py examples/cloud/find-an-image.py examples/cloud/http-debug-logging.py examples/cloud/munch-dict-object.py examples/cloud/normalization.py examples/cloud/server-information.py examples/cloud/service-conditional-overrides.py 
examples/cloud/service-conditionals.py examples/cloud/strict-mode.py examples/cloud/upload-large-object.py examples/cloud/upload-object.py examples/cloud/user-agent.py examples/clustering/__init__.py examples/clustering/action.py examples/clustering/cluster.py examples/clustering/event.py examples/clustering/node.py examples/clustering/policy.py examples/clustering/policy_type.py examples/clustering/profile.py examples/clustering/profile_type.py examples/clustering/receiver.py examples/compute/__init__.py examples/compute/create.py examples/compute/delete.py examples/compute/find.py examples/compute/list.py examples/dns/__init__.py examples/dns/list.py examples/identity/__init__.py examples/identity/list.py examples/image/__init__.py examples/image/create.py examples/image/delete.py examples/image/download.py examples/image/import.py examples/image/list.py examples/key_manager/__init__.py examples/key_manager/create.py examples/key_manager/get.py examples/key_manager/list.py examples/network/__init__.py examples/network/create.py examples/network/delete.py examples/network/find.py examples/network/list.py examples/network/security_group_rules.py examples/shared_file_system/__init__.py examples/shared_file_system/availability_zones.py examples/shared_file_system/share_group_snapshots.py examples/shared_file_system/share_instances.py examples/shared_file_system/share_metadata.py examples/shared_file_system/shares.py extras/delete-network.sh extras/run-ansible-tests.sh openstack/__init__.py openstack/__main__.py openstack/_log.py openstack/_services_mixin.py openstack/connection.py openstack/exceptions.py openstack/format.py openstack/proxy.py openstack/py.typed openstack/resource.py openstack/service_description.py openstack/utils.py openstack/version.py openstack/warnings.py openstack/_hacking/checks.py openstack/accelerator/__init__.py openstack/accelerator/accelerator_service.py openstack/accelerator/version.py openstack/accelerator/v2/__init__.py 
openstack/accelerator/v2/_proxy.py openstack/accelerator/v2/accelerator_request.py openstack/accelerator/v2/deployable.py openstack/accelerator/v2/device.py openstack/accelerator/v2/device_profile.py openstack/baremetal/__init__.py openstack/baremetal/baremetal_service.py openstack/baremetal/configdrive.py openstack/baremetal/version.py openstack/baremetal/v1/__init__.py openstack/baremetal/v1/_common.py openstack/baremetal/v1/_proxy.py openstack/baremetal/v1/allocation.py openstack/baremetal/v1/chassis.py openstack/baremetal/v1/conductor.py openstack/baremetal/v1/deploy_templates.py openstack/baremetal/v1/driver.py openstack/baremetal/v1/node.py openstack/baremetal/v1/port.py openstack/baremetal/v1/port_group.py openstack/baremetal/v1/volume_connector.py openstack/baremetal/v1/volume_target.py openstack/baremetal_introspection/__init__.py openstack/baremetal_introspection/baremetal_introspection_service.py openstack/baremetal_introspection/v1/__init__.py openstack/baremetal_introspection/v1/_proxy.py openstack/baremetal_introspection/v1/introspection.py openstack/baremetal_introspection/v1/introspection_rule.py openstack/block_storage/__init__.py openstack/block_storage/_base_proxy.py openstack/block_storage/block_storage_service.py openstack/block_storage/v2/__init__.py openstack/block_storage/v2/_proxy.py openstack/block_storage/v2/backup.py openstack/block_storage/v2/capabilities.py openstack/block_storage/v2/extension.py openstack/block_storage/v2/limits.py openstack/block_storage/v2/quota_class_set.py openstack/block_storage/v2/quota_set.py openstack/block_storage/v2/snapshot.py openstack/block_storage/v2/stats.py openstack/block_storage/v2/type.py openstack/block_storage/v2/volume.py openstack/block_storage/v3/__init__.py openstack/block_storage/v3/_proxy.py openstack/block_storage/v3/attachment.py openstack/block_storage/v3/availability_zone.py openstack/block_storage/v3/backup.py openstack/block_storage/v3/block_storage_summary.py 
openstack/block_storage/v3/capabilities.py openstack/block_storage/v3/extension.py openstack/block_storage/v3/group.py openstack/block_storage/v3/group_snapshot.py openstack/block_storage/v3/group_type.py openstack/block_storage/v3/limits.py openstack/block_storage/v3/quota_class_set.py openstack/block_storage/v3/quota_set.py openstack/block_storage/v3/resource_filter.py openstack/block_storage/v3/service.py openstack/block_storage/v3/snapshot.py openstack/block_storage/v3/stats.py openstack/block_storage/v3/transfer.py openstack/block_storage/v3/type.py openstack/block_storage/v3/volume.py openstack/cloud/__init__.py openstack/cloud/_accelerator.py openstack/cloud/_baremetal.py openstack/cloud/_block_storage.py openstack/cloud/_coe.py openstack/cloud/_compute.py openstack/cloud/_dns.py openstack/cloud/_identity.py openstack/cloud/_image.py openstack/cloud/_network.py openstack/cloud/_network_common.py openstack/cloud/_object_store.py openstack/cloud/_orchestration.py openstack/cloud/_shared_file_system.py openstack/cloud/_utils.py openstack/cloud/exc.py openstack/cloud/inventory.py openstack/cloud/meta.py openstack/cloud/openstackcloud.py openstack/cloud/cmd/__init__.py openstack/cloud/cmd/inventory.py openstack/cloud/tests/__init__.py openstack/clustering/__init__.py openstack/clustering/clustering_service.py openstack/clustering/version.py openstack/clustering/v1/__init__.py openstack/clustering/v1/_async_resource.py openstack/clustering/v1/_proxy.py openstack/clustering/v1/action.py openstack/clustering/v1/build_info.py openstack/clustering/v1/cluster.py openstack/clustering/v1/cluster_attr.py openstack/clustering/v1/cluster_policy.py openstack/clustering/v1/event.py openstack/clustering/v1/node.py openstack/clustering/v1/policy.py openstack/clustering/v1/policy_type.py openstack/clustering/v1/profile.py openstack/clustering/v1/profile_type.py openstack/clustering/v1/receiver.py openstack/clustering/v1/service.py openstack/common/__init__.py 
openstack/common/metadata.py openstack/common/quota_set.py openstack/common/tag.py openstack/compute/__init__.py openstack/compute/compute_service.py openstack/compute/version.py openstack/compute/v2/__init__.py openstack/compute/v2/_proxy.py openstack/compute/v2/aggregate.py openstack/compute/v2/availability_zone.py openstack/compute/v2/extension.py openstack/compute/v2/flavor.py openstack/compute/v2/hypervisor.py openstack/compute/v2/image.py openstack/compute/v2/keypair.py openstack/compute/v2/limits.py openstack/compute/v2/migration.py openstack/compute/v2/quota_class_set.py openstack/compute/v2/quota_set.py openstack/compute/v2/server.py openstack/compute/v2/server_action.py openstack/compute/v2/server_diagnostics.py openstack/compute/v2/server_group.py openstack/compute/v2/server_interface.py openstack/compute/v2/server_ip.py openstack/compute/v2/server_migration.py openstack/compute/v2/server_remote_console.py openstack/compute/v2/service.py openstack/compute/v2/usage.py openstack/compute/v2/volume_attachment.py openstack/config/__init__.py openstack/config/_util.py openstack/config/cloud_config.py openstack/config/cloud_region.py openstack/config/defaults.json openstack/config/defaults.py openstack/config/exceptions.py openstack/config/loader.py openstack/config/schema.json openstack/config/vendor-schema.json openstack/config/vendors/__init__.py openstack/config/vendors/auro.json openstack/config/vendors/betacloud.json openstack/config/vendors/binero.json openstack/config/vendors/bluebox.json openstack/config/vendors/catalyst.json openstack/config/vendors/citycloud.json openstack/config/vendors/conoha.json openstack/config/vendors/dreamcompute.json openstack/config/vendors/elastx.json openstack/config/vendors/entercloudsuite.json openstack/config/vendors/fuga.json openstack/config/vendors/ibmcloud.json openstack/config/vendors/internap.json openstack/config/vendors/limestonenetworks.yaml openstack/config/vendors/otc-swiss.json 
openstack/config/vendors/otc.json openstack/config/vendors/ovh-us.json openstack/config/vendors/ovh.json openstack/config/vendors/rackspace.json openstack/config/vendors/switchengines.json openstack/config/vendors/ultimum.json openstack/config/vendors/unitedstack.json openstack/config/vendors/vexxhost.json openstack/config/vendors/zetta.json openstack/container_infrastructure_management/__init__.py openstack/container_infrastructure_management/container_infrastructure_management_service.py openstack/container_infrastructure_management/v1/__init__.py openstack/container_infrastructure_management/v1/_proxy.py openstack/container_infrastructure_management/v1/cluster.py openstack/container_infrastructure_management/v1/cluster_certificate.py openstack/container_infrastructure_management/v1/cluster_template.py openstack/container_infrastructure_management/v1/service.py openstack/database/__init__.py openstack/database/database_service.py openstack/database/v1/__init__.py openstack/database/v1/_proxy.py openstack/database/v1/database.py openstack/database/v1/flavor.py openstack/database/v1/instance.py openstack/database/v1/user.py openstack/dns/__init__.py openstack/dns/dns_service.py openstack/dns/version.py openstack/dns/v2/__init__.py openstack/dns/v2/_base.py openstack/dns/v2/_proxy.py openstack/dns/v2/floating_ip.py openstack/dns/v2/recordset.py openstack/dns/v2/zone.py openstack/dns/v2/zone_export.py openstack/dns/v2/zone_import.py openstack/dns/v2/zone_share.py openstack/dns/v2/zone_transfer.py openstack/fixture/__init__.py openstack/fixture/connection.py openstack/identity/__init__.py openstack/identity/identity_service.py openstack/identity/version.py openstack/identity/v2/__init__.py openstack/identity/v2/_proxy.py openstack/identity/v2/extension.py openstack/identity/v2/role.py openstack/identity/v2/tenant.py openstack/identity/v2/user.py openstack/identity/v3/__init__.py openstack/identity/v3/_proxy.py openstack/identity/v3/access_rule.py 
openstack/identity/v3/application_credential.py openstack/identity/v3/credential.py openstack/identity/v3/domain.py openstack/identity/v3/domain_config.py openstack/identity/v3/endpoint.py openstack/identity/v3/federation_protocol.py openstack/identity/v3/group.py openstack/identity/v3/identity_provider.py openstack/identity/v3/limit.py openstack/identity/v3/mapping.py openstack/identity/v3/policy.py openstack/identity/v3/project.py openstack/identity/v3/region.py openstack/identity/v3/registered_limit.py openstack/identity/v3/role.py openstack/identity/v3/role_assignment.py openstack/identity/v3/role_domain_group_assignment.py openstack/identity/v3/role_domain_user_assignment.py openstack/identity/v3/role_project_group_assignment.py openstack/identity/v3/role_project_user_assignment.py openstack/identity/v3/role_system_group_assignment.py openstack/identity/v3/role_system_user_assignment.py openstack/identity/v3/service.py openstack/identity/v3/service_provider.py openstack/identity/v3/system.py openstack/identity/v3/trust.py openstack/identity/v3/user.py openstack/image/__init__.py openstack/image/_download.py openstack/image/image_service.py openstack/image/image_signer.py openstack/image/iterable_chunked_file.py openstack/image/v1/__init__.py openstack/image/v1/_proxy.py openstack/image/v1/image.py openstack/image/v2/__init__.py openstack/image/v2/_proxy.py openstack/image/v2/cache.py openstack/image/v2/image.py openstack/image/v2/member.py openstack/image/v2/metadef_namespace.py openstack/image/v2/metadef_object.py openstack/image/v2/metadef_property.py openstack/image/v2/metadef_resource_type.py openstack/image/v2/metadef_schema.py openstack/image/v2/schema.py openstack/image/v2/service_info.py openstack/image/v2/task.py openstack/instance_ha/__init__.py openstack/instance_ha/instance_ha_service.py openstack/instance_ha/v1/__init__.py openstack/instance_ha/v1/_proxy.py openstack/instance_ha/v1/host.py openstack/instance_ha/v1/notification.py 
openstack/instance_ha/v1/segment.py openstack/instance_ha/v1/vmove.py openstack/key_manager/__init__.py openstack/key_manager/key_manager_service.py openstack/key_manager/v1/__init__.py openstack/key_manager/v1/_format.py openstack/key_manager/v1/_proxy.py openstack/key_manager/v1/container.py openstack/key_manager/v1/order.py openstack/key_manager/v1/secret.py openstack/load_balancer/__init__.py openstack/load_balancer/load_balancer_service.py openstack/load_balancer/version.py openstack/load_balancer/v2/__init__.py openstack/load_balancer/v2/_proxy.py openstack/load_balancer/v2/amphora.py openstack/load_balancer/v2/availability_zone.py openstack/load_balancer/v2/availability_zone_profile.py openstack/load_balancer/v2/flavor.py openstack/load_balancer/v2/flavor_profile.py openstack/load_balancer/v2/health_monitor.py openstack/load_balancer/v2/l7_policy.py openstack/load_balancer/v2/l7_rule.py openstack/load_balancer/v2/listener.py openstack/load_balancer/v2/load_balancer.py openstack/load_balancer/v2/member.py openstack/load_balancer/v2/pool.py openstack/load_balancer/v2/provider.py openstack/load_balancer/v2/quota.py openstack/message/__init__.py openstack/message/message_service.py openstack/message/version.py openstack/message/v2/__init__.py openstack/message/v2/_proxy.py openstack/message/v2/claim.py openstack/message/v2/message.py openstack/message/v2/queue.py openstack/message/v2/subscription.py openstack/network/__init__.py openstack/network/network_service.py openstack/network/version.py openstack/network/v2/__init__.py openstack/network/v2/_base.py openstack/network/v2/_proxy.py openstack/network/v2/address_group.py openstack/network/v2/address_scope.py openstack/network/v2/agent.py openstack/network/v2/auto_allocated_topology.py openstack/network/v2/availability_zone.py openstack/network/v2/bgp_peer.py openstack/network/v2/bgp_speaker.py openstack/network/v2/bgpvpn.py openstack/network/v2/bgpvpn_network_association.py 
openstack/network/v2/bgpvpn_port_association.py openstack/network/v2/bgpvpn_router_association.py openstack/network/v2/default_security_group_rule.py openstack/network/v2/extension.py openstack/network/v2/firewall_group.py openstack/network/v2/firewall_policy.py openstack/network/v2/firewall_rule.py openstack/network/v2/flavor.py openstack/network/v2/floating_ip.py openstack/network/v2/health_monitor.py openstack/network/v2/l3_conntrack_helper.py openstack/network/v2/listener.py openstack/network/v2/load_balancer.py openstack/network/v2/local_ip.py openstack/network/v2/local_ip_association.py openstack/network/v2/metering_label.py openstack/network/v2/metering_label_rule.py openstack/network/v2/ndp_proxy.py openstack/network/v2/network.py openstack/network/v2/network_ip_availability.py openstack/network/v2/network_segment_range.py openstack/network/v2/pool.py openstack/network/v2/pool_member.py openstack/network/v2/port.py openstack/network/v2/port_forwarding.py openstack/network/v2/qos_bandwidth_limit_rule.py openstack/network/v2/qos_dscp_marking_rule.py openstack/network/v2/qos_minimum_bandwidth_rule.py openstack/network/v2/qos_minimum_packet_rate_rule.py openstack/network/v2/qos_policy.py openstack/network/v2/qos_rule_type.py openstack/network/v2/quota.py openstack/network/v2/rbac_policy.py openstack/network/v2/router.py openstack/network/v2/security_group.py openstack/network/v2/security_group_rule.py openstack/network/v2/segment.py openstack/network/v2/service_profile.py openstack/network/v2/service_provider.py openstack/network/v2/sfc_flow_classifier.py openstack/network/v2/sfc_port_chain.py openstack/network/v2/sfc_port_pair.py openstack/network/v2/sfc_port_pair_group.py openstack/network/v2/sfc_service_graph.py openstack/network/v2/subnet.py openstack/network/v2/subnet_pool.py openstack/network/v2/tap_flow.py openstack/network/v2/tap_mirror.py openstack/network/v2/tap_service.py openstack/network/v2/trunk.py openstack/network/v2/vpn_endpoint_group.py 
openstack/network/v2/vpn_ike_policy.py openstack/network/v2/vpn_ipsec_policy.py openstack/network/v2/vpn_ipsec_site_connection.py openstack/network/v2/vpn_service.py openstack/object_store/__init__.py openstack/object_store/object_store_service.py openstack/object_store/v1/__init__.py openstack/object_store/v1/_base.py openstack/object_store/v1/_proxy.py openstack/object_store/v1/account.py openstack/object_store/v1/container.py openstack/object_store/v1/info.py openstack/object_store/v1/obj.py openstack/orchestration/__init__.py openstack/orchestration/orchestration_service.py openstack/orchestration/version.py openstack/orchestration/util/__init__.py openstack/orchestration/util/environment_format.py openstack/orchestration/util/event_utils.py openstack/orchestration/util/template_format.py openstack/orchestration/util/template_utils.py openstack/orchestration/util/utils.py openstack/orchestration/v1/__init__.py openstack/orchestration/v1/_proxy.py openstack/orchestration/v1/resource.py openstack/orchestration/v1/software_config.py openstack/orchestration/v1/software_deployment.py openstack/orchestration/v1/stack.py openstack/orchestration/v1/stack_environment.py openstack/orchestration/v1/stack_event.py openstack/orchestration/v1/stack_files.py openstack/orchestration/v1/stack_template.py openstack/orchestration/v1/template.py openstack/placement/__init__.py openstack/placement/placement_service.py openstack/placement/v1/__init__.py openstack/placement/v1/_proxy.py openstack/placement/v1/resource_class.py openstack/placement/v1/resource_provider.py openstack/placement/v1/resource_provider_inventory.py openstack/placement/v1/trait.py openstack/shared_file_system/__init__.py openstack/shared_file_system/shared_file_system_service.py openstack/shared_file_system/v2/__init__.py openstack/shared_file_system/v2/_proxy.py openstack/shared_file_system/v2/availability_zone.py openstack/shared_file_system/v2/limit.py openstack/shared_file_system/v2/quota_class_set.py 
openstack/shared_file_system/v2/resource_locks.py openstack/shared_file_system/v2/share.py openstack/shared_file_system/v2/share_access_rule.py openstack/shared_file_system/v2/share_export_locations.py openstack/shared_file_system/v2/share_group.py openstack/shared_file_system/v2/share_group_snapshot.py openstack/shared_file_system/v2/share_instance.py openstack/shared_file_system/v2/share_network.py openstack/shared_file_system/v2/share_network_subnet.py openstack/shared_file_system/v2/share_snapshot.py openstack/shared_file_system/v2/share_snapshot_instance.py openstack/shared_file_system/v2/storage_pool.py openstack/shared_file_system/v2/user_message.py openstack/test/__init__.py openstack/test/fakes.py openstack/tests/README.rst openstack/tests/__init__.py openstack/tests/base.py openstack/tests/fakes.py openstack/tests/fixtures.py openstack/tests/ansible/README.txt openstack/tests/ansible/run.yml openstack/tests/ansible/hooks/post_test_hook.sh openstack/tests/ansible/roles/auth/tasks/main.yml openstack/tests/ansible/roles/client_config/tasks/main.yml openstack/tests/ansible/roles/group/defaults/main.yml openstack/tests/ansible/roles/group/tasks/main.yml openstack/tests/ansible/roles/image/defaults/main.yml openstack/tests/ansible/roles/image/tasks/main.yml openstack/tests/ansible/roles/keypair/defaults/main.yml openstack/tests/ansible/roles/keypair/tasks/main.yml openstack/tests/ansible/roles/keystone_domain/defaults/main.yml openstack/tests/ansible/roles/keystone_domain/tasks/main.yml openstack/tests/ansible/roles/keystone_role/defaults/main.yml openstack/tests/ansible/roles/keystone_role/tasks/main.yml openstack/tests/ansible/roles/network/defaults/main.yml openstack/tests/ansible/roles/network/tasks/main.yml openstack/tests/ansible/roles/nova_flavor/tasks/main.yml openstack/tests/ansible/roles/object/tasks/main.yml openstack/tests/ansible/roles/port/defaults/main.yml openstack/tests/ansible/roles/port/tasks/main.yml 
openstack/tests/ansible/roles/router/defaults/main.yml openstack/tests/ansible/roles/router/tasks/main.yml openstack/tests/ansible/roles/security_group/defaults/main.yml openstack/tests/ansible/roles/security_group/tasks/main.yml openstack/tests/ansible/roles/server/defaults/main.yaml openstack/tests/ansible/roles/server/tasks/main.yml openstack/tests/ansible/roles/subnet/defaults/main.yml openstack/tests/ansible/roles/subnet/tasks/main.yml openstack/tests/ansible/roles/user/tasks/main.yml openstack/tests/ansible/roles/user_group/tasks/main.yml openstack/tests/ansible/roles/volume/tasks/main.yml openstack/tests/functional/README.rst openstack/tests/functional/__init__.py openstack/tests/functional/base.py openstack/tests/functional/baremetal/__init__.py openstack/tests/functional/baremetal/base.py openstack/tests/functional/baremetal/test_baremetal_allocation.py openstack/tests/functional/baremetal/test_baremetal_chassis.py openstack/tests/functional/baremetal/test_baremetal_conductor.py openstack/tests/functional/baremetal/test_baremetal_deploy_templates.py openstack/tests/functional/baremetal/test_baremetal_driver.py openstack/tests/functional/baremetal/test_baremetal_node.py openstack/tests/functional/baremetal/test_baremetal_port.py openstack/tests/functional/baremetal/test_baremetal_port_group.py openstack/tests/functional/baremetal/test_baremetal_volume_connector.py openstack/tests/functional/baremetal/test_baremetal_volume_target.py openstack/tests/functional/block_storage/__init__.py openstack/tests/functional/block_storage/v2/__init__.py openstack/tests/functional/block_storage/v2/base.py openstack/tests/functional/block_storage/v2/test_backup.py openstack/tests/functional/block_storage/v2/test_snapshot.py openstack/tests/functional/block_storage/v2/test_stats.py openstack/tests/functional/block_storage/v2/test_type.py openstack/tests/functional/block_storage/v2/test_volume.py openstack/tests/functional/block_storage/v3/__init__.py 
openstack/tests/functional/block_storage/v3/base.py openstack/tests/functional/block_storage/v3/test_attachment.py openstack/tests/functional/block_storage/v3/test_availability_zone.py openstack/tests/functional/block_storage/v3/test_backup.py openstack/tests/functional/block_storage/v3/test_block_storage_summary.py openstack/tests/functional/block_storage/v3/test_capabilities.py openstack/tests/functional/block_storage/v3/test_extension.py openstack/tests/functional/block_storage/v3/test_group.py openstack/tests/functional/block_storage/v3/test_limits.py openstack/tests/functional/block_storage/v3/test_resource_filters.py openstack/tests/functional/block_storage/v3/test_service.py openstack/tests/functional/block_storage/v3/test_snapshot.py openstack/tests/functional/block_storage/v3/test_transfer.py openstack/tests/functional/block_storage/v3/test_type.py openstack/tests/functional/block_storage/v3/test_volume.py openstack/tests/functional/cloud/__init__.py openstack/tests/functional/cloud/test_aggregate.py openstack/tests/functional/cloud/test_cluster_templates.py openstack/tests/functional/cloud/test_coe_clusters.py openstack/tests/functional/cloud/test_compute.py openstack/tests/functional/cloud/test_devstack.py openstack/tests/functional/cloud/test_domain.py openstack/tests/functional/cloud/test_endpoints.py openstack/tests/functional/cloud/test_flavor.py openstack/tests/functional/cloud/test_floating_ip.py openstack/tests/functional/cloud/test_floating_ip_pool.py openstack/tests/functional/cloud/test_groups.py openstack/tests/functional/cloud/test_identity.py openstack/tests/functional/cloud/test_image.py openstack/tests/functional/cloud/test_inventory.py openstack/tests/functional/cloud/test_keypairs.py openstack/tests/functional/cloud/test_limits.py openstack/tests/functional/cloud/test_magnum_services.py openstack/tests/functional/cloud/test_network.py openstack/tests/functional/cloud/test_object.py openstack/tests/functional/cloud/test_port.py 
openstack/tests/functional/cloud/test_project.py openstack/tests/functional/cloud/test_project_cleanup.py openstack/tests/functional/cloud/test_qos_bandwidth_limit_rule.py openstack/tests/functional/cloud/test_qos_dscp_marking_rule.py openstack/tests/functional/cloud/test_qos_minimum_bandwidth_rule.py openstack/tests/functional/cloud/test_qos_policy.py openstack/tests/functional/cloud/test_quotas.py openstack/tests/functional/cloud/test_range_search.py openstack/tests/functional/cloud/test_recordset.py openstack/tests/functional/cloud/test_router.py openstack/tests/functional/cloud/test_security_groups.py openstack/tests/functional/cloud/test_server_group.py openstack/tests/functional/cloud/test_services.py openstack/tests/functional/cloud/test_stack.py openstack/tests/functional/cloud/test_users.py openstack/tests/functional/cloud/test_volume.py openstack/tests/functional/cloud/test_volume_backup.py openstack/tests/functional/cloud/test_volume_type.py openstack/tests/functional/cloud/test_zone.py openstack/tests/functional/clustering/__init__.py openstack/tests/functional/clustering/test_cluster.py openstack/tests/functional/compute/__init__.py openstack/tests/functional/compute/base.py openstack/tests/functional/compute/v2/__init__.py openstack/tests/functional/compute/v2/test_extension.py openstack/tests/functional/compute/v2/test_flavor.py openstack/tests/functional/compute/v2/test_hypervisor.py openstack/tests/functional/compute/v2/test_image.py openstack/tests/functional/compute/v2/test_keypair.py openstack/tests/functional/compute/v2/test_limits.py openstack/tests/functional/compute/v2/test_quota_set.py openstack/tests/functional/compute/v2/test_server.py openstack/tests/functional/compute/v2/test_service.py openstack/tests/functional/compute/v2/test_volume_attachment.py openstack/tests/functional/dns/__init__.py openstack/tests/functional/dns/v2/__init__.py openstack/tests/functional/dns/v2/test_zone.py openstack/tests/functional/dns/v2/test_zone_share.py 
openstack/tests/functional/examples/__init__.py openstack/tests/functional/examples/test_compute.py openstack/tests/functional/examples/test_identity.py openstack/tests/functional/examples/test_image.py openstack/tests/functional/examples/test_network.py openstack/tests/functional/identity/__init__.py openstack/tests/functional/identity/v3/__init__.py openstack/tests/functional/identity/v3/test_access_rule.py openstack/tests/functional/identity/v3/test_application_credential.py openstack/tests/functional/identity/v3/test_domain_config.py openstack/tests/functional/image/__init__.py openstack/tests/functional/image/v2/__init__.py openstack/tests/functional/image/v2/base.py openstack/tests/functional/image/v2/test_image.py openstack/tests/functional/image/v2/test_metadef_namespace.py openstack/tests/functional/image/v2/test_metadef_object.py openstack/tests/functional/image/v2/test_metadef_property.py openstack/tests/functional/image/v2/test_metadef_resource_type.py openstack/tests/functional/image/v2/test_metadef_schema.py openstack/tests/functional/image/v2/test_schema.py openstack/tests/functional/image/v2/test_task.py openstack/tests/functional/instance_ha/__init__.py openstack/tests/functional/instance_ha/test_host.py openstack/tests/functional/instance_ha/test_segment.py openstack/tests/functional/load_balancer/__init__.py openstack/tests/functional/load_balancer/v2/__init__.py openstack/tests/functional/load_balancer/v2/test_load_balancer.py openstack/tests/functional/network/__init__.py openstack/tests/functional/network/v2/__init__.py openstack/tests/functional/network/v2/test_address_group.py openstack/tests/functional/network/v2/test_address_scope.py openstack/tests/functional/network/v2/test_agent.py openstack/tests/functional/network/v2/test_agent_add_remove_network.py openstack/tests/functional/network/v2/test_agent_add_remove_router.py openstack/tests/functional/network/v2/test_auto_allocated_topology.py 
openstack/tests/functional/network/v2/test_availability_zone.py openstack/tests/functional/network/v2/test_bgp.py openstack/tests/functional/network/v2/test_bgpvpn.py openstack/tests/functional/network/v2/test_default_security_group_rule.py openstack/tests/functional/network/v2/test_dvr_router.py openstack/tests/functional/network/v2/test_extension.py openstack/tests/functional/network/v2/test_firewall_group.py openstack/tests/functional/network/v2/test_firewall_policy.py openstack/tests/functional/network/v2/test_firewall_rule.py openstack/tests/functional/network/v2/test_firewall_rule_insert_remove_policy.py openstack/tests/functional/network/v2/test_flavor.py openstack/tests/functional/network/v2/test_floating_ip.py openstack/tests/functional/network/v2/test_l3_conntrack_helper.py openstack/tests/functional/network/v2/test_local_ip.py openstack/tests/functional/network/v2/test_local_ip_association.py openstack/tests/functional/network/v2/test_ndp_proxy.py openstack/tests/functional/network/v2/test_network.py openstack/tests/functional/network/v2/test_network_ip_availability.py openstack/tests/functional/network/v2/test_network_segment_range.py openstack/tests/functional/network/v2/test_port.py openstack/tests/functional/network/v2/test_port_forwarding.py openstack/tests/functional/network/v2/test_qos_bandwidth_limit_rule.py openstack/tests/functional/network/v2/test_qos_dscp_marking_rule.py openstack/tests/functional/network/v2/test_qos_minimum_bandwidth_rule.py openstack/tests/functional/network/v2/test_qos_minimum_packet_rate_rule.py openstack/tests/functional/network/v2/test_qos_policy.py openstack/tests/functional/network/v2/test_qos_rule_type.py openstack/tests/functional/network/v2/test_quota.py openstack/tests/functional/network/v2/test_rbac_policy.py openstack/tests/functional/network/v2/test_router.py openstack/tests/functional/network/v2/test_router_add_remove_interface.py openstack/tests/functional/network/v2/test_security_group.py 
openstack/tests/functional/network/v2/test_security_group_rule.py openstack/tests/functional/network/v2/test_segment.py openstack/tests/functional/network/v2/test_service_profile.py openstack/tests/functional/network/v2/test_service_provider.py openstack/tests/functional/network/v2/test_sfc.py openstack/tests/functional/network/v2/test_subnet.py openstack/tests/functional/network/v2/test_subnet_from_subnet_pool.py openstack/tests/functional/network/v2/test_subnet_pool.py openstack/tests/functional/network/v2/test_taas.py openstack/tests/functional/network/v2/test_tap_mirror.py openstack/tests/functional/network/v2/test_trunk.py openstack/tests/functional/network/v2/test_vpnaas.py openstack/tests/functional/object_store/__init__.py openstack/tests/functional/object_store/v1/__init__.py openstack/tests/functional/object_store/v1/test_account.py openstack/tests/functional/object_store/v1/test_container.py openstack/tests/functional/object_store/v1/test_obj.py openstack/tests/functional/orchestration/__init__.py openstack/tests/functional/orchestration/v1/__init__.py openstack/tests/functional/orchestration/v1/hello_world.yaml openstack/tests/functional/orchestration/v1/test_stack.py openstack/tests/functional/placement/__init__.py openstack/tests/functional/placement/v1/__init__.py openstack/tests/functional/placement/v1/test_resource_provider.py openstack/tests/functional/placement/v1/test_resource_provider_inventory.py openstack/tests/functional/placement/v1/test_trait.py openstack/tests/functional/shared_file_system/__init__.py openstack/tests/functional/shared_file_system/base.py openstack/tests/functional/shared_file_system/test_availability_zone.py openstack/tests/functional/shared_file_system/test_export_locations.py openstack/tests/functional/shared_file_system/test_limit.py openstack/tests/functional/shared_file_system/test_quota_class_set.py openstack/tests/functional/shared_file_system/test_resource_lock.py 
openstack/tests/functional/shared_file_system/test_share.py openstack/tests/functional/shared_file_system/test_share_access_rule.py openstack/tests/functional/shared_file_system/test_share_group.py openstack/tests/functional/shared_file_system/test_share_group_snapshot.py openstack/tests/functional/shared_file_system/test_share_instance.py openstack/tests/functional/shared_file_system/test_share_metadata.py openstack/tests/functional/shared_file_system/test_share_network.py openstack/tests/functional/shared_file_system/test_share_network_subnet.py openstack/tests/functional/shared_file_system/test_share_snapshot.py openstack/tests/functional/shared_file_system/test_share_snapshot_instance.py openstack/tests/functional/shared_file_system/test_storage_pool.py openstack/tests/functional/shared_file_system/test_user_message.py openstack/tests/unit/README.rst openstack/tests/unit/__init__.py openstack/tests/unit/base.py openstack/tests/unit/fakes.py openstack/tests/unit/test_connection.py openstack/tests/unit/test_exceptions.py openstack/tests/unit/test_fakes.py openstack/tests/unit/test_format.py openstack/tests/unit/test_hacking.py openstack/tests/unit/test_microversions.py openstack/tests/unit/test_missing_version.py openstack/tests/unit/test_placement_rest.py openstack/tests/unit/test_proxy.py openstack/tests/unit/test_proxy_base.py openstack/tests/unit/test_resource.py openstack/tests/unit/test_stats.py openstack/tests/unit/test_utils.py openstack/tests/unit/accelerator/__init__.py openstack/tests/unit/accelerator/test_version.py openstack/tests/unit/accelerator/v2/__init__.py openstack/tests/unit/accelerator/v2/test_accelerator_request.py openstack/tests/unit/accelerator/v2/test_deployable.py openstack/tests/unit/accelerator/v2/test_device.py openstack/tests/unit/accelerator/v2/test_device_profile.py openstack/tests/unit/accelerator/v2/test_proxy.py openstack/tests/unit/baremetal/__init__.py openstack/tests/unit/baremetal/test_configdrive.py 
openstack/tests/unit/baremetal/test_version.py openstack/tests/unit/baremetal/v1/__init__.py openstack/tests/unit/baremetal/v1/test_allocation.py openstack/tests/unit/baremetal/v1/test_chassis.py openstack/tests/unit/baremetal/v1/test_conductor.py openstack/tests/unit/baremetal/v1/test_deploy_templates.py openstack/tests/unit/baremetal/v1/test_driver.py openstack/tests/unit/baremetal/v1/test_node.py openstack/tests/unit/baremetal/v1/test_port.py openstack/tests/unit/baremetal/v1/test_port_group.py openstack/tests/unit/baremetal/v1/test_proxy.py openstack/tests/unit/baremetal/v1/test_volume_connector.py openstack/tests/unit/baremetal/v1/test_volume_target.py openstack/tests/unit/baremetal_introspection/__init__.py openstack/tests/unit/baremetal_introspection/v1/__init__.py openstack/tests/unit/baremetal_introspection/v1/test_introspection_rule.py openstack/tests/unit/baremetal_introspection/v1/test_proxy.py openstack/tests/unit/block_storage/__init__.py openstack/tests/unit/block_storage/v2/__init__.py openstack/tests/unit/block_storage/v2/test_backup.py openstack/tests/unit/block_storage/v2/test_capabilities.py openstack/tests/unit/block_storage/v2/test_extension.py openstack/tests/unit/block_storage/v2/test_limits.py openstack/tests/unit/block_storage/v2/test_proxy.py openstack/tests/unit/block_storage/v2/test_snapshot.py openstack/tests/unit/block_storage/v2/test_stats.py openstack/tests/unit/block_storage/v2/test_type.py openstack/tests/unit/block_storage/v2/test_volume.py openstack/tests/unit/block_storage/v3/__init__.py openstack/tests/unit/block_storage/v3/test_attachment.py openstack/tests/unit/block_storage/v3/test_availability_zone.py openstack/tests/unit/block_storage/v3/test_backup.py openstack/tests/unit/block_storage/v3/test_block_storage_summary.py openstack/tests/unit/block_storage/v3/test_capabilities.py openstack/tests/unit/block_storage/v3/test_extension.py openstack/tests/unit/block_storage/v3/test_group.py 
openstack/tests/unit/block_storage/v3/test_group_snapshot.py openstack/tests/unit/block_storage/v3/test_group_type.py openstack/tests/unit/block_storage/v3/test_limits.py openstack/tests/unit/block_storage/v3/test_proxy.py openstack/tests/unit/block_storage/v3/test_resource_filter.py openstack/tests/unit/block_storage/v3/test_service.py openstack/tests/unit/block_storage/v3/test_snapshot.py openstack/tests/unit/block_storage/v3/test_transfer.py openstack/tests/unit/block_storage/v3/test_type.py openstack/tests/unit/block_storage/v3/test_type_encryption.py openstack/tests/unit/block_storage/v3/test_volume.py openstack/tests/unit/cloud/__init__.py openstack/tests/unit/cloud/test__utils.py openstack/tests/unit/cloud/test_accelerator.py openstack/tests/unit/cloud/test_aggregate.py openstack/tests/unit/cloud/test_availability_zones.py openstack/tests/unit/cloud/test_baremetal_node.py openstack/tests/unit/cloud/test_baremetal_ports.py openstack/tests/unit/cloud/test_cloud.py openstack/tests/unit/cloud/test_cluster_templates.py openstack/tests/unit/cloud/test_clustering.py openstack/tests/unit/cloud/test_coe_clusters.py openstack/tests/unit/cloud/test_coe_clusters_certificate.py openstack/tests/unit/cloud/test_compute.py openstack/tests/unit/cloud/test_create_server.py openstack/tests/unit/cloud/test_create_volume_snapshot.py openstack/tests/unit/cloud/test_delete_server.py openstack/tests/unit/cloud/test_delete_volume_snapshot.py openstack/tests/unit/cloud/test_domain_params.py openstack/tests/unit/cloud/test_domains.py openstack/tests/unit/cloud/test_endpoints.py openstack/tests/unit/cloud/test_flavors.py openstack/tests/unit/cloud/test_floating_ip_common.py openstack/tests/unit/cloud/test_floating_ip_neutron.py openstack/tests/unit/cloud/test_floating_ip_nova.py openstack/tests/unit/cloud/test_floating_ip_pool.py openstack/tests/unit/cloud/test_fwaas.py openstack/tests/unit/cloud/test_groups.py openstack/tests/unit/cloud/test_identity_roles.py 
openstack/tests/unit/cloud/test_identity_users.py openstack/tests/unit/cloud/test_image.py openstack/tests/unit/cloud/test_image_snapshot.py openstack/tests/unit/cloud/test_inventory.py openstack/tests/unit/cloud/test_keypair.py openstack/tests/unit/cloud/test_limits.py openstack/tests/unit/cloud/test_magnum_services.py openstack/tests/unit/cloud/test_meta.py openstack/tests/unit/cloud/test_network.py openstack/tests/unit/cloud/test_object.py openstack/tests/unit/cloud/test_openstackcloud.py openstack/tests/unit/cloud/test_operator.py openstack/tests/unit/cloud/test_operator_noauth.py openstack/tests/unit/cloud/test_port.py openstack/tests/unit/cloud/test_project.py openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py openstack/tests/unit/cloud/test_qos_policy.py openstack/tests/unit/cloud/test_qos_rule_type.py openstack/tests/unit/cloud/test_quotas.py openstack/tests/unit/cloud/test_rebuild_server.py openstack/tests/unit/cloud/test_recordset.py openstack/tests/unit/cloud/test_role_assignment.py openstack/tests/unit/cloud/test_router.py openstack/tests/unit/cloud/test_security_groups.py openstack/tests/unit/cloud/test_server_console.py openstack/tests/unit/cloud/test_server_delete_metadata.py openstack/tests/unit/cloud/test_server_group.py openstack/tests/unit/cloud/test_server_set_metadata.py openstack/tests/unit/cloud/test_services.py openstack/tests/unit/cloud/test_shared_file_system.py openstack/tests/unit/cloud/test_stack.py openstack/tests/unit/cloud/test_subnet.py openstack/tests/unit/cloud/test_update_server.py openstack/tests/unit/cloud/test_usage.py openstack/tests/unit/cloud/test_users.py openstack/tests/unit/cloud/test_volume.py openstack/tests/unit/cloud/test_volume_access.py openstack/tests/unit/cloud/test_volume_backups.py openstack/tests/unit/cloud/test_zone.py openstack/tests/unit/clustering/__init__.py 
openstack/tests/unit/clustering/test_version.py openstack/tests/unit/clustering/v1/__init__.py openstack/tests/unit/clustering/v1/test_action.py openstack/tests/unit/clustering/v1/test_build_info.py openstack/tests/unit/clustering/v1/test_cluster.py openstack/tests/unit/clustering/v1/test_cluster_attr.py openstack/tests/unit/clustering/v1/test_cluster_policy.py openstack/tests/unit/clustering/v1/test_event.py openstack/tests/unit/clustering/v1/test_node.py openstack/tests/unit/clustering/v1/test_policy.py openstack/tests/unit/clustering/v1/test_policy_type.py openstack/tests/unit/clustering/v1/test_profile.py openstack/tests/unit/clustering/v1/test_profile_type.py openstack/tests/unit/clustering/v1/test_proxy.py openstack/tests/unit/clustering/v1/test_receiver.py openstack/tests/unit/clustering/v1/test_service.py openstack/tests/unit/common/__init__.py openstack/tests/unit/common/test_metadata.py openstack/tests/unit/common/test_quota_set.py openstack/tests/unit/common/test_tag.py openstack/tests/unit/compute/__init__.py openstack/tests/unit/compute/test_version.py openstack/tests/unit/compute/v2/__init__.py openstack/tests/unit/compute/v2/test_aggregate.py openstack/tests/unit/compute/v2/test_availability_zone.py openstack/tests/unit/compute/v2/test_extension.py openstack/tests/unit/compute/v2/test_flavor.py openstack/tests/unit/compute/v2/test_hypervisor.py openstack/tests/unit/compute/v2/test_image.py openstack/tests/unit/compute/v2/test_keypair.py openstack/tests/unit/compute/v2/test_limits.py openstack/tests/unit/compute/v2/test_migration.py openstack/tests/unit/compute/v2/test_proxy.py openstack/tests/unit/compute/v2/test_server.py openstack/tests/unit/compute/v2/test_server_actions.py openstack/tests/unit/compute/v2/test_server_diagnostics.py openstack/tests/unit/compute/v2/test_server_group.py openstack/tests/unit/compute/v2/test_server_interface.py openstack/tests/unit/compute/v2/test_server_ip.py openstack/tests/unit/compute/v2/test_server_migration.py 
openstack/tests/unit/compute/v2/test_server_remote_console.py openstack/tests/unit/compute/v2/test_service.py openstack/tests/unit/compute/v2/test_usage.py openstack/tests/unit/compute/v2/test_volume_attachment.py openstack/tests/unit/config/__init__.py openstack/tests/unit/config/base.py openstack/tests/unit/config/test_cloud_config.py openstack/tests/unit/config/test_config.py openstack/tests/unit/config/test_environ.py openstack/tests/unit/config/test_from_conf.py openstack/tests/unit/config/test_from_session.py openstack/tests/unit/config/test_init.py openstack/tests/unit/config/test_json.py openstack/tests/unit/config/test_loader.py openstack/tests/unit/container_infrastructure_management/__init__.py openstack/tests/unit/container_infrastructure_management/v1/__init__.py openstack/tests/unit/container_infrastructure_management/v1/test_cluster.py openstack/tests/unit/container_infrastructure_management/v1/test_cluster_certificate.py openstack/tests/unit/container_infrastructure_management/v1/test_cluster_template.py openstack/tests/unit/container_infrastructure_management/v1/test_proxy.py openstack/tests/unit/container_infrastructure_management/v1/test_service.py openstack/tests/unit/database/__init__.py openstack/tests/unit/database/v1/__init__.py openstack/tests/unit/database/v1/test_database.py openstack/tests/unit/database/v1/test_flavor.py openstack/tests/unit/database/v1/test_instance.py openstack/tests/unit/database/v1/test_proxy.py openstack/tests/unit/database/v1/test_user.py openstack/tests/unit/dns/__init__.py openstack/tests/unit/dns/test_version.py openstack/tests/unit/dns/v2/__init__.py openstack/tests/unit/dns/v2/test_floating_ip.py openstack/tests/unit/dns/v2/test_proxy.py openstack/tests/unit/dns/v2/test_recordset.py openstack/tests/unit/dns/v2/test_zone.py openstack/tests/unit/dns/v2/test_zone_export.py openstack/tests/unit/dns/v2/test_zone_import.py openstack/tests/unit/dns/v2/test_zone_share.py 
openstack/tests/unit/dns/v2/test_zone_transfer.py openstack/tests/unit/fake/__init__.py openstack/tests/unit/fake/fake_service.py openstack/tests/unit/fake/v1/__init__.py openstack/tests/unit/fake/v1/_proxy.py openstack/tests/unit/fake/v1/fake.py openstack/tests/unit/fake/v2/__init__.py openstack/tests/unit/fake/v2/_proxy.py openstack/tests/unit/fake/v2/fake.py openstack/tests/unit/fixtures/accelerator.json openstack/tests/unit/fixtures/bad-glance-version.json openstack/tests/unit/fixtures/bad-placement.json openstack/tests/unit/fixtures/baremetal.json openstack/tests/unit/fixtures/block-storage-version.json openstack/tests/unit/fixtures/clustering.json openstack/tests/unit/fixtures/compute-version.json openstack/tests/unit/fixtures/discovery.json openstack/tests/unit/fixtures/dns.json openstack/tests/unit/fixtures/image-version-broken.json openstack/tests/unit/fixtures/image-version-suburl.json openstack/tests/unit/fixtures/image-version-v1.json openstack/tests/unit/fixtures/image-version-v2.json openstack/tests/unit/fixtures/image-version.json openstack/tests/unit/fixtures/old-compute-version.json openstack/tests/unit/fixtures/placement.json openstack/tests/unit/fixtures/shared-file-system.json openstack/tests/unit/fixtures/clouds/clouds.yaml openstack/tests/unit/fixtures/clouds/clouds_cache.yaml openstack/tests/unit/identity/__init__.py openstack/tests/unit/identity/test_version.py openstack/tests/unit/identity/v2/__init__.py openstack/tests/unit/identity/v2/test_extension.py openstack/tests/unit/identity/v2/test_proxy.py openstack/tests/unit/identity/v2/test_role.py openstack/tests/unit/identity/v2/test_tenant.py openstack/tests/unit/identity/v2/test_user.py openstack/tests/unit/identity/v3/__init__.py openstack/tests/unit/identity/v3/test_access_rule.py openstack/tests/unit/identity/v3/test_application_credential.py openstack/tests/unit/identity/v3/test_credential.py openstack/tests/unit/identity/v3/test_domain.py 
openstack/tests/unit/identity/v3/test_domain_config.py openstack/tests/unit/identity/v3/test_endpoint.py openstack/tests/unit/identity/v3/test_federation_protocol.py openstack/tests/unit/identity/v3/test_group.py openstack/tests/unit/identity/v3/test_identity_provider.py openstack/tests/unit/identity/v3/test_limit.py openstack/tests/unit/identity/v3/test_mapping.py openstack/tests/unit/identity/v3/test_policy.py openstack/tests/unit/identity/v3/test_project.py openstack/tests/unit/identity/v3/test_proxy.py openstack/tests/unit/identity/v3/test_region.py openstack/tests/unit/identity/v3/test_registered_limit.py openstack/tests/unit/identity/v3/test_role.py openstack/tests/unit/identity/v3/test_role_assignment.py openstack/tests/unit/identity/v3/test_role_domain_group_assignment.py openstack/tests/unit/identity/v3/test_role_domain_user_assignment.py openstack/tests/unit/identity/v3/test_role_project_group_assignment.py openstack/tests/unit/identity/v3/test_role_project_user_assignment.py openstack/tests/unit/identity/v3/test_role_system_group_assignment.py openstack/tests/unit/identity/v3/test_role_system_user_assignment.py openstack/tests/unit/identity/v3/test_service.py openstack/tests/unit/identity/v3/test_service_provider.py openstack/tests/unit/identity/v3/test_trust.py openstack/tests/unit/identity/v3/test_user.py openstack/tests/unit/image/__init__.py openstack/tests/unit/image/v1/__init__.py openstack/tests/unit/image/v1/test_image.py openstack/tests/unit/image/v1/test_proxy.py openstack/tests/unit/image/v2/__init__.py openstack/tests/unit/image/v2/test_cache.py openstack/tests/unit/image/v2/test_image.py openstack/tests/unit/image/v2/test_member.py openstack/tests/unit/image/v2/test_metadef_namespace.py openstack/tests/unit/image/v2/test_metadef_object.py openstack/tests/unit/image/v2/test_metadef_property.py openstack/tests/unit/image/v2/test_metadef_resource_type.py openstack/tests/unit/image/v2/test_metadef_resource_type_association.py 
openstack/tests/unit/image/v2/test_metadef_schema.py openstack/tests/unit/image/v2/test_proxy.py openstack/tests/unit/image/v2/test_schema.py openstack/tests/unit/image/v2/test_service_info.py openstack/tests/unit/image/v2/test_task.py openstack/tests/unit/instance_ha/__init__.py openstack/tests/unit/instance_ha/v1/__init__.py openstack/tests/unit/instance_ha/v1/test_host.py openstack/tests/unit/instance_ha/v1/test_notification.py openstack/tests/unit/instance_ha/v1/test_proxy.py openstack/tests/unit/instance_ha/v1/test_segment.py openstack/tests/unit/instance_ha/v1/test_vmove.py openstack/tests/unit/key_manager/__init__.py openstack/tests/unit/key_manager/v1/__init__.py openstack/tests/unit/key_manager/v1/test_container.py openstack/tests/unit/key_manager/v1/test_order.py openstack/tests/unit/key_manager/v1/test_proxy.py openstack/tests/unit/key_manager/v1/test_secret.py openstack/tests/unit/load_balancer/__init__.py openstack/tests/unit/load_balancer/test_amphora.py openstack/tests/unit/load_balancer/test_availability_zone.py openstack/tests/unit/load_balancer/test_availability_zone_profile.py openstack/tests/unit/load_balancer/test_flavor.py openstack/tests/unit/load_balancer/test_flavor_profile.py openstack/tests/unit/load_balancer/test_health_monitor.py openstack/tests/unit/load_balancer/test_l7policy.py openstack/tests/unit/load_balancer/test_l7rule.py openstack/tests/unit/load_balancer/test_listener.py openstack/tests/unit/load_balancer/test_load_balancer.py openstack/tests/unit/load_balancer/test_member.py openstack/tests/unit/load_balancer/test_pool.py openstack/tests/unit/load_balancer/test_provider.py openstack/tests/unit/load_balancer/test_quota.py openstack/tests/unit/load_balancer/test_version.py openstack/tests/unit/load_balancer/v2/__init__.py openstack/tests/unit/load_balancer/v2/test_proxy.py openstack/tests/unit/message/__init__.py openstack/tests/unit/message/test_version.py openstack/tests/unit/message/v2/__init__.py 
openstack/tests/unit/message/v2/test_claim.py openstack/tests/unit/message/v2/test_message.py openstack/tests/unit/message/v2/test_proxy.py openstack/tests/unit/message/v2/test_queue.py openstack/tests/unit/message/v2/test_subscription.py openstack/tests/unit/network/__init__.py openstack/tests/unit/network/test_version.py openstack/tests/unit/network/v2/__init__.py openstack/tests/unit/network/v2/test_address_group.py openstack/tests/unit/network/v2/test_address_scope.py openstack/tests/unit/network/v2/test_agent.py openstack/tests/unit/network/v2/test_auto_allocated_topology.py openstack/tests/unit/network/v2/test_availability_zone.py openstack/tests/unit/network/v2/test_bgp_peer.py openstack/tests/unit/network/v2/test_bgp_speaker.py openstack/tests/unit/network/v2/test_bgpvpn.py openstack/tests/unit/network/v2/test_default_security_group_rule.py openstack/tests/unit/network/v2/test_extension.py openstack/tests/unit/network/v2/test_firewall_group.py openstack/tests/unit/network/v2/test_firewall_policy.py openstack/tests/unit/network/v2/test_firewall_rule.py openstack/tests/unit/network/v2/test_flavor.py openstack/tests/unit/network/v2/test_floating_ip.py openstack/tests/unit/network/v2/test_health_monitor.py openstack/tests/unit/network/v2/test_l3_conntrack_helper.py openstack/tests/unit/network/v2/test_listener.py openstack/tests/unit/network/v2/test_load_balancer.py openstack/tests/unit/network/v2/test_local_ip.py openstack/tests/unit/network/v2/test_local_ip_association.py openstack/tests/unit/network/v2/test_metering_label.py openstack/tests/unit/network/v2/test_metering_label_rule.py openstack/tests/unit/network/v2/test_ndp_proxy.py openstack/tests/unit/network/v2/test_network.py openstack/tests/unit/network/v2/test_network_ip_availability.py openstack/tests/unit/network/v2/test_network_segment_range.py openstack/tests/unit/network/v2/test_pool.py openstack/tests/unit/network/v2/test_pool_member.py openstack/tests/unit/network/v2/test_port.py 
openstack/tests/unit/network/v2/test_port_forwarding.py openstack/tests/unit/network/v2/test_proxy.py openstack/tests/unit/network/v2/test_qos_bandwidth_limit_rule.py openstack/tests/unit/network/v2/test_qos_dscp_marking_rule.py openstack/tests/unit/network/v2/test_qos_minimum_bandwidth_rule.py openstack/tests/unit/network/v2/test_qos_minimum_packet_rate_rule.py openstack/tests/unit/network/v2/test_qos_policy.py openstack/tests/unit/network/v2/test_qos_rule_type.py openstack/tests/unit/network/v2/test_quota.py openstack/tests/unit/network/v2/test_rbac_policy.py openstack/tests/unit/network/v2/test_router.py openstack/tests/unit/network/v2/test_security_group.py openstack/tests/unit/network/v2/test_security_group_rule.py openstack/tests/unit/network/v2/test_segment.py openstack/tests/unit/network/v2/test_service_profile.py openstack/tests/unit/network/v2/test_service_provider.py openstack/tests/unit/network/v2/test_sfc_flow_classifier.py openstack/tests/unit/network/v2/test_sfc_port_chain.py openstack/tests/unit/network/v2/test_sfc_port_pair.py openstack/tests/unit/network/v2/test_sfc_port_pair_group.py openstack/tests/unit/network/v2/test_sfc_service_graph.py openstack/tests/unit/network/v2/test_subnet.py openstack/tests/unit/network/v2/test_subnet_pool.py openstack/tests/unit/network/v2/test_tap_flow.py openstack/tests/unit/network/v2/test_tap_mirror.py openstack/tests/unit/network/v2/test_tap_service.py openstack/tests/unit/network/v2/test_trunk.py openstack/tests/unit/network/v2/test_vpn_endpoint_group.py openstack/tests/unit/network/v2/test_vpn_ikepolicy.py openstack/tests/unit/network/v2/test_vpn_ipsec_site_connection.py openstack/tests/unit/network/v2/test_vpn_ipsecpolicy.py openstack/tests/unit/network/v2/test_vpn_service.py openstack/tests/unit/object_store/__init__.py openstack/tests/unit/object_store/v1/__init__.py openstack/tests/unit/object_store/v1/test_account.py openstack/tests/unit/object_store/v1/test_container.py 
openstack/tests/unit/object_store/v1/test_info.py openstack/tests/unit/object_store/v1/test_obj.py openstack/tests/unit/object_store/v1/test_proxy.py openstack/tests/unit/orchestration/__init__.py openstack/tests/unit/orchestration/test_version.py openstack/tests/unit/orchestration/v1/__init__.py openstack/tests/unit/orchestration/v1/hello_world.yaml openstack/tests/unit/orchestration/v1/helloworld.txt openstack/tests/unit/orchestration/v1/test_proxy.py openstack/tests/unit/orchestration/v1/test_resource.py openstack/tests/unit/orchestration/v1/test_software_config.py openstack/tests/unit/orchestration/v1/test_software_deployment.py openstack/tests/unit/orchestration/v1/test_stack.py openstack/tests/unit/orchestration/v1/test_stack_environment.py openstack/tests/unit/orchestration/v1/test_stack_event.py openstack/tests/unit/orchestration/v1/test_stack_files.py openstack/tests/unit/orchestration/v1/test_stack_template.py openstack/tests/unit/orchestration/v1/test_template.py openstack/tests/unit/placement/__init__.py openstack/tests/unit/placement/v1/__init__.py openstack/tests/unit/placement/v1/test_proxy.py openstack/tests/unit/placement/v1/test_resource_class.py openstack/tests/unit/placement/v1/test_resource_provider.py openstack/tests/unit/placement/v1/test_resource_provider_inventory.py openstack/tests/unit/placement/v1/test_trait.py openstack/tests/unit/shared_file_system/__init__.py openstack/tests/unit/shared_file_system/v2/__init__.py openstack/tests/unit/shared_file_system/v2/test_availability_zone.py openstack/tests/unit/shared_file_system/v2/test_limit.py openstack/tests/unit/shared_file_system/v2/test_proxy.py openstack/tests/unit/shared_file_system/v2/test_quota_class_set.py openstack/tests/unit/shared_file_system/v2/test_share.py openstack/tests/unit/shared_file_system/v2/test_share_access_rule.py openstack/tests/unit/shared_file_system/v2/test_share_export_locations.py openstack/tests/unit/shared_file_system/v2/test_share_group.py 
openstack/tests/unit/shared_file_system/v2/test_share_group_snapshot.py openstack/tests/unit/shared_file_system/v2/test_share_instance.py openstack/tests/unit/shared_file_system/v2/test_share_network.py openstack/tests/unit/shared_file_system/v2/test_share_network_subnet.py openstack/tests/unit/shared_file_system/v2/test_share_snapshot.py openstack/tests/unit/shared_file_system/v2/test_share_snapshot_instance.py openstack/tests/unit/shared_file_system/v2/test_storage_pool.py openstack/tests/unit/shared_file_system/v2/test_user_message.py openstack/tests/unit/workflow/__init__.py openstack/tests/unit/workflow/test_cron_trigger.py openstack/tests/unit/workflow/test_execution.py openstack/tests/unit/workflow/test_version.py openstack/tests/unit/workflow/test_workflow.py openstack/tests/unit/workflow/v2/__init__.py openstack/tests/unit/workflow/v2/test_proxy.py openstack/workflow/__init__.py openstack/workflow/version.py openstack/workflow/workflow_service.py openstack/workflow/v2/__init__.py openstack/workflow/v2/_proxy.py openstack/workflow/v2/cron_trigger.py openstack/workflow/v2/execution.py openstack/workflow/v2/workflow.py openstacksdk.egg-info/PKG-INFO openstacksdk.egg-info/SOURCES.txt openstacksdk.egg-info/dependency_links.txt openstacksdk.egg-info/entry_points.txt openstacksdk.egg-info/not-zip-safe openstacksdk.egg-info/pbr.json openstacksdk.egg-info/requires.txt openstacksdk.egg-info/top_level.txt playbooks/acceptance/post.yaml playbooks/acceptance/pre.yaml playbooks/acceptance/run-with-devstack.yaml playbooks/devstack/legacy-git.yaml playbooks/devstack/post.yaml releasenotes/notes/add-aggregates-fc563e237755112e.yaml releasenotes/notes/add-application-credentials-abab9106dea10c11.yaml releasenotes/notes/add-az-to-loadbalancer-da9bf1baaedc89a4.yaml releasenotes/notes/add-block-storage-group-snapshots-954cc869227317c3.yaml releasenotes/notes/add-block-storage-group-type-group-specs-d07047167224ec83.yaml 
releasenotes/notes/add-block-storage-groups-bf5f1af714c9e505.yaml releasenotes/notes/add-block-storage-service-support-ce03092ce2d7e7b9.yaml releasenotes/notes/add-block-storage-summary-support-dd00d424c4e6a3b1.yaml releasenotes/notes/add-bulk-create-resources-12192ec9d76c7716.yaml releasenotes/notes/add-cipher-list-support-to-octavia-b6b2b0053ca6b184.yaml releasenotes/notes/add-compute-flavor-ops-12149e58299c413e.yaml releasenotes/notes/add-current-user-id-49b6463e6bcc3b31.yaml releasenotes/notes/add-cyborg-support-b9afca69f709c048.yaml releasenotes/notes/add-dns-606cc018e01d40fa.yaml releasenotes/notes/add-dns-domain-support-for-port-3fa4568330dda07e.yaml releasenotes/notes/add-dns-resource-list-by-project-8b5479a045ef7373.yaml releasenotes/notes/add-dns-zone-share-api-374e71cac504917f.yaml releasenotes/notes/add-fakes-generator-72c53d34c995fcb2.yaml releasenotes/notes/add-find-backup-find-snapshot-v2-756a05ccd150db82.yaml releasenotes/notes/add-fip-portforwarding-methods-cffc14a6283cedfb.yaml releasenotes/notes/add-identity-domain-configuration-2e8bcaa20736b379.yaml releasenotes/notes/add-identity-group-users-proxy-method-e37f8983b2406819.yaml releasenotes/notes/add-identity-service-provider-support-8c97cbb157883626.yaml releasenotes/notes/add-image-attributes-05b820a85cd09806.yaml releasenotes/notes/add-image-cache-support-3f8c13550a84d749.yaml releasenotes/notes/add-image-cache-support-78477e1686c52e56.yaml releasenotes/notes/add-image-metadef-namespace-support-b93557afdcf4272c.yaml releasenotes/notes/add-image-metadef-property-fb87e5a7090e73ac.yaml releasenotes/notes/add-image-metadef-schema-b463825481bdf954.yaml releasenotes/notes/add-image-schema-9c07c2789490718a.yaml releasenotes/notes/add-image-service-info-90d6063b5ba0735d.yaml releasenotes/notes/add-image-stage-1dbc3844a042fd26.yaml releasenotes/notes/add-jmespath-support-f47b7a503dbbfda1.yaml releasenotes/notes/add-limit-to-shared-file-2b443c2a00c75e6e.yaml 
releasenotes/notes/add-list_flavor_access-e038253e953e6586.yaml releasenotes/notes/add-load-balancer-flavor-api-d2598e30347a19fc.yaml releasenotes/notes/add-load-balancer-flavor-profile-api-e5a15157563eb75f.yaml releasenotes/notes/add-load-balancer-listener-alpn-protocols-ded816c78bf2080c.yaml releasenotes/notes/add-load-balancer-pool-alpn-protocols-77f0c7015f176369.yaml releasenotes/notes/add-load-balancer-provider-api-08bcfb72ddf5b247.yaml releasenotes/notes/add-magnum-cluster-support-843fe2709b8f4789.yaml releasenotes/notes/add-manage-volume-support-a4fd90e3ff2fa0d0.yaml releasenotes/notes/add-masakara-support-3f7df4436ac869cf.yaml releasenotes/notes/add-masakari-enabled-to-segment-0e83da869d2ab03f.yaml releasenotes/notes/add-masakari-vmoves-873ad67830c92254.yaml releasenotes/notes/add-metadef-object-5eec168baf039e80.yaml releasenotes/notes/add-migrations-946adf16674d4b2a.yaml releasenotes/notes/add-namespace-object-delete-all-6cea62cb038012df.yaml releasenotes/notes/add-new-field-progress-details-in-notification-resource-f7871acb6ffd46dc.yaml releasenotes/notes/add-node-boot-mode-5f49882fdd86f35b.yaml releasenotes/notes/add-node-boot-mode-set-5718a8d6511b4826.yaml releasenotes/notes/add-node-firmware-list-support-fec2f96a3a578730.yaml releasenotes/notes/add-node-inventory-52f54e16777814e7.yaml releasenotes/notes/add-node-vendor_passthru-29b384cadf795b48.yaml releasenotes/notes/add-octavia-amphora-api-7f3586f6a4f31de4.yaml releasenotes/notes/add-octavia-lb-failover-9a34c9577d78ad34.yaml releasenotes/notes/add-octavia-lb-listener-stats-1538cc6e4f734353.yaml releasenotes/notes/add-octavia-listener-hsts-fields-50c621b71e56dc13.yaml releasenotes/notes/add-octavia-tags-support-1c1cf94184e6ebb7.yaml releasenotes/notes/add-placement-resource-class-e1c644d978b886bc.yaml releasenotes/notes/add-placement-resource-provider-aggregates-1310c0be6a4097d3.yaml releasenotes/notes/add-placement-resource-provider-inventory-8714cafefae74810.yaml 
releasenotes/notes/add-placement-support-a2011eb1e900804d.yaml releasenotes/notes/add-placement-trait-29957d2c03edbfb9.yaml releasenotes/notes/add-port-hardware-offload-type-1232c5ae3f62d7df.yaml releasenotes/notes/add-port-numa-affinity-policy-b42a85dbe26560d2.yaml releasenotes/notes/add-propagate_uplink_status-to-port-0152d476c65979e3.yaml releasenotes/notes/add-quota-class-set-to-shared-file-systems-43da33e6a3ed65e3.yaml releasenotes/notes/add-server-clear-password-256e269223453bd7.yaml releasenotes/notes/add-server-console-078ed2696e5b04d9.yaml releasenotes/notes/add-server-migrations-6e31183196f14deb.yaml releasenotes/notes/add-server-tag-proxy-methods-c791a36d8d4d85f6.yaml releasenotes/notes/add-service-0bcc16eb026eade3.yaml releasenotes/notes/add-sg-rules-bulk-f36a3e2326d74867.yaml releasenotes/notes/add-share-access-rules-to-shared-file-362bee34f7331186.yaml releasenotes/notes/add-share-network-subnet-to-shared-file-b5de3ce6ca723209.yaml releasenotes/notes/add-share-network-to-shared-file-c5c9a6b8ccf1d958.yaml releasenotes/notes/add-share-snapshot-instance-to-shared-file-4d935f12d67bf59d.yaml releasenotes/notes/add-share-snapshot-to-shared-file-82ecedbdbed2e3c5.yaml releasenotes/notes/add-share_group-to-shared-file-8cee20d8aa2afbb7.yaml releasenotes/notes/add-shared-file-syste-share_instance-fffaea2d3a77ba24.yaml releasenotes/notes/add-shared-file-system-locks-support-4859ca93f93a1056.yaml releasenotes/notes/add-shared-file-system-manage-unmanage-share-830e313f96e5fd2b.yaml releasenotes/notes/add-shared-file-system-share-group-snapshot-c5099e6c8accf077.yaml releasenotes/notes/add-shared-file-system-share-metadata-e0415bb71d8a0a48.yaml releasenotes/notes/add-shared-file-system-share-resize-ddd650c2e32fed34.yaml releasenotes/notes/add-shared-file-system-shares-2e1d44a1bb882d6d.yaml releasenotes/notes/add-shared-file-system-shares-e9f356a318045607.yaml releasenotes/notes/add-shared-file-systems-83a3767429fd5e8c.yaml 
releasenotes/notes/add-shared-file-systems-export-location-a27c1741880c384b.yaml releasenotes/notes/add-shelve_offload-427f6550fc55e622.yaml releasenotes/notes/add-show-all-images-flag-352748b6c3d99f3f.yaml releasenotes/notes/add-stack-events-b8674d7bb657e789.yaml releasenotes/notes/add-stack-export-3ace746a8c80d766.yaml releasenotes/notes/add-stack-suspend-and-resume-26d4fc5904291d5d.yaml releasenotes/notes/add-storage-pool-to-shared-file-ad45da1b2510b412.yaml releasenotes/notes/add-support-allowed-cidrs-loadbalancer-listener-809e523a8bd6a7d5.yaml releasenotes/notes/add-support-availability_zone-loadbalancer-a18aa1708d7859e2.yaml releasenotes/notes/add-support-for-setting-static-routes-b3ce6cac2c5e9e51.yaml releasenotes/notes/add-system-role-assignment-693dd3e1da33a54d.yaml releasenotes/notes/add-tls-container-refs-params-for-octavia-pools-76f295cd2daa7f53.yaml releasenotes/notes/add-tls-version-support-for-octavia-7ecb372e6fb58101.yaml releasenotes/notes/add-tls_enabled-parameter-for-octavia-pools-f0a23436d826b313.yaml releasenotes/notes/add-unified-limit-5ac334a08e137a70.yaml releasenotes/notes/add-user-group-assignment-9c419b6c6bfe392c.yaml releasenotes/notes/add-user-message-to-shared-file-85d7bbccf8347c4f.yaml releasenotes/notes/add-vif-optional-params-abb755b74f076eb2.yaml releasenotes/notes/add-volume-attachment-support-b5f9a9e78ba88355.yaml releasenotes/notes/add-volume-extend-support-86e5c8cff5d6874e.yaml releasenotes/notes/add-volume-image-metadata-support-c61bcb918fdff529.yaml releasenotes/notes/add-volume-snapshot-manage-unmanage-support-fc0be2a3fb4427d1.yaml releasenotes/notes/add-volume-transfer-support-28bf34a243d96e1b.yaml releasenotes/notes/add-volume-type-update-b84f50b7fa3b061d.yaml releasenotes/notes/add_access_rules-06eb8a1f9fcd9367.yaml releasenotes/notes/add_description_create_user-0ddc9a0ef4da840d.yaml releasenotes/notes/add_designate_recordsets_support-69af0a6b317073e7.yaml 
releasenotes/notes/add_designate_zones_support-35fa9b8b09995b43.yaml releasenotes/notes/add_heat_tag_support-135aa43ba1dce3bb.yaml releasenotes/notes/add_host_aggregate_support-471623faf45ec3c3.yaml releasenotes/notes/add_image_import_support-6cea2e7d7a781071.yaml releasenotes/notes/add_influxdb_stats-665714d715302ad5.yaml releasenotes/notes/add_magnum_baymodel_support-e35e5aab0b14ff75.yaml releasenotes/notes/add_magnum_services_support-3d95f9dcc60b5573.yaml releasenotes/notes/add_project_cleanup-39c3517b25a5372e.yaml releasenotes/notes/add_server_group_support-dfa472e3dae7d34d.yaml releasenotes/notes/add_support_port_binding_attrs-c70966724eb970f3.yaml releasenotes/notes/add_update_server-8761059d6de7e68b.yaml releasenotes/notes/add_update_service-28e590a7a7524053.yaml releasenotes/notes/add_vendor_hook-e87b6afb7f215a30.yaml releasenotes/notes/added-federation-support-3b65e531e57211f5.yaml releasenotes/notes/added-senlin-support-1eb4e47c31258f66.yaml releasenotes/notes/allocation-api-04f6b3b7a0ccc850.yaml releasenotes/notes/allocation-update-910c36c1290e5121.yaml releasenotes/notes/alternate-auth-context-3939f1492a0e1355.yaml releasenotes/notes/always-detail-cluster-templates-3eb4b5744ba327ac.yaml releasenotes/notes/auth-url-vexxhost-8d63cd17bde21320.yaml releasenotes/notes/bail-on-failed-service-cf299c37d5647b08.yaml releasenotes/notes/baremetal-configdrive-mkisofs-xorrisofs-075db4d7d80e5a13.yaml releasenotes/notes/baremetal-details-09b27fba82111cfb.yaml releasenotes/notes/baremetal-errors-5cc871e8df4c9d95.yaml releasenotes/notes/baremetal-fields-1f6fbcd8bd1ea2aa.yaml releasenotes/notes/baremetal-fields-624546fa533a8287.yaml releasenotes/notes/baremetal-fields-convert-857b8804327f1e86.yaml releasenotes/notes/baremetal-introspection-973351b3ee76309e.yaml releasenotes/notes/baremetal-maintenance-5cb95c6d898d4d72.yaml releasenotes/notes/baremetal-patch-feebd96b1b92f3b9.yaml releasenotes/notes/baremetal-ports-cc0f56ae0d192aba.yaml 
releasenotes/notes/baremetal-reservation-40327923092e9647.yaml releasenotes/notes/baremetal-retired-fields-f56a4632ad4797d7.yaml releasenotes/notes/baremetal-retries-804f553b4e22b3bf.yaml releasenotes/notes/baremetal-retries-ff8aa8f73fb97415.yaml releasenotes/notes/baremetal-traits-d1137318db33b8d1.yaml releasenotes/notes/baremetal-update-80effb38aae8e02d.yaml releasenotes/notes/baremetal-validate-ccce2a37d2a20d96.yaml releasenotes/notes/baremetal-vif-122457118c722a9b.yaml releasenotes/notes/baremetal-wait-e4571cdb150b188a.yaml releasenotes/notes/basic-api-cache-4ad8cf2754b004d1.yaml releasenotes/notes/bgpvpn-list-filters-e76183a7008c0631.yaml releasenotes/notes/block-storage-backup-5886e91fd6e423bf.yaml releasenotes/notes/block-storage-init-return-95b465b4755f03ca.yaml releasenotes/notes/block-storage-qs-0e3b69be2e709b65.yaml releasenotes/notes/block-storage-v3-9798d584d088c048.yaml releasenotes/notes/block_storage-type_encryption-121f8a222c822fb5.yaml releasenotes/notes/boot-on-server-group-a80e51850db24b3d.yaml releasenotes/notes/bug-2001080-de52ead3c5466792.yaml releasenotes/notes/bug-2010898-430da335e4df0efe.yaml releasenotes/notes/cache-auth-in-keyring-773dd5f682cd1610.yaml releasenotes/notes/cache-in-use-volumes-c7fa8bb378106fe3.yaml releasenotes/notes/catch-up-release-notes-e385fad34e9f3d6e.yaml releasenotes/notes/change-attach-vol-return-value-4834a1f78392abb1.yaml releasenotes/notes/cinder_volume_backups_support-6f7ceab440853833.yaml releasenotes/notes/cinderv2-norm-fix-037189c60b43089f.yaml releasenotes/notes/cleanup-objects-f99aeecf22ac13dd.yaml releasenotes/notes/cloud-profile-status-e0d29b5e2f10e95c.yaml releasenotes/notes/clustering-resource-deletion-bed869ba47c2aac1.yaml releasenotes/notes/complete-aggregate-functions-45d5f2beeeac2b48.yaml releasenotes/notes/compute-microversion-2-17-b05cb87580b8d56a.yaml releasenotes/notes/compute-microversion-2-73-abae1d0c3740f76e.yaml releasenotes/notes/compute-microversion-2-89-8c5187cc3bf6bd02.yaml 
releasenotes/notes/compute-quota-set-e664412d089945d2.yaml releasenotes/notes/compute-quotas-b07a0f24dfac8444.yaml releasenotes/notes/compute-restore-server-020bf091acc9f8df.yaml releasenotes/notes/compute-service-zone-2b25ec705b0156c4.yaml releasenotes/notes/compute-usage-defaults-5f5b2936f17ff400.yaml releasenotes/notes/compute-volume-attachment-proxy-method-rework-dc35fe9ca3af1c16.yaml releasenotes/notes/conf-object-ctr-c0e1da0a67dad841.yaml releasenotes/notes/config-aliases-0f6297eafd05c07c.yaml releasenotes/notes/config-flavor-specs-ca712e17971482b6.yaml releasenotes/notes/configdrive-f8ca9f94b2981db7.yaml releasenotes/notes/container-search-b0f4253ce2deeda5.yaml releasenotes/notes/create-object-data-870cb543543aa983.yaml releasenotes/notes/create-object-directory-98e2cae175cc5082.yaml releasenotes/notes/create-stack-fix-12dbb59a48ac7442.yaml releasenotes/notes/create-subnet-by-subnetpool-eba1129c67ed4d96.yaml releasenotes/notes/create_server_network_fix-c4a56b31d2850a4b.yaml releasenotes/notes/create_service_norm-319a97433d68fa6a.yaml releasenotes/notes/cron_triggers_proxy-51aa89e91bbb9798.yaml releasenotes/notes/data-model-cf50d86982646370.yaml releasenotes/notes/default-cloud-7ee0bcb9e5dd24b9.yaml releasenotes/notes/default-microversion-b2401727cb591002.yaml releasenotes/notes/delete-autocreated-1839187b0aa35022.yaml releasenotes/notes/delete-image-objects-9d4b4e0fff36a23f.yaml releasenotes/notes/delete-obj-return-a3ecf0415b7a2989.yaml releasenotes/notes/delete_project-399f9b3107014dde.yaml releasenotes/notes/deprecate-remote_ip_prefix-metering-label-rules-843d5a962e4e428c.yaml releasenotes/notes/deprecated-compute-image-proxy-apis-986263f6aa1b1b25.yaml releasenotes/notes/deprecated-profile-762afdef0e8fc9e8.yaml releasenotes/notes/disable-service-39df96ef8a817785.yaml releasenotes/notes/dns-domain-parameter-d3acfc3287a9d632.yaml releasenotes/notes/domain_operations_name_or_id-baba4cac5b67234d.yaml 
releasenotes/notes/drop-Resource-allow_get-attribute-fec75b551fb79465.yaml releasenotes/notes/drop-formatter-deserialize-30b19956fb79bb8d.yaml releasenotes/notes/drop-python27-b824f9ce51cb1ab7.yaml releasenotes/notes/drop-senlin-cloud-layer-c06d496acc70b014.yaml releasenotes/notes/dropped-python-3.5-b154887cce87947c.yaml releasenotes/notes/dual-stack-networks-8a81941c97d28deb.yaml releasenotes/notes/endpoint-from-catalog-bad36cb0409a4e6a.yaml releasenotes/notes/expose-client-side-rate-limit-ddb82df7cb92091c.yaml releasenotes/notes/false-not-attribute-error-49484d0fdc61f75d.yaml releasenotes/notes/feature-server-metadata-50caf18cec532160.yaml releasenotes/notes/find_server-use-details-9a22e83ec6540c98.yaml releasenotes/notes/fip_timeout-035c4bb3ff92fa1f.yaml releasenotes/notes/firewall-resources-c7589d288dd57e35.yaml releasenotes/notes/fix-compat-with-old-keystoneauth-66e11ee9d008b962.yaml releasenotes/notes/fix-config-drive-a148b7589f7e1022.yaml releasenotes/notes/fix-delete-ips-1d4eebf7bc4d4733.yaml releasenotes/notes/fix-dns-return-c810d5e6736322f1.yaml releasenotes/notes/fix-endpoint-override-ac41baeec9549ab3.yaml releasenotes/notes/fix-floating-ip-private-matching-84e369eee380a185.yaml releasenotes/notes/fix-for-microversion-70cd686b6d6e3fd0.yaml releasenotes/notes/fix-image-hw_qemu_guest_agent-bf1147e52c84b5e8.yaml releasenotes/notes/fix-image-task-ae79502dd5c7ecba.yaml releasenotes/notes/fix-list-networks-a592725df64c306e.yaml releasenotes/notes/fix-microversion-354dc70deb2b2f0b.yaml releasenotes/notes/fix-missing-futures-a0617a1c1ce6e659.yaml releasenotes/notes/fix-neutron-endpoint-mangling-a9dd89dd09bc71ec.yaml releasenotes/notes/fix-os_auth_type-v3multifactor-049cf52573d9e00e.yaml releasenotes/notes/fix-properties-key-conflict-2161ca1faaad6731.yaml releasenotes/notes/fix-server-unshelve-to-host-cb02eee8a20ba478.yaml releasenotes/notes/fix-supplemental-fips-c9cd58aac12eb30e.yaml releasenotes/notes/fix-task-timing-048afea680adc62e.yaml 
releasenotes/notes/fix-update-domain-af47b066ac52eb7f.yaml releasenotes/notes/fix-yaml-load-3e6bd852afe549b4.yaml releasenotes/notes/fixed-magnum-type-7406f0a60525f858.yaml releasenotes/notes/flavor-cloud-layer-0b4d130ac1c5e7c4.yaml releasenotes/notes/flavor_fix-a53c6b326dc34a2c.yaml releasenotes/notes/floating_ip_normalization-41e0edcdb0c98aee.yaml releasenotes/notes/fnmatch-name-or-id-f658fe26f84086c8.yaml releasenotes/notes/force_ipv4_no_ipv6_address-9842168b5d05d262.yaml releasenotes/notes/futurist-b54b0f449d410997.yaml releasenotes/notes/generate-form-signature-294ca46812f291d6.yaml releasenotes/notes/get-limits-c383c512f8e01873.yaml releasenotes/notes/get-object-raw-e58284e59c81c8ef.yaml releasenotes/notes/get-server-by-id-none-3e8538800fa09d82.yaml releasenotes/notes/get-usage-72d249ff790d1b8f.yaml releasenotes/notes/get_compute_usage-01811dccd60dc92a.yaml releasenotes/notes/get_object_api-968483adb016bce1.yaml releasenotes/notes/glance-image-pagination-0b4dfef22b25852b.yaml releasenotes/notes/glance-image-stores-2baa66e6743a2f2d.yaml releasenotes/notes/global-request-id-d7c0736f43929165.yaml releasenotes/notes/grant-revoke-assignments-231d3f9596a1ae75.yaml releasenotes/notes/identity-auth-url-f3ae8ef22d2bcab6.yaml releasenotes/notes/identity-cloud-mixin-inherited-role-assignments-8fe9ac9509d99f4d.yaml releasenotes/notes/image-flavor-by-name-54865b00ebbf1004.yaml releasenotes/notes/image-from-volume-9acf7379f5995b5b.yaml releasenotes/notes/image-id-filter-key-b9b6b52139a27cbe.yaml releasenotes/notes/image-import-proxy-params-f19d8b6166104ebe.yaml releasenotes/notes/image-import-support-97052cdbc8ce449b.yaml releasenotes/notes/image-proxy-layer-kwarg-only-arguments-94c9b2033d386160.yaml releasenotes/notes/image-update-76bd3bf24c1c1380.yaml releasenotes/notes/improve-metrics-5d7ce70ce4021d72.yaml releasenotes/notes/infer-secgroup-source-58d840aaf1a1f485.yaml 
releasenotes/notes/introduce-source-and-destination-ip-prefixes-into-metering-label-rules-e04b797adac5d0d0.yaml releasenotes/notes/introspection-node-6a3b7d55839ef82c.yaml releasenotes/notes/ironic-conductors-support-3bf27e8b2f0299ba.yaml releasenotes/notes/ironic-deploy-steps-2c0f39d7d2a13289.yaml releasenotes/notes/ironic-deploy-template-support-fa56005365ed6e4d.yaml releasenotes/notes/ironic-introspection_rules_support-18b0488a76800122.yaml releasenotes/notes/ironic-microversion-ba5b0f36f11196a6.yaml releasenotes/notes/ironic-node-shard-35f2557c3dbfff1d.yaml releasenotes/notes/ironic-volume_target-support-8130361804366787.yaml releasenotes/notes/ksa-discovery-86a4ef00d85ea87f.yaml releasenotes/notes/less-file-hashing-d2497337da5acbef.yaml releasenotes/notes/list-all_projects-filter-27f1d471a7848507.yaml releasenotes/notes/list-az-names-a38c277d1192471b.yaml releasenotes/notes/list-network-resources-empty-list-6aa760c01e7d97d7.yaml releasenotes/notes/list-role-assignments-keystone-v2-b127b12b4860f50c.yaml releasenotes/notes/list-servers-all-projects-349e6dc665ba2e8d.yaml releasenotes/notes/load-yaml-3177efca78e5c67a.yaml releasenotes/notes/location-server-resource-af77fdab5d35d421.yaml releasenotes/notes/log-request-ids-37507cb6eed9a7da.yaml releasenotes/notes/machine-get-update-microversions-4b910e63cebd65e2.yaml releasenotes/notes/magic-fixes-dca4ae4dac2441a8.yaml releasenotes/notes/make-cloud-region-standalone-848a2c4b5f3ebc29.yaml releasenotes/notes/make-rest-client-dd3d365632a26fa0.yaml releasenotes/notes/make-rest-client-version-discovery-84125700f159491a.yaml releasenotes/notes/make_object_metadata_easier.yaml-e9751723e002e06f.yaml releasenotes/notes/merge-shade-os-client-config-29878734ad643e33.yaml releasenotes/notes/meta-passthrough-d695bff4f9366b65.yaml releasenotes/notes/metadata-key-name-bugfix-77612a825c5145d7.yaml releasenotes/notes/min-max-legacy-version-301242466ddefa93.yaml releasenotes/notes/mtu-settings-8ce8b54d096580a2.yaml 
releasenotes/notes/multiple-updates-b48cc2f6db2e526d.yaml releasenotes/notes/munch-sub-dict-e1619c71c26879cb.yaml releasenotes/notes/nat-source-field-7c7db2a724616d59.yaml releasenotes/notes/nat-source-support-92aaf6b336d0b848.yaml releasenotes/notes/net_provider-dd64b697476b7094.yaml releasenotes/notes/network-add-tap-mirror-46376bd98ee69c81.yaml releasenotes/notes/network-data-bd94e4a499ba3e0d.yaml releasenotes/notes/network-data-deb5772edc111428.yaml releasenotes/notes/network-list-e6e9dafdd8446263.yaml releasenotes/notes/network-qos-rule-filter-keys-324e3222510fd362.yaml releasenotes/notes/network-quotas-b98cce9ffeffdbf4.yaml releasenotes/notes/network-security-group-query-parameter-id-f6dda45b2c09dbaa.yaml releasenotes/notes/network_add_bgp_resources-c182dc2873d6db18.yaml releasenotes/notes/network_add_bgpvpn_resources-b3bd0b568c3c99db.yaml releasenotes/notes/network_add_sfc_resources-8a52c0c8c1f8e932.yaml releasenotes/notes/network_add_taas_resources-86a947265e11ce84.yaml releasenotes/notes/neutron-discovery-54399116d5f810ee.yaml releasenotes/notes/neutron_availability_zone_extension-675c2460ebb50a09.yaml releasenotes/notes/new-floating-attributes-213cdf5681d337e1.yaml releasenotes/notes/no-import-fallback-a09b5d5a11299933.yaml releasenotes/notes/no-inspect-associated-563e272785bb6016.yaml releasenotes/notes/no-more-troveclient-0a4739c21432ac63.yaml releasenotes/notes/no-start-task-manager-56773f3ea5eb3a59.yaml releasenotes/notes/node-boot-devices-2ab4991d75a2ab52.yaml releasenotes/notes/node-consoles-63589f22da98a689.yaml releasenotes/notes/node-create-027ea99193f344ef.yaml releasenotes/notes/node-inject-nmi-53d12681026e0b6c.yaml releasenotes/notes/node-owner-7f4b083ff9da8cce.yaml releasenotes/notes/node-set-provision-state-3472cbd81c47458f.yaml releasenotes/notes/norm_role_assignments-a13f41768e62d40c.yaml releasenotes/notes/normalize-images-1331bea7bfffa36a.yaml releasenotes/notes/normalize-machine-290d9f2a3b3a7ef0.yaml 
releasenotes/notes/nova-flavor-to-rest-0a5757e35714a690.yaml releasenotes/notes/nova-old-microversion-5e4b8e239ba44096.yaml releasenotes/notes/object-checksum-generation-ea1c1e47d2290054.yaml releasenotes/notes/object-chunked-data-ee619b7d4759b8d2.yaml releasenotes/notes/object-search-a5f5ec4b2df3e045.yaml releasenotes/notes/old-placement-4b3c34abb8fe7b81.yaml releasenotes/notes/optimize-server-console-1d27c107b9a1cdc3.yaml releasenotes/notes/option-precedence-1fecab21fdfb2c33.yaml releasenotes/notes/port-device-profile-af91e25c45321691.yaml releasenotes/notes/power-wait-751083852f958cb4.yaml releasenotes/notes/project-cleanup-exclude-option-65cba962eaa5b61a.yaml releasenotes/notes/project-cleanup-swift-f67615e5c3ab8fd8.yaml releasenotes/notes/provision-state-negotiation-0155b4d0e932054c.yaml releasenotes/notes/python-3.5-629817cec092d528.yaml releasenotes/notes/qos-min-pps-rule-52df1b150b1d3f68.yaml releasenotes/notes/qos-port-network-policy-cab43faa0f8bc036.yaml releasenotes/notes/r1-cab94ae7d749a1ec.yaml releasenotes/notes/r1-d4efe289ebf0cbcd.yaml releasenotes/notes/rackspace-block-storage-v2-fe0dd69b9e037599.yaml releasenotes/notes/register-machine-72ac3e65a1ed55b1.yaml releasenotes/notes/remote-address-group-id-6291816888cb3de7.yaml releasenotes/notes/remote-profile-100218d08b25019d.yaml releasenotes/notes/remove-auto-container-527f1807605b42c0.yaml releasenotes/notes/remove-block-store-details-classes-158ab1f46655320a.yaml releasenotes/notes/remove-cloud-caching-layer-2b0384870a45e8a3.yaml releasenotes/notes/remove-magnumclient-875b3e513f98f57c.yaml releasenotes/notes/remove-metric-fe5ddfd52b43c852.yaml releasenotes/notes/remove-novaclient-3f8d4db20d5f9582.yaml releasenotes/notes/remove-serverdetails-resource-f66cb278b224627d.yaml releasenotes/notes/removed-deprecated-things-8700fe3592c3bf18.yaml releasenotes/notes/removed-glanceclient-105c7fba9481b9be.yaml releasenotes/notes/removed-meter-6f6651b6e452e000.yaml 
releasenotes/notes/removed-profile-437f3038025b0fb3.yaml releasenotes/notes/removed-profile-b033d870937868a1.yaml releasenotes/notes/removed-swiftclient-aff22bfaeee5f59f.yaml releasenotes/notes/rename-base-proxy-b9fcb22d373864a2.yaml releasenotes/notes/rename-resource-methods-5f2a716b08156765.yaml releasenotes/notes/rename-service-force-down-6f462d62959a5315.yaml releasenotes/notes/renamed-bare-metal-b1cdbc52af14e042.yaml releasenotes/notes/renamed-block-store-bc5e0a7315bfeb67.yaml releasenotes/notes/renamed-cluster-743da6d321fffcba.yaml releasenotes/notes/renamed-telemetry-c08ae3e72afca24f.yaml releasenotes/notes/replace-appdirs-with-platformdirs-d3f5bcbe726b7829.yaml releasenotes/notes/request-stats-9d70480bebbdb4d6.yaml releasenotes/notes/resource-find-filter-by-name-e647e5c507ff4b6c.yaml releasenotes/notes/resource2-migration-835590b300bef621.yaml releasenotes/notes/retrieve-detailed-view-for-find-proxy-methods-947a3280732c448a.yaml releasenotes/notes/revert-futurist-34acc42fd3f0e7f3.yaml releasenotes/notes/rework-compute-hypervisor-a62f275a0fd1f074.yaml releasenotes/notes/router-extraroute-atomic-1a0c84c3fd90ceb1.yaml releasenotes/notes/router_ext_gw-b86582317bca8b39.yaml releasenotes/notes/sdk-helper-41f8d815cfbcfb00.yaml releasenotes/notes/search_resource-b9c2f772e01d3b2c.yaml releasenotes/notes/server-actions-microversion-support-f14b293d9c3d3d5e.yaml releasenotes/notes/server-create-error-id-66c698c7e633fb8b.yaml releasenotes/notes/server-security-groups-840ab28c04f359de.yaml releasenotes/notes/service_enabled_flag-c917b305d3f2e8fd.yaml releasenotes/notes/session-client-b581a6e5d18c8f04.yaml releasenotes/notes/set-bootable-volume-454a7a41e7e77d08.yaml releasenotes/notes/shade-helper-568f8cb372eef6d9.yaml releasenotes/notes/shade-into-connection-81191fb3d0ddaf6e.yaml releasenotes/notes/shade-location-b0d2e5cae743b738.yaml releasenotes/notes/snap-updated_at-a46711b6160e3a26.yaml releasenotes/notes/stack-update-5886e91fd6e423bf.yaml 
releasenotes/notes/started-using-reno-242e2b0cd27f9480.yaml releasenotes/notes/stateful-security-group-f32a78b9bbb49874.yaml releasenotes/notes/stop-using-tenant-id-42eb35139ba9eeff.yaml releasenotes/notes/story-2010784-21d23043155497f5.yaml releasenotes/notes/stream-object-6ecd43511dca726b.yaml releasenotes/notes/stream-to-file-91f48d6dcea399c6.yaml releasenotes/notes/strict-mode-d493abc0c3e87945.yaml releasenotes/notes/strict-proxies-4a315f68f387ee89.yaml releasenotes/notes/support_stdin_image_upload-305c04fb2daeb32c.yaml releasenotes/notes/swift-set-metadata-c18c60e440f9e4a7.yaml releasenotes/notes/swift-upload-lock-d18f3d42b3a0719a.yaml releasenotes/notes/switch-coe-to-proxy-c18789ed27cc1d95.yaml releasenotes/notes/switch-nova-to-created_at-45b7b50af6a2d59e.yaml releasenotes/notes/switch-to-warnings-333955d19afc99ca.yaml releasenotes/notes/task-manager-parameter-c6606653532248f2.yaml releasenotes/notes/toggle-port-security-f5bc606e82141feb.yaml releasenotes/notes/unprocessed-2d75133911945869.yaml releasenotes/notes/unshelve-to-specific-host-84666d440dce4a73.yaml releasenotes/notes/update-role-property-b16e902e913c7b25.yaml releasenotes/notes/update_endpoint-f87c1f42d0c0d1ef.yaml releasenotes/notes/update_workflow-ecdef6056ef2687b.yaml releasenotes/notes/use-interface-ip-c5cb3e7c91150096.yaml releasenotes/notes/use-proxy-layer-dfc3764d52bc1f2a.yaml releasenotes/notes/v4-fixed-ip-325740fdae85ffa9.yaml releasenotes/notes/validate-machine-dcf528b8f587e3f0.yaml releasenotes/notes/vendor-add-betacloud-03872c3485104853.yaml releasenotes/notes/vendor-add-limestonenetworks-99b2ffab9fc23b08.yaml releasenotes/notes/vendor-update-betacloud-37dac22d8d91a3c5.yaml releasenotes/notes/vendor-updates-f11184ba56bb27cf.yaml releasenotes/notes/version-command-70c37dd7f880e9ae.yaml releasenotes/notes/version-discovery-a501c4e9e9869f77.yaml releasenotes/notes/vol-updated_at-274c3a2bb94c8939.yaml releasenotes/notes/volume-quotas-5b674ee8c1f71eb6.yaml 
releasenotes/notes/volume-types-a07a14ae668e7dd2.yaml releasenotes/notes/volume-update-876e6540c8471440.yaml releasenotes/notes/volume_connector-api-f001e6f5fc4d1688.yaml releasenotes/notes/wait-on-image-snapshot-27cd2eacab2fabd8.yaml releasenotes/notes/wait-provision-state-no-fail-efa74dd39f687df8.yaml releasenotes/notes/wait_for_server-8dc8446b7c673d36.yaml releasenotes/notes/wait_for_status_delete_callback_param-68d30161e23340bb.yaml releasenotes/notes/wire-in-retries-10898f7bc81e2269.yaml releasenotes/notes/workaround-transitive-deps-1e7a214f3256b77e.yaml releasenotes/notes/xenapi-use-agent-ecc33e520da81ffa.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder roles/deploy-clouds-config/README.rst roles/deploy-clouds-config/defaults/main.yaml roles/deploy-clouds-config/tasks/main.yaml roles/deploy-clouds-config/templates/clouds.yaml.j2 tools/keystone_version.py tools/nova_version.py tools/print-services.py zuul.d/acceptance-jobs.yaml zuul.d/functional-jobs.yaml zuul.d/metal-jobs.yaml zuul.d/project.yaml././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296384.0 openstacksdk-4.0.0/openstacksdk.egg-info/dependency_links.txt0000664000175000017500000000000100000000000024460 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296384.0 
openstacksdk-4.0.0/openstacksdk.egg-info/entry_points.txt0000664000175000017500000000011400000000000023704 0ustar00zuulzuul00000000000000[console_scripts] openstack-inventory = openstack.cloud.cmd.inventory:main ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296384.0 openstacksdk-4.0.0/openstacksdk.egg-info/not-zip-safe0000664000175000017500000000000100000000000022640 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296384.0 openstacksdk-4.0.0/openstacksdk.egg-info/pbr.json0000664000175000017500000000005700000000000022072 0ustar00zuulzuul00000000000000{"git_version": "b65b7d4d", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296384.0 openstacksdk-4.0.0/openstacksdk.egg-info/requires.txt0000664000175000017500000000037100000000000023013 0ustar00zuulzuul00000000000000PyYAML>=3.13 cryptography>=2.7 decorator>=4.4.1 dogpile.cache>=0.6.5 iso8601>=0.1.11 jmespath>=0.9.0 jsonpatch!=1.20,>=1.16 keystoneauth1>=3.18.0 netifaces>=0.10.4 os-service-types>=1.7.0 pbr!=2.1.0,>=2.0.0 platformdirs>=3 requestsexceptions>=1.2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296384.0 openstacksdk-4.0.0/openstacksdk.egg-info/top_level.txt0000664000175000017500000000001200000000000023135 0ustar00zuulzuul00000000000000openstack ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1012437 openstacksdk-4.0.0/playbooks/0000775000175000017500000000000000000000000016232 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.5094407 openstacksdk-4.0.0/playbooks/acceptance/0000775000175000017500000000000000000000000020320 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/playbooks/acceptance/post.yaml0000664000175000017500000000270600000000000022176 0ustar00zuulzuul00000000000000--- # This could be running on localhost only, but then the devstack job would need # to perform API call on the worker node. To keep the code a bit less crazy # rather address all hosts and perform certain steps on the localhost (zuul # executor). - hosts: all tasks: # TODO: # - clean the resources, which might have been created # Token is saved on the zuul executor node - name: Check token file delegate_to: localhost ansible.builtin.stat: path: "{{ zuul.executor.work_root }}/.{{ zuul.build }}" register: token_file # no_log is important since content WILL in logs - name: Read the token from file delegate_to: localhost no_log: true ansible.builtin.slurp: src: "{{ token_file.stat.path }}" register: token_data when: "token_file.stat.exists" - name: Delete data file delegate_to: localhost command: "shred {{ token_file.stat.path }}" when: "token_file.stat.exists" # no_log is important since content WILL appear in logs - name: Revoke token no_log: true ansible.builtin.uri: url: "{{ openstack_credentials.auth.auth_url | default(auth_url) }}/v3/auth/tokens" method: "DELETE" headers: X-Auth-Token: "{{ token_data['content'] | b64decode }}" X-Subject-Token: "{{ token_data['content'] | b64decode }}" status_code: 204 when: "token_file.stat.exists and 'content' in token_data" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/playbooks/acceptance/pre.yaml0000664000175000017500000000575300000000000022004 0ustar00zuulzuul00000000000000--- - hosts: all tasks: - name: Get temporary token for the cloud # nolog is important since content WILL appear in logs no_log: true ansible.builtin.uri: url: "{{ openstack_credentials.auth.auth_url | 
default(auth_url) }}/v3/auth/tokens" method: "POST" body_format: "json" body: auth: identity: methods: ["password"] password: user: name: "{{ openstack_credentials.auth.username | default(omit) }}" id: "{{ openstack_credentials.auth.user_id | default(omit) }}" password: "{{ openstack_credentials.auth.password }}" domain: name: "{{ openstack_credentials.auth.user_domain_name | default(omit) }}" id: "{{ openstack_credentials.auth.user_domain_id | default(omit) }}" scope: project: name: "{{ openstack_credentials.auth.project_name | default(omit) }}" id: "{{ openstack_credentials.auth.project_id | default(omit) }}" domain: name: "{{ openstack_credentials.auth.project_domain_name | default(omit) }}" id: "{{ openstack_credentials.auth.project_domain_id | default(omit) }}" return_content: true status_code: 201 register: os_auth - name: Verify token # nolog is important since content WILL appear in logs no_log: true ansible.builtin.uri: url: "{{ openstack_credentials.auth.auth_url | default(auth_url) }}/v3/auth/tokens" method: "GET" headers: X-Auth-Token: "{{ os_auth.x_subject_token }}" X-Subject-Token: "{{ os_auth.x_subject_token }}" - name: Include deploy-clouds-config role include_role: name: deploy-clouds-config vars: cloud_config: clouds: acceptance: profile: "{{ openstack_credentials.profile | default('') }}" auth_type: "token" auth: auth_url: "{{ openstack_credentials.auth.auth_url | default(auth_url) }}" project_name: "{{ openstack_credentials.auth.project_name | default('') }}" project_domain_id: "{{ openstack_credentials.auth.project_domain_id | default('') }}" project_domain_name: "{{ openstack_credentials.auth.project_domain_name | default('') }}" token: "{{ os_auth.x_subject_token }}" region_name: "{{ openstack_credentials.region_name | default('') }}" verify: "{{ openstack_credentials.verify | default(true) }}" # Intruders might want to corrupt clouds.yaml to avoid revoking token in the post phase # To prevent this we save token on the executor for later use. 
- name: Save the token delegate_to: localhost copy: dest: "{{ zuul.executor.work_root }}/.{{ zuul.build }}" content: "{{ os_auth.x_subject_token }}" mode: "0640" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/playbooks/acceptance/run-with-devstack.yaml0000664000175000017500000000054200000000000024564 0ustar00zuulzuul00000000000000--- # Need to actually start devstack first - hosts: all roles: - run-devstack - name: Get the token ansible.builtin.import_playbook: pre.yaml # Run the rest - hosts: all roles: - role: bindep bindep_profile: test bindep_dir: "{{ zuul_work_dir }}" - test-setup - ensure-tox - get-devstack-os-environment - tox ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.5094407 openstacksdk-4.0.0/playbooks/devstack/0000775000175000017500000000000000000000000020036 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/playbooks/devstack/legacy-git.yaml0000664000175000017500000000044400000000000022751 0ustar00zuulzuul00000000000000- hosts: all tasks: - name: Set openstacksdk libraries to master branch before functional tests command: git checkout master args: chdir: "src/opendev.org/{{ item }}" with_items: - openstack/shade - openstack/keystoneauth - openstack/os-client-config ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/playbooks/devstack/post.yaml0000664000175000017500000000034000000000000021704 0ustar00zuulzuul00000000000000- hosts: all tasks: - include_role: name: fetch-tox-output - include_role: name: fetch-subunit-output when: fetch_subunit|default(true)|bool - include_role: name: process-stackviz ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 
openstacksdk-4.0.0/post_test_hook.sh0000775000175000017500000000216500000000000017636 0ustar00zuulzuul00000000000000#!/bin/bash # # This is a script that kicks off a series of functional tests against a # OpenStack devstack cloud. This script is intended to work as a gate # in project-config for the Python SDK. DIR=$(cd $(dirname "$0") && pwd) echo "Running SDK functional test suite" sudo -H -u stack -i <`_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-az-to-loadbalancer-da9bf1baaedc89a4.yaml0000664000175000017500000000017700000000000027720 0ustar00zuulzuul00000000000000--- features: - | Adds Octavia (load_balancer) support for the availability zone and availability zone profile APIs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-block-storage-group-snapshots-954cc869227317c3.yaml0000664000175000017500000000012600000000000031377 0ustar00zuulzuul00000000000000--- features: - | Add support for group snapshots to the block storage service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-block-storage-group-type-group-specs-d07047167224ec83.yaml0000664000175000017500000000020200000000000032562 0ustar00zuulzuul00000000000000--- features: - | Add support for creating, updating and deleting group type group specs for the block storage service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-block-storage-groups-bf5f1af714c9e505.yaml0000664000175000017500000000011500000000000030027 0ustar00zuulzuul00000000000000--- features: - | Add support for groups to the block storage service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-block-storage-service-support-ce03092ce2d7e7b9.yaml0000664000175000017500000000010200000000000031656 0ustar00zuulzuul00000000000000--- features: - | Added support for block storage services. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-block-storage-summary-support-dd00d424c4e6a3b1.yaml0000664000175000017500000000010200000000000031670 0ustar00zuulzuul00000000000000--- features: - | Added support for block storage summary. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-bulk-create-resources-12192ec9d76c7716.yaml0000664000175000017500000000035000000000000027754 0ustar00zuulzuul00000000000000--- features: - Enabling Resource class for being able to create objects in bulk way. Add first objects using that feature - Port, which now expose a proxy method `create_ports` for creating multiple port objects at once. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-cipher-list-support-to-octavia-b6b2b0053ca6b184.yaml0000664000175000017500000000024300000000000031656 0ustar00zuulzuul00000000000000--- features: - | Added the ``tls_ciphers`` properties to listener.py and pool.py for storing stings of tls ciphers in OpenSSL cipher string format. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-compute-flavor-ops-12149e58299c413e.yaml0000664000175000017500000000026000000000000027226 0ustar00zuulzuul00000000000000--- features: - | Add additional compute flavor operations (flavor_add_tenant_access, flavor_remove_tenant_access, get_flavor_access, extra_specs fetching/updating). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-current-user-id-49b6463e6bcc3b31.yaml0000664000175000017500000000021000000000000026716 0ustar00zuulzuul00000000000000--- features: - Added a new property, 'current_user_id' which contains the id of the currently authenticated user from the token. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-cyborg-support-b9afca69f709c048.yaml0000664000175000017500000000006600000000000026772 0ustar00zuulzuul00000000000000--- features: - Add support for Cyborg(accelerator) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-dns-606cc018e01d40fa.yaml0000664000175000017500000000015400000000000024443 0ustar00zuulzuul00000000000000--- features: - | Adds support for `dns `_ service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-dns-domain-support-for-port-3fa4568330dda07e.yaml0000664000175000017500000000015100000000000031201 0ustar00zuulzuul00000000000000--- features: - | ``dns_domain`` attribute support has been added to the network port resource ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-dns-resource-list-by-project-8b5479a045ef7373.yaml0000664000175000017500000000026300000000000031212 0ustar00zuulzuul00000000000000--- features: - | Add functionality to list DNS resources for a certain project only, or for all projects, using the new `project_id` and `all_projects` parameters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-dns-zone-share-api-374e71cac504917f.yaml0000664000175000017500000000010500000000000027221 0ustar00zuulzuul00000000000000--- features: - | Add Designate (DNS) support for zone shares. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-fakes-generator-72c53d34c995fcb2.yaml0000664000175000017500000000016600000000000026765 0ustar00zuulzuul00000000000000--- features: - | Add fake resource generator to ease unit testing in packages that depend on openstacksdk. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-find-backup-find-snapshot-v2-756a05ccd150db82.yaml0000664000175000017500000000030000000000000031143 0ustar00zuulzuul00000000000000--- features: - | The ``find_snapshot`` and ``find_backup`` methods have been added to the v2 block storage proxy API. These were previously only available for the v3 proxy API. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-fip-portforwarding-methods-cffc14a6283cedfb.yaml0000664000175000017500000000011100000000000031453 0ustar00zuulzuul00000000000000--- features: - | Add floating IP Port Forwarding related methods. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-identity-domain-configuration-2e8bcaa20736b379.yaml0000664000175000017500000000017400000000000031641 0ustar00zuulzuul00000000000000--- features: - | Add support for creating, updating and deleting domain configurations for the identity service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-identity-group-users-proxy-method-e37f8983b2406819.yaml0000664000175000017500000000010400000000000032252 0ustar00zuulzuul00000000000000--- features: - | Add possibility to list users in the group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-identity-service-provider-support-8c97cbb157883626.yaml0000664000175000017500000000012300000000000032404 0ustar00zuulzuul00000000000000--- features: - | Add support for service providers to the identity service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-image-attributes-05b820a85cd09806.yaml0000664000175000017500000000011000000000000026772 0ustar00zuulzuul00000000000000--- features: - Add image attributes is_hidden, hash_algo, hash_value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-image-cache-support-3f8c13550a84d749.yaml0000664000175000017500000000007200000000000027402 0ustar00zuulzuul00000000000000--- features: - | Add support for glance Cache API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-image-cache-support-78477e1686c52e56.yaml0000664000175000017500000000007200000000000027340 0ustar00zuulzuul00000000000000--- features: - | Add support for glance Cache API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-image-metadef-namespace-support-b93557afdcf4272c.yaml0000664000175000017500000000011500000000000032112 0ustar00zuulzuul00000000000000--- features: -| Adds support to query metadef namespaces from glance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-image-metadef-property-fb87e5a7090e73ac.yaml0000664000175000017500000000012200000000000030326 0ustar00zuulzuul00000000000000--- features: - | Added support for the ``MetadefProperty`` Image resource. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-image-metadef-schema-b463825481bdf954.yaml0000664000175000017500000000012700000000000027473 0ustar00zuulzuul00000000000000--- features: - Add support for metadata definition schema resource in image service.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-image-schema-9c07c2789490718a.yaml0000664000175000017500000000010400000000000026010 0ustar00zuulzuul00000000000000--- features: - Add support for schema resource in image service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-image-service-info-90d6063b5ba0735d.yaml0000664000175000017500000000013500000000000027260 0ustar00zuulzuul00000000000000--- features: - Add image service info discovery (import constraints and supported stores) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-image-stage-1dbc3844a042fd26.yaml0000664000175000017500000000007400000000000026051 0ustar00zuulzuul00000000000000--- features: - | Add support for staging image data. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-jmespath-support-f47b7a503dbbfda1.yaml0000664000175000017500000000016200000000000027432 0ustar00zuulzuul00000000000000--- features: - All get and search functions can now take a jmespath expression in their filters parameter. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-limit-to-shared-file-2b443c2a00c75e6e.yaml0000664000175000017500000000015600000000000027604 0ustar00zuulzuul00000000000000--- features: - | Added support to list absolute resource limits on the shared file system service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-list_flavor_access-e038253e953e6586.yaml0000664000175000017500000000017100000000000027341 0ustar00zuulzuul00000000000000--- features: - Add a list_flavor_access method to list all the projects/tenants allowed to access a given flavor. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-load-balancer-flavor-api-d2598e30347a19fc.yaml0000664000175000017500000000012200000000000030342 0ustar00zuulzuul00000000000000--- features: - | Adds Octavia (load_balancer) support for the flavor APIs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-load-balancer-flavor-profile-api-e5a15157563eb75f.yaml0000664000175000017500000000013200000000000032000 0ustar00zuulzuul00000000000000--- features: - | Adds Octavia (load_balancer) support for the flavor profile APIs. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=openstacksdk-4.0.0/releasenotes/notes/add-load-balancer-listener-alpn-protocols-ded816c78bf2080c.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-load-balancer-listener-alpn-protocols-ded816c78bf2080c.yam0000664000175000017500000000013100000000000033062 0ustar00zuulzuul00000000000000--- features: - Adds ALPN protocols support for the Octavia (load_balancer) listeners. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-load-balancer-pool-alpn-protocols-77f0c7015f176369.yaml0000664000175000017500000000012500000000000032074 0ustar00zuulzuul00000000000000--- features: - Adds ALPN protocols support for the Octavia (load_balancer) pools. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-load-balancer-provider-api-08bcfb72ddf5b247.yaml0000664000175000017500000000012500000000000031120 0ustar00zuulzuul00000000000000--- features: - | Adds Octavia (load_balancer) support for the providers APIs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-magnum-cluster-support-843fe2709b8f4789.yaml0000664000175000017500000000012600000000000030243 0ustar00zuulzuul00000000000000--- features: - | Added magnum cluster CRUD support to cloud abstraction layer. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-manage-volume-support-a4fd90e3ff2fa0d0.yaml0000664000175000017500000000010300000000000030351 0ustar00zuulzuul00000000000000--- features: - | Added support for manage volume operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-masakara-support-3f7df4436ac869cf.yaml0000664000175000017500000000016100000000000027266 0ustar00zuulzuul00000000000000--- features: - | Ported in support for masakari/``instance_ha`` service from `python-masakariclient`. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-masakari-enabled-to-segment-0e83da869d2ab03f.yaml0000664000175000017500000000020000000000000031205 0ustar00zuulzuul00000000000000--- features: - Add support for the ``enabled`` field of the ``Segment`` resource for the instance HA service (Masakari). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-masakari-vmoves-873ad67830c92254.yaml0000664000175000017500000000015100000000000026571 0ustar00zuulzuul00000000000000--- features: - Add support for the new ``VMove`` resource for the instance HA service (Masakari). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-metadef-object-5eec168baf039e80.yaml0000664000175000017500000000007700000000000026637 0ustar00zuulzuul00000000000000--- features: - | Add support for Image Metadef objects. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-migrations-946adf16674d4b2a.yaml0000664000175000017500000000022500000000000026055 0ustar00zuulzuul00000000000000--- features: - | Add support for the Compute service's migrations API, allowing users to list all in-progress migrations for all servers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-namespace-object-delete-all-6cea62cb038012df.yaml0000664000175000017500000000012500000000000031147 0ustar00zuulzuul00000000000000--- features: - | Add support for deleting all objects inside a namespace. 
././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=openstacksdk-4.0.0/releasenotes/notes/add-new-field-progress-details-in-notification-resource-f7871acb6ffd46dc.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-new-field-progress-details-in-notification-resource-f7871a0000664000175000017500000000047000000000000033511 0ustar00zuulzuul00000000000000--- features: - | In microversion 1.1, Masakari returns ``recovery_workflow_details`` information of the notification in ``GET /notifications/{notification_id}`` API. Added ``recovery_workflow_details`` attribute to Notification class to read the recovery_workflow_details of the notification. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-node-boot-mode-5f49882fdd86f35b.yaml0000664000175000017500000000020300000000000026530 0ustar00zuulzuul00000000000000--- features: - | Add support to display node fields ``boot_mode`` and ``secure_boot`` which are introduced in API 1.75. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-node-boot-mode-set-5718a8d6511b4826.yaml0000664000175000017500000000022600000000000027065 0ustar00zuulzuul00000000000000--- features: - | Add support for changing node states ``boot_mode`` and ``secure_boot`` in sync with functionality introduced in API 1.76. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-node-firmware-list-support-fec2f96a3a578730.yaml0000664000175000017500000000020000000000000031121 0ustar00zuulzuul00000000000000--- features: - | Adds support for querying a bare-metal node's firmware as per functionality introduced in API 1.86. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-node-inventory-52f54e16777814e7.yaml0000664000175000017500000000017600000000000026463 0ustar00zuulzuul00000000000000--- features: - | Adds support for querying a node's hardware inventory as per functionality introduced in API 1.81.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-node-vendor_passthru-29b384cadf795b48.yaml0000664000175000017500000000011300000000000030060 0ustar00zuulzuul00000000000000--- features: - | Add node vendor_passthru interface for Ironic API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-octavia-amphora-api-7f3586f6a4f31de4.yaml0000664000175000017500000000012300000000000027530 0ustar00zuulzuul00000000000000--- features: - | Adds Octavia (load_balancer) support for the amphora APIs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-octavia-lb-failover-9a34c9577d78ad34.yaml0000664000175000017500000000011600000000000027462 0ustar00zuulzuul00000000000000--- features: - | Added Octavia (load_balancer) load balancer failover. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-octavia-lb-listener-stats-1538cc6e4f734353.yaml0000664000175000017500000000012100000000000030536 0ustar00zuulzuul00000000000000--- features: - | Added load balancer and listener get statistics methods. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-octavia-listener-hsts-fields-50c621b71e56dc13.yaml0000664000175000017500000000023000000000000031274 0ustar00zuulzuul00000000000000--- features: - | Added new fields to loadbalancer create/update listener API in order to support new HTTP Strict Transport Security support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-octavia-tags-support-1c1cf94184e6ebb7.yaml0000664000175000017500000000047400000000000030063 0ustar00zuulzuul00000000000000--- features: - Add tags support for the Octavia (load_balancer) objects. - | Added support for the Octavia (load_balancer) L7 Policy "redirect_prefix" capability. fixes: - | Fixed the Octavia (load_balancer) load balancer objects to have "flavor_id" instead of the nonexistent "flavor" field. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-placement-resource-class-e1c644d978b886bc.yaml0000664000175000017500000000012400000000000030614 0ustar00zuulzuul00000000000000--- features: - | Added support for the ``ResourceClass`` Placement resource. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=openstacksdk-4.0.0/releasenotes/notes/add-placement-resource-provider-aggregates-1310c0be6a4097d3.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-placement-resource-provider-aggregates-1310c0be6a4097d3.ya0000664000175000017500000000014300000000000033001 0ustar00zuulzuul00000000000000--- features: - | Add support for aggregates to the ``ResourceProvider`` Placement resource. 
././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=openstacksdk-4.0.0/releasenotes/notes/add-placement-resource-provider-inventory-8714cafefae74810.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-placement-resource-provider-inventory-8714cafefae74810.yam0000664000175000017500000000014000000000000033261 0ustar00zuulzuul00000000000000--- features: - | Added support for the ``ResourceProviderInventory`` Placement resource. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-placement-support-a2011eb1e900804d.yaml0000664000175000017500000000021400000000000027251 0ustar00zuulzuul00000000000000--- features: - | Add initial support for Placement. Currently the following resources are supported: - ``ResourceProvider`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-placement-trait-29957d2c03edbfb9.yaml0000664000175000017500000000011200000000000027052 0ustar00zuulzuul00000000000000--- features: - | Add support for the ``Trait`` Placement resource. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-port-hardware-offload-type-1232c5ae3f62d7df.yaml0000664000175000017500000000035100000000000031120 0ustar00zuulzuul00000000000000--- features: - | Add ``hardware_offload_type`` attribute to ``port`` resource. Users can set this attribute to a valid value defined in ``neutron_lib.constants.VALID_HWOL_TYPES``, set "None" or leave it undefined. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-port-numa-affinity-policy-b42a85dbe26560d2.yaml0000664000175000017500000000030300000000000030713 0ustar00zuulzuul00000000000000--- features: - | Add ``numa_affinity_policy`` attribute to ``port`` resource. Users can set this attribute to ``required``, ``deferred`` or ``legacy``. This parameter is nullable. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-propagate_uplink_status-to-port-0152d476c65979e3.yaml0000664000175000017500000000103500000000000032043 0ustar00zuulzuul00000000000000--- features: - | Add ``propagate_uplink_status`` attribute to ``port`` resource. Users can set this attribute to ``True`` or ``False``. If it is set to ``True``, uplink status propagation is enabled. Otherwise, it is disabled. Neutron server needs to have the API extension ``uplink-status-propagation`` in order to support this feature. This feature can be used in SRIOV scenario, in which users enable uplink status propagation of the SRIOV port so that the link status of the VF will follow the PF. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=openstacksdk-4.0.0/releasenotes/notes/add-quota-class-set-to-shared-file-systems-43da33e6a3ed65e3.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-quota-class-set-to-shared-file-systems-43da33e6a3ed65e3.ya0000664000175000017500000000013500000000000032755 0ustar00zuulzuul00000000000000--- features: - | Added get and update to Quota Class Set to file system as a service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-server-clear-password-256e269223453bd7.yaml0000664000175000017500000000020100000000000027705 0ustar00zuulzuul00000000000000--- features: - | The ``Server.clear_password`` and equivalent ``clear_server_password`` proxy method have been added. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-server-console-078ed2696e5b04d9.yaml0000664000175000017500000000032300000000000026577 0ustar00zuulzuul00000000000000--- features: - Added get_server_console method to fetch the console log from a Server. On clouds that do not expose this feature, a debug line will be logged and an empty string will be returned. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-server-migrations-6e31183196f14deb.yaml0000664000175000017500000000031300000000000027275 0ustar00zuulzuul00000000000000--- features: - | Add support for the Compute service's server migrations API, allowing users to list all migrations for a server as well as force complete or abort in-progress migrations. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-server-tag-proxy-methods-c791a36d8d4d85f6.yaml0000664000175000017500000000026100000000000030614 0ustar00zuulzuul00000000000000--- features: - | The following new compute proxy methods have been added: - ``add_tag_to_server`` - ``remove_tag_from_server`` - ``remove_tags_from_server`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-service-0bcc16eb026eade3.yaml0000664000175000017500000000025100000000000025535 0ustar00zuulzuul00000000000000--- features: - | Added a new method `openstack.connection.Connection.add_service` which allows the registration of Proxy/Resource classes defined externally. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-sg-rules-bulk-f36a3e2326d74867.yaml0000664000175000017500000000027600000000000026251 0ustar00zuulzuul00000000000000--- features: - Added bulk create securtiy groups rules. With new proxy method `create_security_group_rules` now it's possible to create multiple rules for certain security group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-share-access-rules-to-shared-file-362bee34f7331186.yaml0000664000175000017500000000020200000000000032024 0ustar00zuulzuul00000000000000--- features: - | Added support to create, list, get and delete share access rules with the shared file system service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-share-network-subnet-to-shared-file-b5de3ce6ca723209.yaml0000664000175000017500000000020400000000000032637 0ustar00zuulzuul00000000000000--- features: - | Added support to create, list, get, and delete share network subnets on the shared file system service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-share-network-to-shared-file-c5c9a6b8ccf1d958.yaml0000664000175000017500000000020500000000000031442 0ustar00zuulzuul00000000000000--- features: - | Added support to create, update, list, get, and delete share networks on the shared file system service. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=openstacksdk-4.0.0/releasenotes/notes/add-share-snapshot-instance-to-shared-file-4d935f12d67bf59d.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-share-snapshot-instance-to-shared-file-4d935f12d67bf59d.ya0000664000175000017500000000016600000000000032730 0ustar00zuulzuul00000000000000--- features: - | Added support to list and get share snapshot instances on the shared file system service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-share-snapshot-to-shared-file-82ecedbdbed2e3c5.yaml0000664000175000017500000000020100000000000032006 0ustar00zuulzuul00000000000000--- features: - | Adds support to create, update, list, get, and delete share snapshots to shared file system service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-share_group-to-shared-file-8cee20d8aa2afbb7.yaml0000664000175000017500000000020300000000000031302 0ustar00zuulzuul00000000000000--- features: - | Added support to create, update, list, get, and delete share groups on the shared file system service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shared-file-syste-share_instance-fffaea2d3a77ba24.yaml0000664000175000017500000000022100000000000032500 0ustar00zuulzuul00000000000000--- features: - | Added support to list, get, reset status of, and force delete share instances (from shared file system service). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shared-file-system-locks-support-4859ca93f93a1056.yaml0000664000175000017500000000033600000000000032102 0ustar00zuulzuul00000000000000--- features: - | Added support to manipulate resource locks from the shared file system service. - | Added support to restrict the visibility and deletion of the shared file system share access rules. 
././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=openstacksdk-4.0.0/releasenotes/notes/add-shared-file-system-manage-unmanage-share-830e313f96e5fd2b.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shared-file-system-manage-unmanage-share-830e313f96e5fd2b.0000664000175000017500000000015400000000000032644 0ustar00zuulzuul00000000000000--- features: - | Added support to manage and unmanage shares from the shared file system service.././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=openstacksdk-4.0.0/releasenotes/notes/add-shared-file-system-share-group-snapshot-c5099e6c8accf077.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shared-file-system-share-group-snapshot-c5099e6c8accf077.y0000664000175000017500000000022200000000000033070 0ustar00zuulzuul00000000000000--- features: - | Added support for list, show, update, delete, reset and create Share Group Snapshots for Shared File Systems service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shared-file-system-share-metadata-e0415bb71d8a0a48.yaml0000664000175000017500000000020600000000000032245 0ustar00zuulzuul00000000000000--- features: - | Added support to list, get, create, update, and delete share metadata from shared file system service.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shared-file-system-share-resize-ddd650c2e32fed34.yaml0000664000175000017500000000010700000000000032135 0ustar00zuulzuul00000000000000--- features: - | Added support for shrink/extend share actions. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shared-file-system-shares-2e1d44a1bb882d6d.yaml0000664000175000017500000000013200000000000030735 0ustar00zuulzuul00000000000000--- features: - | Added revert share to snapshot to shared file system service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shared-file-system-shares-e9f356a318045607.yaml0000664000175000017500000000017500000000000030457 0ustar00zuulzuul00000000000000--- features: - | Added support to create, update, list, get, and delete shares (from shared file system service). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shared-file-systems-83a3767429fd5e8c.yaml0000664000175000017500000000031700000000000027531 0ustar00zuulzuul00000000000000--- features: - | Support for the OpenStack Shared File System API (manila) has been introduced. - | Added support to list Shared File System Service API Versions and Availability Zones. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shared-file-systems-export-location-a27c1741880c384b.yaml0000664000175000017500000000017500000000000032555 0ustar00zuulzuul00000000000000--- features: - | Added support to list and show Export Locations for shares from the Shared File Systems service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-shelve_offload-427f6550fc55e622.yaml0000664000175000017500000000011700000000000026523 0ustar00zuulzuul00000000000000--- features: - | Adds shelve_offload_server method to the compute proxy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-show-all-images-flag-352748b6c3d99f3f.yaml0000664000175000017500000000063200000000000027540 0ustar00zuulzuul00000000000000--- features: - Added flag "show_all" to list_images. The behavior of Glance v2 to only show shared images if they have been accepted by the user can be confusing, and the only way to change it is to use search_images(filters=dict(member_status='all')) which isn't terribly obvious. "show_all=True" will set that flag, as well as disabling the filtering of images in "deleted" state. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-stack-events-b8674d7bb657e789.yaml0000664000175000017500000000017100000000000026256 0ustar00zuulzuul00000000000000--- features: - | The ``stack_events`` method and ``StackEvent`` Class have been added to retrieve stack events././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-stack-export-3ace746a8c80d766.yaml0000664000175000017500000000013000000000000026325 0ustar00zuulzuul00000000000000--- features: - | Add ``export_stack`` to print stack infomation in a json format ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-stack-suspend-and-resume-26d4fc5904291d5d.yaml0000664000175000017500000000015600000000000030447 0ustar00zuulzuul00000000000000--- 
features: - | Adds ``suspend_stack`` and ``resume_stack`` to support stack non-lifecycle operations.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-storage-pool-to-shared-file-ad45da1b2510b412.yaml0000664000175000017500000000017100000000000031061 0ustar00zuulzuul00000000000000--- features: - | Added support for retrieving storage pools information from the Shared File Systems service. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=openstacksdk-4.0.0/releasenotes/notes/add-support-allowed-cidrs-loadbalancer-listener-809e523a8bd6a7d5.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-support-allowed-cidrs-loadbalancer-listener-809e523a8bd6a70000664000175000017500000000011500000000000033203 0ustar00zuulzuul00000000000000--- features: - Added allowed_cidrs parameter into load balancer listener. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=openstacksdk-4.0.0/releasenotes/notes/add-support-availability_zone-loadbalancer-a18aa1708d7859e2.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-support-availability_zone-loadbalancer-a18aa1708d7859e2.ya0000664000175000017500000000010700000000000033114 0ustar00zuulzuul00000000000000--- features: - Added availability_zone parameter into load balancer.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-support-for-setting-static-routes-b3ce6cac2c5e9e51.yaml0000664000175000017500000000071300000000000032674 0ustar00zuulzuul00000000000000--- features: - | The networking API v2 specification, which is implemented by OpenStack Neutron, features an optional routes parameter - when updating a router (PUT requests). 
Static routes are crucial for routers to handle traffic from subnets not directly connected to a router. The routes parameter has now been added to the OpenStackCloud.update_router method as a list of dictionaries with destination and nexthop parameters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-system-role-assignment-693dd3e1da33a54d.yaml0000664000175000017500000000057600000000000030414 0ustar00zuulzuul00000000000000--- features: - | Add support for system role assignment. A system role assignment ultimately controls access to system-level API calls. Good examples of system-level APIs include management of the service catalog and compute hypervisors. `System role assignment API reference `_. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=openstacksdk-4.0.0/releasenotes/notes/add-tls-container-refs-params-for-octavia-pools-76f295cd2daa7f53.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-tls-container-refs-params-for-octavia-pools-76f295cd2daa7f0000664000175000017500000000033600000000000033221 0ustar00zuulzuul00000000000000--- features: - | Add both ``ca_tls_container_ref`` and ``crl_container_ref`` parameters for Octavia pools, which can be used to store the ca certificate used by backend servers and the revocation list file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-tls-version-support-for-octavia-7ecb372e6fb58101.yaml0000664000175000017500000000027300000000000032107 0ustar00zuulzuul00000000000000--- features: - | Added the ``tls_versions`` properties to listener.py and pool.py for storing a python list of TLS protocol versions to be used by the pools and listeners. 
././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=openstacksdk-4.0.0/releasenotes/notes/add-tls_enabled-parameter-for-octavia-pools-f0a23436d826b313.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-tls_enabled-parameter-for-octavia-pools-f0a23436d826b313.y0000664000175000017500000000025100000000000032625 0ustar00zuulzuul00000000000000--- features: - | Add ``tls_enabled`` parameter for Octavia pools, it can be used to enable TLS communications between a load balancer and its member servers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-unified-limit-5ac334a08e137a70.yaml0000664000175000017500000000021500000000000026340 0ustar00zuulzuul00000000000000--- features: - | Added the unified limits basic CRUD methods. It includes two kinds of resources: `registered_limit` and `limit`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-user-group-assignment-9c419b6c6bfe392c.yaml0000664000175000017500000000012400000000000030243 0ustar00zuulzuul00000000000000--- features: - | Add support for user group assignments in identity service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-user-message-to-shared-file-85d7bbccf8347c4f.yaml0000664000175000017500000000016400000000000031254 0ustar00zuulzuul00000000000000--- features: - | Added support to list, get, and delete user messages on the shared file system service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-vif-optional-params-abb755b74f076eb2.yaml0000664000175000017500000000027700000000000027654 0ustar00zuulzuul00000000000000--- features: - | Extend the ``attach_vif`` and ``attach_vif_to_node`` methods of the baremetal proxy to to accept optional parameters for VIF port UUID and VIF portgroup UUID. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-volume-attachment-support-b5f9a9e78ba88355.yaml0000664000175000017500000000027100000000000031072 0ustar00zuulzuul00000000000000--- features: - | Added support for: * Create Attachment * Update Attachment * List Attachment * Get Attachment * Delete Attachment * Complete Attachment ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-volume-extend-support-86e5c8cff5d6874e.yaml0000664000175000017500000000012500000000000030314 0ustar00zuulzuul00000000000000--- features: - Add the ability to extend a volume size with extend_volume method. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-volume-image-metadata-support-c61bcb918fdff529.yaml0000664000175000017500000000011200000000000031721 0ustar00zuulzuul00000000000000--- features: - | Added support for managing volume image metadata. 
././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=openstacksdk-4.0.0/releasenotes/notes/add-volume-snapshot-manage-unmanage-support-fc0be2a3fb4427d1.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-volume-snapshot-manage-unmanage-support-fc0be2a3fb4427d1.y0000664000175000017500000000014300000000000033224 0ustar00zuulzuul00000000000000--- features: - | Added support for volume snapshot manage and volume snapshot unmanage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-volume-transfer-support-28bf34a243d96e1b.yaml0000664000175000017500000000015200000000000030536 0ustar00zuulzuul00000000000000--- features: - | Added support for volume transfer create, find, delete, get, list and accept. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add-volume-type-update-b84f50b7fa3b061d.yaml0000664000175000017500000000011600000000000027514 0ustar00zuulzuul00000000000000--- features: - Add support for updating block storage volume type objects. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_access_rules-06eb8a1f9fcd9367.yaml0000664000175000017500000000023300000000000026526 0ustar00zuulzuul00000000000000--- features: - | Added support for `access_rules `_. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_description_create_user-0ddc9a0ef4da840d.yaml0000664000175000017500000000012500000000000031074 0ustar00zuulzuul00000000000000--- features: - Add description parameter to create_user, available on Keystone v3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_designate_recordsets_support-69af0a6b317073e7.yaml0000664000175000017500000000020500000000000031672 0ustar00zuulzuul00000000000000--- features: - Add support for Designate recordsets resources, with the usual methods (search/list/get/create/update/delete). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_designate_zones_support-35fa9b8b09995b43.yaml0000664000175000017500000000020000000000000030663 0ustar00zuulzuul00000000000000--- features: - Add support for Designate zones resources, with the usual methods (search/list/get/create/update/delete). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_heat_tag_support-135aa43ba1dce3bb.yaml0000664000175000017500000000031200000000000027516 0ustar00zuulzuul00000000000000--- features: - | Add tags support when creating a stack, as specified by the openstack orchestration api at [1] [1]https://developer.openstack.org/api-ref/orchestration/v1/#create-stack ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_host_aggregate_support-471623faf45ec3c3.yaml0000664000175000017500000000012500000000000030534 0ustar00zuulzuul00000000000000--- features: - Add support for host aggregates and host aggregate membership. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_image_import_support-6cea2e7d7a781071.yaml0000664000175000017500000000043000000000000030227 0ustar00zuulzuul00000000000000--- features: - Add ability to create image without upload data at the same time - Add support for interoperable image import process as introduced in the Image API v2.6 at [1] [1]https://developer.openstack.org/api-ref/image/v2/index.html#interoperable-image-import ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_influxdb_stats-665714d715302ad5.yaml0000664000175000017500000000011500000000000026564 0ustar00zuulzuul00000000000000--- features: - | Add possibility to report API metrics into InfluxDB. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_magnum_baymodel_support-e35e5aab0b14ff75.yaml0000664000175000017500000000047100000000000031051 0ustar00zuulzuul00000000000000--- features: - Add support for Magnum baymodels, with the usual methods (search/list/get/create/update/delete). Due to upcoming rename in Magnum from baymodel to cluster_template, the shade functionality uses the term cluster_template. However, baymodel aliases are provided for each api call. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_magnum_services_support-3d95f9dcc60b5573.yaml0000664000175000017500000000007300000000000030756 0ustar00zuulzuul00000000000000--- features: - Add support for listing Magnum services. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_project_cleanup-39c3517b25a5372e.yaml0000664000175000017500000000056600000000000027004 0ustar00zuulzuul00000000000000--- features: - | Project cleanup functionality. It provides a single method in the connection object, which calls cleanup method in all supported services (both part of the SDK itself and all "imported" in the runtime or through the vendor_hook functionality). Cleanup is working in multiple threads where possible (no dependencies between services). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_server_group_support-dfa472e3dae7d34d.yaml0000664000175000017500000000010300000000000030511 0ustar00zuulzuul00000000000000--- features: - Adds support to create and delete server groups. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_support_port_binding_attrs-c70966724eb970f3.yaml0000664000175000017500000000031000000000000031316 0ustar00zuulzuul00000000000000--- features: - Add support for query of port binding extended attributes including 'binding:host_id', 'binding:vnic_type', 'binding:vif_type', 'binding:vif_details', and 'binding:profile'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_update_server-8761059d6de7e68b.yaml0000664000175000017500000000012600000000000026573 0ustar00zuulzuul00000000000000--- features: - Add update_server method to update name or description of a server. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_update_service-28e590a7a7524053.yaml0000664000175000017500000000040600000000000026543 0ustar00zuulzuul00000000000000--- features: - Add the ability to update a keystone service information. This feature is not available on keystone v2.0. The new function, update_service(), allows the user to update description, name of service, service type, and enabled status. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/add_vendor_hook-e87b6afb7f215a30.yaml0000664000175000017500000000054500000000000026360 0ustar00zuulzuul00000000000000--- features: - | Add possibility to automatically invoke vendor hooks. This can be done either through extending profile (vendor_hook), or passing `vendor_hook` parameter to the connection. The format of the vendor_hook is the same as in the setuptools (module.name:function_name). The hook will get connection as the only parameter. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/added-federation-support-3b65e531e57211f5.yaml0000664000175000017500000000025400000000000027701 0ustar00zuulzuul00000000000000--- features: - | Adds support to create and manage Identity v3 Federation resources - Specifically, Identity Providers, Mappings and Federation Protocols. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/added-senlin-support-1eb4e47c31258f66.yaml0000664000175000017500000000005300000000000027134 0ustar00zuulzuul00000000000000--- features: - Added support for senlin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/allocation-api-04f6b3b7a0ccc850.yaml0000664000175000017500000000010700000000000026110 0ustar00zuulzuul00000000000000--- features: - | Adds support for the baremetal allocation API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/allocation-update-910c36c1290e5121.yaml0000664000175000017500000000014100000000000026315 0ustar00zuulzuul00000000000000--- features: - | Allows updating ``name`` and ``extra`` fields of a baremetal allocation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/alternate-auth-context-3939f1492a0e1355.yaml0000664000175000017500000000026700000000000027340 0ustar00zuulzuul00000000000000--- features: - Added methods for making new cloud connections based on the current OpenStackCloud. This should enable working more easily across projects or user accounts. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/always-detail-cluster-templates-3eb4b5744ba327ac.yaml0000664000175000017500000000025500000000000031435 0ustar00zuulzuul00000000000000--- upgrade: - Cluster Templates have data model and normalization now. As a result, the detail parameter is now ignored and detailed records are always returned. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/auth-url-vexxhost-8d63cd17bde21320.yaml0000664000175000017500000000071500000000000026572 0ustar00zuulzuul00000000000000--- fixes: - | The ``v3password`` ``auth_type`` implies that the ``auth_url`` given is a versioned endpoint and so discovery is skipped for auth. Previously the ``auth_type`` for Vexxhost had been set to ``v3password`` due to v2 being no longer available to give better errors to users. The ``auth_url`` was unfortunately left unversioned, so authentication ceased working. The ``auth_url`` has been changed to the versioned endpoint. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/bail-on-failed-service-cf299c37d5647b08.yaml0000664000175000017500000000032600000000000027311 0ustar00zuulzuul00000000000000--- upgrade: - | When a known service cannot be resolved to a supported version, an exception is now thrown instead of just returning a blank Proxy object. This allows returning sane errors to users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-configdrive-mkisofs-xorrisofs-075db4d7d80e5a13.yaml0000664000175000017500000000046000000000000033103 0ustar00zuulzuul00000000000000--- features: - | When generating a config drive for baremetal, "mkisofs" and "xorrisofs" are now supported beside the already available "genisoimage" binary. This is useful on environment where the "genisoimage" binary is not available but "mkisofs" and/or "xorrisofs" are available. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-details-09b27fba82111cfb.yaml0000664000175000017500000000100500000000000026571 0ustar00zuulzuul00000000000000--- features: - | The objects returned by baremetal detailed listing functions (``connection.baremetal.{nodes,ports,chassis,port_groups}``) are now fully functional, e.g. can be directly updated or deleted. deprecations: - | The following baremetal resource classes are no longer used and will be removed in a future release: ``NodeDetail``, ``PortDetail``, ``ChassisDetail`` and ``PortGroupDetail``. The regular ``Node``, ``Port``, ``Chassis`` and ``PortGroup`` are now used instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-errors-5cc871e8df4c9d95.yaml0000664000175000017500000000012200000000000026516 0ustar00zuulzuul00000000000000--- fixes: - | Adds support for error messages from the bare metal service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-fields-1f6fbcd8bd1ea2aa.yaml0000664000175000017500000000012000000000000026707 0ustar00zuulzuul00000000000000--- fixes: - | Fixes specifying fields when listing bare metal resources. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-fields-624546fa533a8287.yaml0000664000175000017500000000024100000000000026135 0ustar00zuulzuul00000000000000--- features: - | Adds support for fetching specific fields when getting bare metal `Node`, `Port`, `PortGroup`, `Chassis` and `Allocation` resources. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-fields-convert-857b8804327f1e86.yaml0000664000175000017500000000024100000000000027624 0ustar00zuulzuul00000000000000--- fixes: - | Fixes conversion of the bare metal ``fields`` argument from SDK to server-side field names (e.g. ``instance_id`` to ``instance_uuid``). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-introspection-973351b3ee76309e.yaml0000664000175000017500000000011700000000000027653 0ustar00zuulzuul00000000000000--- features: - | Adds support for the bare metal introspection service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-maintenance-5cb95c6d898d4d72.yaml0000664000175000017500000000013100000000000027403 0ustar00zuulzuul00000000000000--- features: - | Implements updating the baremetal Node's ``maintenance_reason``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-patch-feebd96b1b92f3b9.yaml0000664000175000017500000000074600000000000026442 0ustar00zuulzuul00000000000000--- features: - | Adds support for changing bare metal resources by providing a JSON patch. Adds the following calls to the bare metal proxy: ``patch_node``, ``patch_port``, ``patch_port_group`` and ``patch_chassis``. deprecations: - | The ``set_node_instance_info`` call is deprecated, use ``patch_machine`` with the same arguments instead. - | The ``purge_node_instance_info`` call is deprecated, use ``patch_machine`` or ``update_machine`` instead. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-ports-cc0f56ae0d192aba.yaml0000664000175000017500000000024200000000000026451 0ustar00zuulzuul00000000000000--- features: - | The ``OpenStackCloud`` bare metal NIC calls now support all microversions supported by the SDK. Previously version 1.6 was hardcoded. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-reservation-40327923092e9647.yaml0000664000175000017500000000050700000000000027076 0ustar00zuulzuul00000000000000--- features: - | Added ``wait_for_node_reservation`` to the baremetal proxy. deprecations: - | The `OpenStackCloud` ``wait_for_baremetal_node_lock`` call is deprecated. Generally, users should not have to call it. The new ``wait_for_node_reservation`` from the baremetal proxy can be used when needed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-retired-fields-f56a4632ad4797d7.yaml0000664000175000017500000000014000000000000027734 0ustar00zuulzuul00000000000000--- features: - | Adds ``is_retired`` and ``retired_reason`` to the baremetal Node schema.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-retries-804f553b4e22b3bf.yaml0000664000175000017500000000060400000000000026553 0ustar00zuulzuul00000000000000--- fixes: - | Changes the ``baremetal.attach_vif_to_node`` call to retry HTTP CONFLICT by default. While it's a valid error code when a VIF is already attached to a node, the same code is also used when the target node is locked. The latter happens more often, so the retries are now on by default and can be disabled by setting ``retry_on_conflict`` to ``False``. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-retries-ff8aa8f73fb97415.yaml0000664000175000017500000000033000000000000026654 0ustar00zuulzuul00000000000000--- features: - | The bare metal operations now retry HTTP 409 and 503 by default. The number of retries can be changes via the ``baremetal_status_code_retries`` configuration option (defaulting to 5). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-traits-d1137318db33b8d1.yaml0000664000175000017500000000012300000000000026314 0ustar00zuulzuul00000000000000--- features: - | Implements add/remove/set traits API for bare metal nodes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-update-80effb38aae8e02d.yaml0000664000175000017500000000015600000000000026607 0ustar00zuulzuul00000000000000--- fixes: - | Correct updating bare metal resources. Previously an incorrect body used to be sent. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-validate-ccce2a37d2a20d96.yaml0000664000175000017500000000013500000000000027022 0ustar00zuulzuul00000000000000--- features: - | Adds support for bare metal node validation to the bare metal proxy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-vif-122457118c722a9b.yaml0000664000175000017500000000011700000000000025437 0ustar00zuulzuul00000000000000--- features: - | Implements VIF attach/detach API for bare metal nodes. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/baremetal-wait-e4571cdb150b188a.yaml0000664000175000017500000000042100000000000026035 0ustar00zuulzuul00000000000000--- fixes: - | The baremetal calls ``wait_for_nodes_provision_state``, ``wait_for_allocation`` and the baremetal introspection call ``wait_for_introspection`` now raise ``ResourceFailure`` on reaching an error state instead of a generic ``SDKException``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/basic-api-cache-4ad8cf2754b004d1.yaml0000664000175000017500000000012300000000000026025 0ustar00zuulzuul00000000000000--- features: - | Add possibility to cache GET requests using dogpile cache. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/bgpvpn-list-filters-e76183a7008c0631.yaml0000664000175000017500000000042400000000000026637 0ustar00zuulzuul00000000000000--- features: - | ``openstack.network.v2.bgpvpn.BgpVpn`` can now be filtered by its associations to `networks`, `routers` and `ports. Additionally, filtering for the attributes `name`, `project_id`, `local_pref`, `vni` and `type` is now done on server-side. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/block-storage-backup-5886e91fd6e423bf.yaml0000664000175000017500000000013100000000000027163 0ustar00zuulzuul00000000000000--- features: - Implement block-storage.v2 Backup resource with restore functionality. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/block-storage-init-return-95b465b4755f03ca.yaml0000664000175000017500000000051600000000000030110 0ustar00zuulzuul00000000000000--- features: - | Methods ``openstack.block_storage.v3.volume.Volume.init_attachment`` and ``block_storage.init_volume_attachment`` now return the results of the POST request instead of None. This replicates the behaviour of cinderclient; the returned data is used by nova and ironic for managing volume attachments.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/block-storage-qs-0e3b69be2e709b65.yaml0000664000175000017500000000011700000000000026331 0ustar00zuulzuul00000000000000--- features: - | Add block storage QuotaSet resource and proxy methods. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/block-storage-v3-9798d584d088c048.yaml0000664000175000017500000000007400000000000026045 0ustar00zuulzuul00000000000000--- features: - | Added support for block storage v3. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/block_storage-type_encryption-121f8a222c822fb5.yaml0000664000175000017500000000011400000000000031121 0ustar00zuulzuul00000000000000--- features: - Add support for block storage type encryption parameters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/boot-on-server-group-a80e51850db24b3d.yaml0000664000175000017500000000017100000000000027151 0ustar00zuulzuul00000000000000--- features: - Added ``group`` parameter to create_server to allow booting a server into a specific server group. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/bug-2001080-de52ead3c5466792.yaml0000664000175000017500000000044000000000000024543 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 2001080 `_] Project update will only update the enabled field of projects when ``enabled=True`` or ``enabled=False`` is passed explicitly. The previous behavior had ``enabled=True`` as the default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/bug-2010898-430da335e4df0efe.yaml0000664000175000017500000000023100000000000024705 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 2010898 `_] Fix Swift endpoint url handling to determine info/caps url ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/cache-auth-in-keyring-773dd5f682cd1610.yaml0000664000175000017500000000024600000000000027152 0ustar00zuulzuul00000000000000--- features: - | Added support for optionally caching auth information int the local keyring. Requires the installation of the python ``keyring`` package. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/cache-in-use-volumes-c7fa8bb378106fe3.yaml0000664000175000017500000000011200000000000027160 0ustar00zuulzuul00000000000000--- fixes: - Fixed caching the volume list when volumes are in use. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/catch-up-release-notes-e385fad34e9f3d6e.yaml0000664000175000017500000000102500000000000027574 0ustar00zuulzuul00000000000000--- features: - Swiftclient instantiation now provides authentication information so that long lived swiftclient objects can reauthenticate if necessary. - Add support for explicit v2password auth type. - Add SSL support to VEXXHOST vendor profile. - Add zetta.io cloud vendor profile. fixes: - Fix bug where project_domain_{name,id} was set even if project_{name,id} was not set. other: - HPCloud vendor profile removed due to cloud shutdown. - RunAbove vendor profile removed due to migration to OVH. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/change-attach-vol-return-value-4834a1f78392abb1.yaml0000664000175000017500000000040500000000000031002 0ustar00zuulzuul00000000000000--- upgrade: - | The ``attach_volume`` method now always returns a ``volume_attachment`` object. Previously, ``attach_volume`` would return a ``volume`` object if it was called with ``wait=True`` and a ``volume_attachment`` object otherwise. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/cinder_volume_backups_support-6f7ceab440853833.yaml0000664000175000017500000000017600000000000031240 0ustar00zuulzuul00000000000000--- features: - Add support for Cinder volume backup resources, with the usual methods (search/list/get/create/delete). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/cinderv2-norm-fix-037189c60b43089f.yaml0000664000175000017500000000012100000000000026201 0ustar00zuulzuul00000000000000--- fixes: - Fixed the volume normalization function when used with cinder v2. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/cleanup-objects-f99aeecf22ac13dd.yaml0000664000175000017500000000035200000000000026527 0ustar00zuulzuul00000000000000--- features: - If shade has to create objects in swift to upload an image, it will now delete those objects upon successful image creation as they are no longer needed. They will also be deleted on fatal import errors. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/cloud-profile-status-e0d29b5e2f10e95c.yaml0000664000175000017500000000034300000000000027315 0ustar00zuulzuul00000000000000--- features: - Add a field to vendor cloud profiles to indicate active, deprecated and shutdown status. A message to the user is triggered when attempting to use cloud with either deprecated or shutdown status. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/clustering-resource-deletion-bed869ba47c2aac1.yaml0000664000175000017500000000110400000000000031165 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a regression in deleting Node and Cluster resources in clustering caused by the addition of the ``location`` property to all resource objects. Previously the delete calls had directly returned the ``location`` field returned in the headers from the clustering service pointing to an Action resource that could be fetched to get status on the delete operation. 
The delete calls now return an Action resource directly that is correctly constructed so that ``wait_for_status`` and ``wait_for_deleted`` work as expected. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/complete-aggregate-functions-45d5f2beeeac2b48.yaml0000664000175000017500000000026500000000000031135 0ustar00zuulzuul00000000000000--- features: - Complete compute.aggregate functions to the latest state fixes: - aggregate.deleted property is renamed to 'is_deleted' to comply with the naming convention ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/compute-microversion-2-17-b05cb87580b8d56a.yaml0000664000175000017500000000027600000000000027735 0ustar00zuulzuul00000000000000--- features: - | Add support for Compute API microversion 2.17, which allows admins to trigger a crash dump for a server. This can be useful for debugging misbehaving guests. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/compute-microversion-2-73-abae1d0c3740f76e.yaml0000664000175000017500000000021200000000000030052 0ustar00zuulzuul00000000000000--- features: - | Add support for Compute API microversion 2.73, which allows admins to specify a reason when locking a server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/compute-microversion-2-89-8c5187cc3bf6bd02.yaml0000664000175000017500000000040300000000000030013 0ustar00zuulzuul00000000000000--- features: - | The 2.89 API microversion is now supported for the compute service. This adds additional fields to the ``os-volume_attachments`` API, represented by the ``openstack.compute.v2.volume_attachment.VolumeAttachment`` resource. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/compute-quota-set-e664412d089945d2.yaml0000664000175000017500000000011100000000000026327 0ustar00zuulzuul00000000000000--- features: - | Add support for QuotaSet in the compute service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/compute-quotas-b07a0f24dfac8444.yaml0000664000175000017500000000027500000000000026213 0ustar00zuulzuul00000000000000--- features: - Add new APIs, OperatorCloud.get_compute_quotas(), OperatorCloud.set_compute_quotas() and OperatorCloud.delete_compute_quotas() to manage nova quotas for projects and users././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/compute-restore-server-020bf091acc9f8df.yaml0000664000175000017500000000035600000000000027752 0ustar00zuulzuul00000000000000--- features: - | The ``openstack.compute.v2.server.Server`` object now provides a ``restore`` method to restore it from a soft-deleted state, while the compute proxy method provides an equivalent ``restore_server`` method. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/compute-service-zone-2b25ec705b0156c4.yaml0000664000175000017500000000035700000000000027145 0ustar00zuulzuul00000000000000--- upgrade: - | The ``zone`` attribute on compute ``Service`` objects has been renamed to ``availability_zone`` to match all of the other resources, and also to better integrate with the ``Resource.location`` attribute. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/compute-usage-defaults-5f5b2936f17ff400.yaml0000664000175000017500000000066100000000000027463 0ustar00zuulzuul00000000000000--- features: - get_compute_usage now has a default value for the start parameter of 2010-07-06. That was the date the OpenStack project started. It's completely impossible for someone to have Nova usage data that goes back further in time. Also, both the start and end date parameters now also accept strings which will be parsed and timezones will be properly converted to UTC which is what Nova expects. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=openstacksdk-4.0.0/releasenotes/notes/compute-volume-attachment-proxy-method-rework-dc35fe9ca3af1c16.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/compute-volume-attachment-proxy-method-rework-dc35fe9ca3af1c160000664000175000017500000000072200000000000033500 0ustar00zuulzuul00000000000000--- upgrade: - | The signatures of the various volume attachment-related methods in the compute API proxy layer have changed. These were previously incomplete and did not function as expected in many scenarios. Some callers may need to be reworked. The affected proxy methods are: - ``create_volume_attachment`` - ``delete_volume_attachment`` - ``update_volume_attachment`` - ``get_volume_attachment`` - ``volume_attachments`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/conf-object-ctr-c0e1da0a67dad841.yaml0000664000175000017500000000033500000000000026253 0ustar00zuulzuul00000000000000--- features: - | Added the ability to create a ``Connection`` from an ``oslo.config`` ``CONF`` object. This is primarily intended to be used by OpenStack services using SDK for inter-service communication. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/config-aliases-0f6297eafd05c07c.yaml0000664000175000017500000000042400000000000026115 0ustar00zuulzuul00000000000000--- features: - | Config values now support service-type aliases. The correct config names are based on the official service type, such as ``block_storage_api_version``, but with this change, legacy aliases such as ``volume_api_version`` are also supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/config-flavor-specs-ca712e17971482b6.yaml0000664000175000017500000000017000000000000026660 0ustar00zuulzuul00000000000000--- features: - Adds ability to add a config setting to clouds.yaml to disable fetching extra_specs from flavors. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/configdrive-f8ca9f94b2981db7.yaml0000664000175000017500000000022600000000000025551 0ustar00zuulzuul00000000000000--- features: - | Supports Bare Metal API version 1.56, which allows building a config drive on the server side from a provided dictionary. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/container-search-b0f4253ce2deeda5.yaml0000664000175000017500000000031000000000000026600 0ustar00zuulzuul00000000000000--- features: - | Containers are now searchable both with a JMESPath expression or a dict of container attributes via the ``openstack.connection.Connection.search_containers`` function. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/create-object-data-870cb543543aa983.yaml0000664000175000017500000000025000000000000026426 0ustar00zuulzuul00000000000000--- features: - | Add a data parameter to ``openstack.connection.Connection.create_object`` so that data can be passed in directly instead of through a file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/create-object-directory-98e2cae175cc5082.yaml0000664000175000017500000000045300000000000027667 0ustar00zuulzuul00000000000000--- features: - | Added a ``create_directory_marker_object``' method to allow for easy creation of zero-byte 'directory' marker objects. These are not needed in most cases, but on some clouds they are used by Static Web and Web Listings in swift to facilitate directory traversal. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/create-stack-fix-12dbb59a48ac7442.yaml0000664000175000017500000000021600000000000026275 0ustar00zuulzuul00000000000000--- fixes: - The create_stack() call was fixed to call the correct iterator method and to return the updated stack object when waiting. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/create-subnet-by-subnetpool-eba1129c67ed4d96.yaml0000664000175000017500000000020100000000000030570 0ustar00zuulzuul00000000000000--- features: - | Added support for specifying the subnetpool to use when creating subnets (``subnetpool_name_or_id``) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/create_server_network_fix-c4a56b31d2850a4b.yaml0000664000175000017500000000040400000000000030402 0ustar00zuulzuul00000000000000--- fixes: - The create_server() API call would not use the supplied 'network' parameter if the 'nics' parameter was also supplied, even though it would be an empty list. It now uses 'network' if 'nics' is not supplied or if it is an empty list. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/create_service_norm-319a97433d68fa6a.yaml0000664000175000017500000000013000000000000027111 0ustar00zuulzuul00000000000000--- fixes: - The returned data from a create_service() call was not being normalized. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/cron_triggers_proxy-51aa89e91bbb9798.yaml0000664000175000017500000000011500000000000027275 0ustar00zuulzuul00000000000000--- features: - | Add workflow CronTrigger resource and proxy methods. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/data-model-cf50d86982646370.yaml0000664000175000017500000000053200000000000024755 0ustar00zuulzuul00000000000000--- features: - Explicit data model contracts are now defined for Flavors, Images, Security Groups, Security Group Rules, and Servers. 
- Resources with data model contracts are now being returned with 'location' attribute. The location carries cloud name, region name and information about the project that owns the resource. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/default-cloud-7ee0bcb9e5dd24b9.yaml0000664000175000017500000000042300000000000026124 0ustar00zuulzuul00000000000000--- issues: - If there was only one cloud defined in clouds.yaml os-client-config was requiring the cloud parameter be passed. This is inconsistent with how the envvars cloud works which WILL work without setting the cloud parameter if it's the only cloud. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/default-microversion-b2401727cb591002.yaml0000664000175000017500000000046600000000000027057 0ustar00zuulzuul00000000000000--- features: - | Versions set in config via ``*_api_version`` or ``OS_*_API_VERSION`` that have a ``.`` in them will be also passed as the default microversion to the Adapter constructor. An additional config option, ``*_default_microversion`` has been added to support being more explicit. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/delete-autocreated-1839187b0aa35022.yaml0000664000175000017500000000025100000000000026454 0ustar00zuulzuul00000000000000--- features: - Added new method, delete_autocreated_image_objects that can be used to delete any leaked objects shade may have created on behalf of the user. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/delete-image-objects-9d4b4e0fff36a23f.yaml0000664000175000017500000000212500000000000027265 0ustar00zuulzuul00000000000000--- fixes: - Delete swift objects uploaded in service of uploading images at the time that the corresponding image is deleted. On some clouds, image uploads are accomplished by uploading the image to swift and then running a task-import. As shade does this action on behalf of the user, it is not reasonable to assume that the user would then be aware of or manage the swift objects shade created, which led to an ongoing leak of swift objects. - Upload swift Large Objects as Static Large Objects by default. Shade automatically uploads objects as Large Objects when they are over a segment_size threshold. It had been doing this as Dynamic Large Objects, which sound great, but which have the downside of not deleting their sub-segments when the primary object is deleted. Since nothing in the shade interface exposes that the object was segmented, the user would not know they would also need to find and delete the segments. Instead, we now upload as Static Large Objects which behave as expected and delete segments when the object is deleted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/delete-obj-return-a3ecf0415b7a2989.yaml0000664000175000017500000000024200000000000026500 0ustar00zuulzuul00000000000000--- fixes: - The delete_object() method was not returning True/False, similar to other delete methods. It is now consistent with the other delete APIs. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/delete_project-399f9b3107014dde.yaml0000664000175000017500000000034700000000000026070 0ustar00zuulzuul00000000000000--- fixes: - The delete_project() API now conforms to our standard of returning True when the delete succeeds, or False when the project was not found. It would previously raise an expection if the project was not found. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=openstacksdk-4.0.0/releasenotes/notes/deprecate-remote_ip_prefix-metering-label-rules-843d5a962e4e428c.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/deprecate-remote_ip_prefix-metering-label-rules-843d5a962e4e420000664000175000017500000000047700000000000033225 0ustar00zuulzuul00000000000000--- deprecations: - | Deprecate the use of 'remote_ip_prefix' in metering label rules, and it will be removed in future releases. One should use instead the 'source_ip_prefix' and/or 'destination_ip_prefix' parameters. For more details, you can check the spec: https://review.opendev.org/#/c/744702/. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/deprecated-compute-image-proxy-apis-986263f6aa1b1b25.yaml0000664000175000017500000000046100000000000032032 0ustar00zuulzuul00000000000000--- deprecations: - | The following Compute service proxy methods are now deprecated: * ``find_image`` * ``get_image`` * ``delete_image`` * ``images`` These are proxy APIs for the Image service. You should use the Image service instead via the Image service proxy methods. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/deprecated-profile-762afdef0e8fc9e8.yaml0000664000175000017500000000030200000000000027143 0ustar00zuulzuul00000000000000--- deprecations: - | ``openstack.profile.Profile`` has been deprecated and will be removed in the ``1.0`` release. Users should use the functions in ``openstack.config`` instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/disable-service-39df96ef8a817785.yaml0000664000175000017500000000105600000000000026171 0ustar00zuulzuul00000000000000--- features: - | ``has_{service_type}`` is a boolean config option that allows asserting that a given service does not exist or should not be used in a given cloud. Doing this will now cause the corresponding service ``Proxy`` object to not be created and in its place is an object that will throw exceptions if used. - | ``{service_type}_disabled_reason`` is a new string config option that can be set to indicate a reason why a service has been disabled. This string will be used in exceptions or log warnings emitted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/dns-domain-parameter-d3acfc3287a9d632.yaml0000664000175000017500000000015300000000000027240 0ustar00zuulzuul00000000000000--- features: - | Added dns_domain parameter into the create_network and update_network methods. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/domain_operations_name_or_id-baba4cac5b67234d.yaml0000664000175000017500000000017600000000000031245 0ustar00zuulzuul00000000000000--- features: - Added name_or_id parameter to domain operations, allowing an admin to update/delete/get by domain name. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/drop-Resource-allow_get-attribute-fec75b551fb79465.yaml0000664000175000017500000000023300000000000031672 0ustar00zuulzuul00000000000000--- upgrade: - | The ``allow_get`` attribute of ``openstack.resource.Resource`` has been removed. Use ``allow_fetch`` or ``allow_list`` instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/drop-formatter-deserialize-30b19956fb79bb8d.yaml0000664000175000017500000000024700000000000030432 0ustar00zuulzuul00000000000000--- upgrade: - | The ``openstack.format.Formatter`` class no longer defines a ``serialize`` method to override. This was unused and unneccessary complexity. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/drop-python27-b824f9ce51cb1ab7.yaml0000664000175000017500000000012200000000000025735 0ustar00zuulzuul00000000000000--- prelude: > As of this release, python v2 is neither tested nor supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/drop-senlin-cloud-layer-c06d496acc70b014.yaml0000664000175000017500000000015600000000000027610 0ustar00zuulzuul00000000000000--- upgrade: - | Cloud layer operations for Senlin service are dropped due to big amount of bugs there. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/dropped-python-3.5-b154887cce87947c.yaml0000664000175000017500000000007200000000000026373 0ustar00zuulzuul00000000000000--- upgrade: - | Python 3.5 is no longer supported. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/dual-stack-networks-8a81941c97d28deb.yaml0000664000175000017500000000063300000000000027071 0ustar00zuulzuul00000000000000--- features: - Added support for dual stack networks where the IPv4 subnet and the IPv6 subnet have opposite public/private qualities. It is now possible to add configuration to clouds.yaml that will indicate that a network is public for v6 and private for v4, which is otherwise very difficult to correctly infer while setting server attributes like private_v4, public_v4 and public_v6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/endpoint-from-catalog-bad36cb0409a4e6a.yaml0000664000175000017500000000020600000000000027463 0ustar00zuulzuul00000000000000--- features: - Add new method, 'endpoint_for' which will return the raw endpoint for a given service from the current catalog. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/expose-client-side-rate-limit-ddb82df7cb92091c.yaml0000664000175000017500000000050600000000000031062 0ustar00zuulzuul00000000000000--- features: - | Client-side rate limiting is now directly exposed via ``rate_limit`` and ``concurrency`` parameters. A single value can be given that applies to all services, or a dict of service-type and value if different client-side rate or concurrency limits should be used for different services. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/false-not-attribute-error-49484d0fdc61f75d.yaml0000664000175000017500000000040500000000000030203 0ustar00zuulzuul00000000000000--- fixes: - delete_image used to fail with an AttributeError if an invalid image name or id was passed, rather than returning False which was the intent. This is worthy of note because it's a behavior change, but the previous behavior was a bug. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/feature-server-metadata-50caf18cec532160.yaml0000664000175000017500000000024700000000000027655 0ustar00zuulzuul00000000000000--- features: - Add new APIs, OpenStackCloud.set_server_metadata() and OpenStackCloud.delete_server_metadata() to manage metadata of existing nova compute instances ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/find_server-use-details-9a22e83ec6540c98.yaml0000664000175000017500000000013300000000000027622 0ustar00zuulzuul00000000000000--- fixes: - | Make sure find_server returns server details when looking up by name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fip_timeout-035c4bb3ff92fa1f.yaml0000664000175000017500000000021300000000000025624 0ustar00zuulzuul00000000000000--- fixes: - When creating a new server, the timeout was not being passed through to floating IP creation, which could also timeout. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/firewall-resources-c7589d288dd57e35.yaml0000664000175000017500000000015300000000000026734 0ustar00zuulzuul00000000000000--- features: - | Implement fwaas v2 resources for managing firewall groups, rules and policies. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-compat-with-old-keystoneauth-66e11ee9d008b962.yaml0000664000175000017500000000044500000000000031416 0ustar00zuulzuul00000000000000--- issues: - Fixed a regression when using latest os-client-config with the keystoneauth from stable/newton. Although this isn't a super common combination, the added feature that broke the interaction is really not worthy of the incompatibility, so a workaround was added. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-config-drive-a148b7589f7e1022.yaml0000664000175000017500000000037200000000000026157 0ustar00zuulzuul00000000000000--- issues: - Fixed an issue where nodepool could cause config_drive to be passed explicitly as None, which was getting directly passed through to the JSON. Also fix the same logic for key_name and scheduler_hints while we're in there. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-delete-ips-1d4eebf7bc4d4733.yaml0000664000175000017500000000033600000000000026132 0ustar00zuulzuul00000000000000--- issues: - Fixed the logic in delete_ips and added regression tests to cover it. The old logic was incorrectly looking for floating ips using port syntax. It was also not swallowing errors when it should. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-dns-return-c810d5e6736322f1.yaml0000664000175000017500000000025400000000000025675 0ustar00zuulzuul00000000000000--- fixes: - | Fixed issue where the dns methods were returning False instead of None when resources were not found. - | Fixed jsonification under python3. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-endpoint-override-ac41baeec9549ab3.yaml0000664000175000017500000000023000000000000027577 0ustar00zuulzuul00000000000000--- fixes: - | Fixed issue where ``endpoint_override`` settings were not getting passed to the Adapter constructor in ``get_session_client``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-floating-ip-private-matching-84e369eee380a185.yaml0000664000175000017500000000027200000000000031343 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where an optimization in the logic to find floating ips first when looking for public ip addresses broke finding the correct private address. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-for-microversion-70cd686b6d6e3fd0.yaml0000664000175000017500000000127700000000000027335 0ustar00zuulzuul00000000000000--- fixes: - | In April 2019 the microversion support for the Server resource was increased to ``2.72``. Unfortunately, due to an issue with version discovery documents, this increase never actually became effective. A fix is coming in ``3.17.2`` of ``keystoneauth`` which will unbreak version discovery and cause the microversion support to start working. 
upgrade: - | Due to the fix in microversion support in `keystoneauth`, Servers will be fetched using microversion ``2.72``. Code that assumes the existence of a ``flavor.id`` field in the Server record should be removed, as it does not exist in new microversions and cannot be filled in behind the scenes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-image-hw_qemu_guest_agent-bf1147e52c84b5e8.yaml0000664000175000017500000000016400000000000031054 0ustar00zuulzuul00000000000000--- fixes: - | hw_qemu_guest_agent attribute of the image is a string boolean with values `yes` and `no`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-image-task-ae79502dd5c7ecba.yaml0000664000175000017500000000034200000000000026175 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a regression in image upload when the cloud uses the task upload method. A refactor led to attempting to update the disk_format and container_format values after the image had been imported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-list-networks-a592725df64c306e.yaml0000664000175000017500000000007500000000000026510 0ustar00zuulzuul00000000000000--- fixes: - Fix for list_networks() ignoring any filters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-microversion-354dc70deb2b2f0b.yaml0000664000175000017500000000053200000000000026577 0ustar00zuulzuul00000000000000--- features: - | Modify microversion handling. Microversion chosen by the client/user is respected in the microversion negotiation. 
For features, requiring particular microversion, it would be ensured it is supported by the server side and required microversion is <= chosen microversion, otherwise call will be rejected. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-missing-futures-a0617a1c1ce6e659.yaml0000664000175000017500000000025500000000000027077 0ustar00zuulzuul00000000000000--- fixes: - Added missing dependency on futures library for python 2. The depend was missed in testing due to it having been listed in test-requirements already. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-neutron-endpoint-mangling-a9dd89dd09bc71ec.yaml0000664000175000017500000000021100000000000031262 0ustar00zuulzuul00000000000000--- fixes: - | Fixed incorrect neutron endpoint mangling for the cases when the catalog contains a versioned neutron endpoint. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-os_auth_type-v3multifactor-049cf52573d9e00e.yaml0000664000175000017500000000053700000000000031170 0ustar00zuulzuul00000000000000--- fixes: - | It is now possible to configure ``v3multifactor`` auth type using environment variables. For example: export OS_AUTH_TYPE=v3multifactor export OS_AUTH_METHODS=v3password,v3totp export OS_USERNAME=admin export OS_PASSWORD=password export OS_PASSCODE=12345 openstack server list ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-properties-key-conflict-2161ca1faaad6731.yaml0000664000175000017500000000016600000000000030556 0ustar00zuulzuul00000000000000--- issues: - Images in the cloud with a string property named "properties" caused image normalization to bomb. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-server-unshelve-to-host-cb02eee8a20ba478.yaml0000664000175000017500000000017600000000000030620 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the issue that unshelving a server to a specific host was failed due to unhandled host option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-supplemental-fips-c9cd58aac12eb30e.yaml0000664000175000017500000000052700000000000027620 0ustar00zuulzuul00000000000000--- fixes: - Fixed an issue where shade could report a floating IP being attached to a server erroneously due to only matching on fixed ip. Changed the lookup to match on port ids. This adds an API call in the case where the workaround is needed because of a bug in the cloud, but in most cases it should have no difference. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-task-timing-048afea680adc62e.yaml0000664000175000017500000000024200000000000026316 0ustar00zuulzuul00000000000000--- fixes: - | Fix a regression where the ``TaskManager.post_run_task`` ``elapsed_time`` argument was not reflecting the time taken by the actual task. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-update-domain-af47b066ac52eb7f.yaml0000664000175000017500000000010700000000000026621 0ustar00zuulzuul00000000000000--- fixes: - Fix for update_domain() where 'name' was not updatable. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fix-yaml-load-3e6bd852afe549b4.yaml0000664000175000017500000000016500000000000025705 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where importing openstacksdk changed the behavior of ``yaml.load`` globally. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fixed-magnum-type-7406f0a60525f858.yaml0000664000175000017500000000036500000000000026302 0ustar00zuulzuul00000000000000--- fixes: - Fixed magnum service_type. shade was using it as 'container' but the correct type is 'container-infra'. It's possible that on old clouds with magnum shade may now do the wrong thing. If that occurs, please file a bug. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/flavor-cloud-layer-0b4d130ac1c5e7c4.yaml0000664000175000017500000000014400000000000026715 0ustar00zuulzuul00000000000000--- other: - Flavor operations of the cloud layer are switched to the rely on the proxy layer ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/flavor_fix-a53c6b326dc34a2c.yaml0000664000175000017500000000040100000000000025346 0ustar00zuulzuul00000000000000--- features: - Flavors will always contain an 'extra_specs' attribute. Client cruft, such as 'links', 'HUMAN_ID', etc. has been removed. fixes: - Setting and unsetting flavor extra specs now works. This had been broken since the 1.2.0 release. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/floating_ip_normalization-41e0edcdb0c98aee.yaml0000664000175000017500000000056000000000000030706 0ustar00zuulzuul00000000000000--- upgrade: - | No Munch conversion and normalization of the floating ips is happening anymore. For Neutron network a pure FloatingIP object is being returned, for Nova still munch object. deprecations: - | search_floating_ips method is deprecated and should not be used anymore. It is going to be dropped approximately after one major cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/fnmatch-name-or-id-f658fe26f84086c8.yaml0000664000175000017500000000030200000000000026461 0ustar00zuulzuul00000000000000--- features: - name_or_id parameters to search/get methods now support filename-like globbing. This means search_servers('nb0*') will return all servers whose names start with 'nb0'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/force_ipv4_no_ipv6_address-9842168b5d05d262.yaml0000664000175000017500000000031700000000000030143 0ustar00zuulzuul00000000000000--- upgrade: - | Cloud with the `force_ipv4` flag will no longer return a `public_v6` value, even if one is provided by the cloud. This is to avoid having entries for unconfigured interfaces. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/futurist-b54b0f449d410997.yaml0000664000175000017500000000045700000000000024712 0ustar00zuulzuul00000000000000--- features: - | Switched to the ``futurist`` library for managing background concurrent tasks. 
Introduced a new ``pool_executor`` parameter to `Connection` that allows passing any any futurist Executor for cases where the default ``ThreadPoolExecutor`` would not be appropriate. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/generate-form-signature-294ca46812f291d6.yaml0000664000175000017500000000020700000000000027546 0ustar00zuulzuul00000000000000--- features: - | Added methods to manage object store temp-url keys and generate signatures needed for FormPost middleware. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/get-limits-c383c512f8e01873.yaml0000664000175000017500000000010500000000000025065 0ustar00zuulzuul00000000000000--- features: - Allow to retrieve the limits of a specific project ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/get-object-raw-e58284e59c81c8ef.yaml0000664000175000017500000000021700000000000026010 0ustar00zuulzuul00000000000000--- features: - | Added ``get_object_raw`` method for downloading an object from swift and returning a raw requests Response object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/get-server-by-id-none-3e8538800fa09d82.yaml0000664000175000017500000000045700000000000027047 0ustar00zuulzuul00000000000000--- fixes: - | The ``get_server_by_id`` method is supposed to return ``None`` if the server in question can't be found, but a regression was introduced causing it to raise ``ResourceNotFound`` instead. This has been corrected and ``get_server_by_id`` returns ``None`` correctly again. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/get-usage-72d249ff790d1b8f.yaml0000664000175000017500000000010400000000000025042 0ustar00zuulzuul00000000000000--- features: - Allow to retrieve the usage of a specific project ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/get_compute_usage-01811dccd60dc92a.yaml0000664000175000017500000000017100000000000026711 0ustar00zuulzuul00000000000000--- upgrade: - | cloud.get_compute_usage method return instance of compute.usage.Usage class instead of munch. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/get_object_api-968483adb016bce1.yaml0000664000175000017500000000014500000000000026106 0ustar00zuulzuul00000000000000--- features: - Added a new API call, OpenStackCloud.get_object(), to download objects from swift. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/glance-image-pagination-0b4dfef22b25852b.yaml0000664000175000017500000000017300000000000027663 0ustar00zuulzuul00000000000000--- issues: - Fixed an issue where glance image list pagination was being ignored, leading to truncated image lists. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/glance-image-stores-2baa66e6743a2f2d.yaml0000664000175000017500000000013300000000000027046 0ustar00zuulzuul00000000000000--- features: - | Add support for specifying stores when doing glance image uploads. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/global-request-id-d7c0736f43929165.yaml0000664000175000017500000000107600000000000026270 0ustar00zuulzuul00000000000000--- features: - | Added support for setting ``global_request_id`` on a ``Connection``. If done, this will cause all requests sent to send the request id header to the OpenStack services. Since ``Connection`` can otherwise be used multi-threaded, add a method ``global_request`` that returns a new ``Connection`` based on the old ``Connection`` but on which the new ``global_request_id`` has been set. Since a ``Connection`` can be used as a context manager, this also means the ``global_request`` method can be used in ``with`` statements. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/grant-revoke-assignments-231d3f9596a1ae75.yaml0000664000175000017500000000011300000000000030025 0ustar00zuulzuul00000000000000--- features: - add granting and revoking of roles from groups and users ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/identity-auth-url-f3ae8ef22d2bcab6.yaml0000664000175000017500000000036700000000000027041 0ustar00zuulzuul00000000000000--- features: - | The ``auth_url`` will be used for the default value of ``identity_endpoint_override`` in the absence of project or system-scope information. This should simplify some actions such as listing available projects. 
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=openstacksdk-4.0.0/releasenotes/notes/identity-cloud-mixin-inherited-role-assignments-8fe9ac9509d99f4d.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/identity-cloud-mixin-inherited-role-assignments-8fe9ac9509d99f0000664000175000017500000000057600000000000033423 0ustar00zuulzuul00000000000000--- features: - | Add support for ``inherited_to`` filter for listing identity role assignments in the cloud layer. This allows filtering by whether role grants are inheritable to sub-projects. deprecations: - | Deprecate ``os-inherit-extension-inherited-to`` in favor of ``inherited_to`` filter for listing identity role_assignments in the cloud layer. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/image-flavor-by-name-54865b00ebbf1004.yaml0000664000175000017500000000061300000000000026752 0ustar00zuulzuul00000000000000--- features: - The image and flavor parameters for create_server now accept name in addition to id and dict. If given as a name or id, shade will do a get_image or a get_flavor to find the matching image or flavor. If you have an id already and are not using any caching and the extra lookup is annoying, passing the id in as "dict(id='my-id')" will avoid the lookup. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/image-from-volume-9acf7379f5995b5b.yaml0000664000175000017500000000010200000000000026522 0ustar00zuulzuul00000000000000--- features: - Added ability to create an image from a volume. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/image-id-filter-key-b9b6b52139a27cbe.yaml0000664000175000017500000000044100000000000026752 0ustar00zuulzuul00000000000000--- features: - | It is now possible to filter ``openstack.image.v2.Image`` resources by ID using the ``id`` filter. While this is of little value when used with single IDs, it can be useful when combined with operators like ``in:`` to e.g. filter by multiple image IDs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/image-import-proxy-params-f19d8b6166104ebe.yaml0000664000175000017500000000051700000000000030210 0ustar00zuulzuul00000000000000--- features: - | The ``openstack.image.Image.import_image`` method and ``import_image`` image proxy method now accept the following additional paramters: - ``remote_region`` - ``remote_image_id`` - ``remote_service_interface`` These are required to support the ``glance-download`` image import method. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/image-import-support-97052cdbc8ce449b.yaml0000664000175000017500000000026700000000000027350 0ustar00zuulzuul00000000000000--- features: - | Added support for using the image import feature when creating an image. SDK will now fall back to using image import if there is an error during PUT. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/image-proxy-layer-kwarg-only-arguments-94c9b2033d386160.yaml0000664000175000017500000000024400000000000032376 0ustar00zuulzuul00000000000000--- upgrade: - | The signatures of the ``openstack.image.v2.import_image`` has changed. All arguments except ``image`` and ``method`` are now kwarg-only. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/image-update-76bd3bf24c1c1380.yaml0000664000175000017500000000024100000000000025477 0ustar00zuulzuul00000000000000--- upgrade: - | When using the Image API, it is no longer possible to set arbitrary properties, not known to the SDK, via ``image.update_image`` API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/improve-metrics-5d7ce70ce4021d72.yaml0000664000175000017500000000024400000000000026271 0ustar00zuulzuul00000000000000--- upgrade: - | API metrics emitted by OpenStackSDK to StatsD now contain status_code part of the metric name in order to improve information precision. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/infer-secgroup-source-58d840aaf1a1f485.yaml0000664000175000017500000000064700000000000027405 0ustar00zuulzuul00000000000000--- features: - If a cloud does not have a neutron service, it is now assumed that Nova will be the source of security groups. To handle clouds that have nova-network and do not have the security group extension, setting secgroup_source to None will prevent attempting to use them at all. If the cloud has neutron but it is not a functional source of security groups, set secgroup_source to nova. 
././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=openstacksdk-4.0.0/releasenotes/notes/introduce-source-and-destination-ip-prefixes-into-metering-label-rules-e04b797adac5d0d0.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/introduce-source-and-destination-ip-prefixes-into-metering-lab0000664000175000017500000000016300000000000034110 0ustar00zuulzuul00000000000000--- features: - | Add ``source_ip_prefix`` and ``destination_ip_prefix`` to Neutron metering label rules.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/introspection-node-6a3b7d55839ef82c.yaml0000664000175000017500000000013700000000000027012 0ustar00zuulzuul00000000000000--- fixes: - | Fixes using a full `Node` object as an argument to `start_introspection`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/ironic-conductors-support-3bf27e8b2f0299ba.yaml0000664000175000017500000000007200000000000030410 0ustar00zuulzuul00000000000000--- features: - | Support for Ironic Conductor API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/ironic-deploy-steps-2c0f39d7d2a13289.yaml0000664000175000017500000000011600000000000027000 0ustar00zuulzuul00000000000000--- features: - | Adds ``deploy_steps`` to baremetal node provisioning. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/ironic-deploy-template-support-fa56005365ed6e4d.yaml0000664000175000017500000000007700000000000031256 0ustar00zuulzuul00000000000000--- features: - | Support Deploy Templates for Ironic API././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/ironic-introspection_rules_support-18b0488a76800122.yaml0000664000175000017500000000011600000000000032030 0ustar00zuulzuul00000000000000features: - | Add support for Ironic Inspector Introspection Rules API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/ironic-microversion-ba5b0f36f11196a6.yaml0000664000175000017500000000017000000000000027137 0ustar00zuulzuul00000000000000--- features: - Add support for passing Ironic microversion to the ironicclient constructor in get_legacy_client. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/ironic-node-shard-35f2557c3dbfff1d.yaml0000664000175000017500000000011300000000000026615 0ustar00zuulzuul00000000000000--- features: - | Adds support for Node shards to baremetal service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/ironic-volume_target-support-8130361804366787.yaml0000664000175000017500000000007600000000000030373 0ustar00zuulzuul00000000000000--- features: - | Support for Ironic Volume Target API. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/ksa-discovery-86a4ef00d85ea87f.yaml0000664000175000017500000000017700000000000026033 0ustar00zuulzuul00000000000000--- other: - | All endpoint discovery logic is now handled by keystoneauth. There should be no behavior differences. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/less-file-hashing-d2497337da5acbef.yaml0000664000175000017500000000026200000000000026615 0ustar00zuulzuul00000000000000--- upgrade: - shade will now only generate file hashes for glance images if both hashes are empty. If only one is given, the other will be treated as an empty string. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/list-all_projects-filter-27f1d471a7848507.yaml0000664000175000017500000000137700000000000027666 0ustar00zuulzuul00000000000000--- features: - | A number of APIs support passing an admin-only ``all_projects`` filter when listing certain resources, allowing you to retrieve resources from all projects rather than just the current projects. This filter is now explicitly supported at the proxy layer for services and resources that support it. 
These are: * Block storage (v2) * ``find_snapshot`` * ``snapshots`` * ``find_volume`` * ``volumes`` * Block storage (v3) * ``find_snapshot`` * ``snapshots`` * ``find_volume`` * ``volumes`` * Compute (v2) * ``find_server`` * ``find_server_group`` * ``server_groups`` * Workflow (v2) * ``find_cron_triggers`` * ``cron_triggers`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/list-az-names-a38c277d1192471b.yaml0000664000175000017500000000007700000000000025477 0ustar00zuulzuul00000000000000--- features: - Added list_availability_zone_names API call. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/list-network-resources-empty-list-6aa760c01e7d97d7.yaml0000664000175000017500000000020600000000000031732 0ustar00zuulzuul00000000000000--- fixes: - | Basic networking list calls in the cloud layer been fixed to return an empty list if neutron is not running. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/list-role-assignments-keystone-v2-b127b12b4860f50c.yaml0000664000175000017500000000013100000000000031476 0ustar00zuulzuul00000000000000--- features: - Implement list_role_assignments for keystone v2, using roles_for_user. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/list-servers-all-projects-349e6dc665ba2e8d.yaml0000664000175000017500000000035000000000000030277 0ustar00zuulzuul00000000000000--- features: - Add 'all_projects' parameter to list_servers and search_servers which will tell Nova to return servers for all projects rather than just for the current project. This is only available to cloud admins. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/load-yaml-3177efca78e5c67a.yaml0000664000175000017500000000044000000000000025121 0ustar00zuulzuul00000000000000--- features: - Added a flag, 'load_yaml_config' that defaults to True. If set to false, no clouds.yaml files will be loaded. This is beneficial if os-client-config wants to be used inside of a service where end-user clouds.yaml files would make things more confusing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/location-server-resource-af77fdab5d35d421.yaml0000664000175000017500000000030300000000000030244 0ustar00zuulzuul00000000000000--- fixes: - | Corrected the location property on the ``Server`` resource to use the ``project_id`` from the remote resource rather than the information from the token of the user. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/log-request-ids-37507cb6eed9a7da.yaml0000664000175000017500000000026000000000000026340 0ustar00zuulzuul00000000000000--- other: - The contents of x-openstack-request-id are no longer added to object returned. Instead, they are logged to a logger named 'openstack.cloud.request_ids'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/machine-get-update-microversions-4b910e63cebd65e2.yaml0000664000175000017500000000064200000000000031572 0ustar00zuulzuul00000000000000--- features: - | The ``get_machine``, ``update_machine`` and ``patch_machine`` calls now support all Bare Metal API microversions supported by the SDK. Previously they used 1.6 unconditionally. upgrade: - | The baremetal API now returns ``available`` as provision state for nodes available for deployment. 
Previously, ``None`` could be returned for API version 1.1 (early Kilo) and older. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/magic-fixes-dca4ae4dac2441a8.yaml0000664000175000017500000000033700000000000025552 0ustar00zuulzuul00000000000000--- fixes: - Refactor ``OpenStackConfig._fix_backward_madness()`` into ``OpenStackConfig.magic_fixes()`` that allows subclasses to inject more fixup magic into the flow during ``get_one_cloud()`` processing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/make-cloud-region-standalone-848a2c4b5f3ebc29.yaml0000664000175000017500000000037100000000000030665 0ustar00zuulzuul00000000000000--- features: - | Updated the ``openstack.config.cloud_config.CloudRegion`` object to be able to store and retreive cache settings and the password callback object without needing an ``openstack.config.loader.OpenStackConfig`` object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/make-rest-client-dd3d365632a26fa0.yaml0000664000175000017500000000022000000000000026277 0ustar00zuulzuul00000000000000--- deprecations: - Renamed session_client to make_rest_client. session_client will continue to be supported for backwards compatability. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/make-rest-client-version-discovery-84125700f159491a.yaml0000664000175000017500000000032000000000000031477 0ustar00zuulzuul00000000000000--- features: - Add version argument to make_rest_client and plumb version discovery through get_session_client so that versioned endpoints are properly found if unversioned are in the catalog. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/make_object_metadata_easier.yaml-e9751723e002e06f.yaml0000664000175000017500000000036600000000000031411 0ustar00zuulzuul00000000000000--- features: - create_object() now has a "metadata" parameter that can be used to create an object with metadata of each key and value pair in that dictionary - Add an update_object() function that updates the metadata of a swift object ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/merge-shade-os-client-config-29878734ad643e33.yaml0000664000175000017500000000014700000000000030267 0ustar00zuulzuul00000000000000--- other: - The shade and os-client-config libraries have been merged into python-openstacksdk. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/meta-passthrough-d695bff4f9366b65.yaml0000664000175000017500000000041200000000000026466 0ustar00zuulzuul00000000000000--- features: - Added a parameter to create_image 'meta' which allows for providing parameters to the API that will not have any type conversions performed. For the simple case, the existing kwargs approach to image metadata is still the best bet. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/metadata-key-name-bugfix-77612a825c5145d7.yaml0000664000175000017500000000020500000000000027500 0ustar00zuulzuul00000000000000--- fixes: - Fixed a bug related to metadata's key name. 
An exception was raised when setting it to "delete"," clear" or "key" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/min-max-legacy-version-301242466ddefa93.yaml0000664000175000017500000000147300000000000027365 0ustar00zuulzuul00000000000000--- features: - Add min_version and max_version to get_legacy_client and to get_session_endpoint. At the moment this is only really fully plumbed through for cinder, which has extra special fun around volume, volumev2 and volumev3. Min and max versions to both methods will look through the options available in the service catalog and try to return the latest one available from the span of requested versions. This means a user can say volume_api_version=None, min_version=2, max_version=3 will get an endpoint from get_session_endpoint or a Client from cinderclient that will be either v2 or v3 but not v1. In the future, min and max version for get_session_endpoint should be able to sort out appropriate endpoints via version discovery, but that does not currently exist. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/mtu-settings-8ce8b54d096580a2.yaml0000664000175000017500000000033600000000000025544 0ustar00zuulzuul00000000000000--- features: - | create_network now exposes the mtu api option in accordance to network v2 api. This allows the operator to adjust the given MTU value which is needed in various complex network deployments. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/multiple-updates-b48cc2f6db2e526d.yaml0000664000175000017500000000101300000000000026602 0ustar00zuulzuul00000000000000--- features: - Removed unneeded calls that were made when deleting servers with floating ips. - Added pagination support for volume listing. 
upgrade: - Removed designateclient as a dependency. All designate operations are now performed with direct REST calls using keystoneauth Adapter. - Server creation calls are now done with direct REST calls. fixes: - Fixed a bug related to neutron endpoints that did not have trailing slashes. - Fixed issue with ports not having a created_at attribute. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/munch-sub-dict-e1619c71c26879cb.yaml0000664000175000017500000000016300000000000025724 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a regression with sub-dicts of server objects were not usable with object notation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/nat-source-field-7c7db2a724616d59.yaml0000664000175000017500000000046500000000000026245 0ustar00zuulzuul00000000000000--- features: - Added nat_source flag for networks. In some more complex clouds there can not only be more than one valid network on a server that NAT can attach to, there can also be more than one valid network from which to get a NAT address. Allow flagging a network so that it can be found. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/nat-source-support-92aaf6b336d0b848.yaml0000664000175000017500000000015200000000000026742 0ustar00zuulzuul00000000000000--- features: - Add support for networks being configured as the primary nat_source in clouds.yaml. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/net_provider-dd64b697476b7094.yaml0000664000175000017500000000012100000000000025524 0ustar00zuulzuul00000000000000--- features: - Network provider options are now accepted in create_network(). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network-add-tap-mirror-46376bd98ee69c81.yaml0000664000175000017500000000020700000000000027427 0ustar00zuulzuul00000000000000--- features: - | Add ``Tap Mirror`` and introduce the support for creating, reading, updating and deleting ``tap_mirrors``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network-data-bd94e4a499ba3e0d.yaml0000664000175000017500000000017300000000000025714 0ustar00zuulzuul00000000000000--- fixes: - | Fixes ``openstack.baremetal.configdrive.build`` to actually handle the ``network_data`` argument. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network-data-deb5772edc111428.yaml0000664000175000017500000000032700000000000025550 0ustar00zuulzuul00000000000000--- features: - | Adds support for `network_data `_ when building baremetal configdrives. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network-list-e6e9dafdd8446263.yaml0000664000175000017500000000065000000000000025711 0ustar00zuulzuul00000000000000--- features: - Support added for configuring metadata about networks for a cloud in a list of dicts, rather than in the external_network and internal_network entries. The dicts support a name, a routes_externally field, a nat_destination field and a default_interface field. deprecations: - external_network and internal_network are deprecated and should be replaced with the list of network dicts. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network-qos-rule-filter-keys-324e3222510fd362.yaml0000664000175000017500000000050300000000000030403 0ustar00zuulzuul00000000000000--- features: - | Added two filtering keys to ``QoSRuleType`` class query mapping, used for filtering the "list" command: "all_rules", to list all network QoS rule types implemented in Neutron, and "all_supported", to list all network QoS rule types supported by at least one networking mechanism driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network-quotas-b98cce9ffeffdbf4.yaml0000664000175000017500000000030000000000000026631 0ustar00zuulzuul00000000000000--- features: - Add new APIs, OperatorCloud.get_network_quotas(), OperatorCloud.set_network_quotas() and OperatorCloud.delete_network_quotas() to manage neutron quotas for projects and users././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=openstacksdk-4.0.0/releasenotes/notes/network-security-group-query-parameter-id-f6dda45b2c09dbaa.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network-security-group-query-parameter-id-f6dda45b2c09dbaa.yam0000664000175000017500000000053300000000000033467 0ustar00zuulzuul00000000000000--- features: - | The ``id`` field was added a query parameter for security_groups. A single security group id, or a list of security group ids can be passed. 
For example:: conn.network.security_groups(id=['f959e85a-1a87-4b5c-ae56-dc917ceeb584', 'a55c0100-7ded-40af-9c61-1d1b9a9c2692']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network_add_bgp_resources-c182dc2873d6db18.yaml0000664000175000017500000000077000000000000030402 0ustar00zuulzuul00000000000000--- features: - | Add BGP Speaker and BGP Peer resources, and introduce support for CRUD operations for these. Additional REST operations introduced for speakers: add_bgp_peer, remove_bgp_peer, add_gateway_network, remove_gateway_network, get_advertised_routes, get_bgp_dragents, add_bgp_speaker_to_draget, remove_bgp_speaker_from_dragent. One new REST method is added to agents to cover the features of Dynamic Routing Agents schedulers: get_bgp_speakers_hosted_by_dragent ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network_add_bgpvpn_resources-b3bd0b568c3c99db.yaml0000664000175000017500000000027700000000000031267 0ustar00zuulzuul00000000000000--- features: - | Add BGPVPN, BGPVPN Network Association, BGPVPN Port Association, and BGPVPN Router Association resources and introduce support for CRUD operations for these. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network_add_sfc_resources-8a52c0c8c1f8e932.yaml0000664000175000017500000000026600000000000030406 0ustar00zuulzuul00000000000000--- features: - | Add SFC resources: FlowClassifier, PortChain, PortPair, PortPairGroup and ServiceGraph resources and introduce support for CRUD operations for these. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/network_add_taas_resources-86a947265e11ce84.yaml0000664000175000017500000000020100000000000030420 0ustar00zuulzuul00000000000000--- features: - | Add ``Tap Service`` and ``Tap Flow`` resources, and introduce support for CRUD operations for these. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/neutron-discovery-54399116d5f810ee.yaml0000664000175000017500000000020000000000000026510 0ustar00zuulzuul00000000000000--- fixes: - | Added workaround for using neutron on older clouds where the version discovery document requires auth. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/neutron_availability_zone_extension-675c2460ebb50a09.yaml0000664000175000017500000000047600000000000032444 0ustar00zuulzuul00000000000000--- features: - | ``availability_zone_hints`` now accepted for ``create_network()`` when ``network_availability_zone`` extension is enabled on target cloud. - | ``availability_zone_hints`` now accepted for ``create_router()`` when ``router_availability_zone`` extension is enabled on target cloud. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/new-floating-attributes-213cdf5681d337e1.yaml0000664000175000017500000000017500000000000027650 0ustar00zuulzuul00000000000000--- features: - Added support for created_at, updated_at, description and revision_number attributes for floating ips. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/no-import-fallback-a09b5d5a11299933.yaml0000664000175000017500000000021700000000000026472 0ustar00zuulzuul00000000000000--- upgrade: - | Image upload will no longer fall back to attempting to use the import workflow if the initial upload does not work. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/no-inspect-associated-563e272785bb6016.yaml0000664000175000017500000000026100000000000027127 0ustar00zuulzuul00000000000000--- fixes: - | Machine inspection is now blocked for machines associated with an instance. This is to avoid "stealing" a machine from under a provisioner (e.g. Nova). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/no-more-troveclient-0a4739c21432ac63.yaml0000664000175000017500000000030500000000000026703 0ustar00zuulzuul00000000000000--- upgrade: - troveclient is no longer a hard dependency. Users who were using shade to construct a troveclient Client object should use os_client_config.make_legacy_client instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/no-start-task-manager-56773f3ea5eb3a59.yaml0000664000175000017500000000031500000000000027300 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a regression in the new `TaskManager` code which caused programs that were passing in a `TaskManager` that they had been running `start` on to fail due to a double call. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/node-boot-devices-2ab4991d75a2ab52.yaml0000664000175000017500000000035000000000000026446 0ustar00zuulzuul00000000000000--- features: - | Adds ``get_boot_device`` and ``get_supported_boot_devices`` to ``openstack.baremetal.v1.Node``. - | Adds ``get_node_boot_device`` and ``get_node_supported_boot_devices`` to the baremetal Proxy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/node-consoles-63589f22da98a689.yaml0000664000175000017500000000034500000000000025612 0ustar00zuulzuul00000000000000--- features: - | Adds ``get_console`` and ``set_console_state`` to ``openstack.baremetal.v1.Node``. - | Adds ``get_node_console``, ``enable_node_console`` and ``disable_node_console`` to the baremetal Proxy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/node-create-027ea99193f344ef.yaml0000664000175000017500000000065100000000000025273 0ustar00zuulzuul00000000000000--- upgrade: - | Changes the baremetal ``create_node`` call to be closer to how Ironic behaves. If no provision state is requested, the default state of the current microversion is used (which usually means ``enroll``). If the ``available`` state is requested, the node does not go through cleaning (it won't work without creating ports), an old API version is used to achieve this provision state. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/node-inject-nmi-53d12681026e0b6c.yaml0000664000175000017500000000021300000000000025753 0ustar00zuulzuul00000000000000--- features: - | Adds ``inject_nmi`` ``openstack.baremetal.v1.Node``. - | Adds ``inject_nmi_to_node`` to the baremetal Proxy. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/node-owner-7f4b083ff9da8cce.yaml0000664000175000017500000000042300000000000025460 0ustar00zuulzuul00000000000000--- features: - | The ``openstack.baremetal.v1.Node`` resource now has an ``owner`` property which was added in the baremetal API `microversion 1.50`_. .. _microversion 1.50: https://docs.openstack.org/ironic/latest/contributor/webapi-version-history.html#id7 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/node-set-provision-state-3472cbd81c47458f.yaml0000664000175000017500000000053100000000000027761 0ustar00zuulzuul00000000000000--- features: - | Adds ``set_provision_state`` and ``wait_for_provision_state`` to ``openstack.baremetal.v1.Node``. - | Adds ``node_set_provision_state`` and ``wait_for_nodes_provision_state`` to the baremetal Proxy. - | The ``node_set_provision_state`` call now supports provision states up to the Queens release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/norm_role_assignments-a13f41768e62d40c.yaml0000664000175000017500000000017200000000000027475 0ustar00zuulzuul00000000000000--- fixes: - Role assignments were being returned as plain dicts instead of Munch objects. This has been corrected. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/normalize-images-1331bea7bfffa36a.yaml0000664000175000017500000000041300000000000026620 0ustar00zuulzuul00000000000000--- features: - Image dicts that are returned are now normalized across glance v1 and glance v2. Extra key/value properties are now both in the root dict and in a properties dict. 
Additionally, cloud and region have been added like they are for server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/normalize-machine-290d9f2a3b3a7ef0.yaml0000664000175000017500000000013700000000000026630 0ustar00zuulzuul00000000000000--- fixes: - | Fixes normalization of bare metal machines in the ``patch_machine`` call. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/nova-flavor-to-rest-0a5757e35714a690.yaml0000664000175000017500000000022300000000000026555 0ustar00zuulzuul00000000000000--- upgrade: - Nova flavor operations are now handled via REST calls instead of via novaclient. There should be no noticable difference. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/nova-old-microversion-5e4b8e239ba44096.yaml0000664000175000017500000000030700000000000027332 0ustar00zuulzuul00000000000000--- upgrade: - Nova microversion is being requested. Since shade is not yet actively microversion aware, but has been dealing with the 2.0 structures anyway, this should not affect anyone. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/object-checksum-generation-ea1c1e47d2290054.yaml0000664000175000017500000000010500000000000030251 0ustar00zuulzuul00000000000000--- features: - Add flag for disabling object checksum generation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/object-chunked-data-ee619b7d4759b8d2.yaml0000664000175000017500000000030700000000000026765 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where passing an iterator to the ``data`` parameter of ``create_object`` for chunked uploads failed due to attempting to calculate the length of the data. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/object-search-a5f5ec4b2df3e045.yaml0000664000175000017500000000027600000000000026023 0ustar00zuulzuul00000000000000--- features: - | Objects are now searchable both with a JMESPath expression or a dict of object attributes via the ``openstack.connection.Connection.search_object`` function. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/old-placement-4b3c34abb8fe7b81.yaml0000664000175000017500000000026100000000000026031 0ustar00zuulzuul00000000000000--- fixes: - | Workaround an issue using openstacksdk with older versions of the placement service that are missing a status field in their version discovery doc. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/optimize-server-console-1d27c107b9a1cdc3.yaml0000664000175000017500000000034600000000000030023 0ustar00zuulzuul00000000000000--- features: - | Optimizes compute server console creation by adding older get_server_console method to the server and create_console proxy method calling appropriate method depending on the supported microversion. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/option-precedence-1fecab21fdfb2c33.yaml0000664000175000017500000000040400000000000027034 0ustar00zuulzuul00000000000000--- fixes: - Reverse the order of option selction in ``OpenStackConfig._validate_auth()`` to prefer auth options passed in (from argparse) over those found in clouds.yaml. This allows the application to override config profile auth settings. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/port-device-profile-af91e25c45321691.yaml0000664000175000017500000000025500000000000026671 0ustar00zuulzuul00000000000000--- features: - | Add ``device_profile`` attribute to ``port`` resource. This parameter can be define during the port creation. This parameter is nullable string. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/power-wait-751083852f958cb4.yaml0000664000175000017500000000010500000000000025037 0ustar00zuulzuul00000000000000--- features: - | Support waiting for bare metal power states. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/project-cleanup-exclude-option-65cba962eaa5b61a.yaml0000664000175000017500000000031300000000000031326 0ustar00zuulzuul00000000000000--- features: - | Project cleanup now supports skipping specific resources, which will be kept as-is. Resource names are based on the resource registry names, e. g. "block_storage.volume". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/project-cleanup-swift-f67615e5c3ab8fd8.yaml0000664000175000017500000000033100000000000027467 0ustar00zuulzuul00000000000000--- features: - | Project cleanup now supports cleaning Swift (object-store). If supported by the server bulk deletion is used. Currently only filtering based on updated_at (last_modified) is supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/provision-state-negotiation-0155b4d0e932054c.yaml0000664000175000017500000000054200000000000030462 0ustar00zuulzuul00000000000000--- fixes: - | Fixes API version negotiation in the following bare metal node calls: * ``set_node_provision_state`` * ``set_node_power_state`` * ``patch_node`` Previously an unexpectingly low version could be negotiated, breaking certain features, for example calling the ``provide`` provisioning action with a node name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/python-3.5-629817cec092d528.yaml0000664000175000017500000000050600000000000024650 0ustar00zuulzuul00000000000000--- fixes: - | openstacksdk does not test or support python2 as of 0.40, but the releases have still accidentally worked (except for 0.44 which was broken for python2). 
We're now explicitly marking releases as requiring >= 3.5 so that things don't attempt to install something that's bound to be broken. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/qos-min-pps-rule-52df1b150b1d3f68.yaml0000664000175000017500000000016200000000000026270 0ustar00zuulzuul00000000000000--- features: - | Added QoS minimum packet rate rule object and introduced support for CRUD operations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/qos-port-network-policy-cab43faa0f8bc036.yaml0000664000175000017500000000016400000000000030123 0ustar00zuulzuul00000000000000--- features: - | ``qos_network_policy_id`` attribute support has been added to the network port resource ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/r1-cab94ae7d749a1ec.yaml0000664000175000017500000000315200000000000023716 0ustar00zuulzuul00000000000000--- prelude: > This is a first major release of OpenStackSDK. From now on interface can be considered stable and will also in future strictly follow SemVer model. This release includes work in ensuring methods and attribute naming are consistent across the code basis and first steps in implementing even more generalizations in the processing logic. Microversion support is now considered as stable and session will be established with the highest version supported by both client and server. upgrade: - | This release includes work in enforcing consistency of the cloud layer methods. Now they all return SDK resource objects where previously Munch objects could have been returned. This leads to few important facts: - Return object types of various cloud.XXX calls now rely on proxy layer functions and strictly return SDK resources. 
- Some attributes of various resources may be named differently to follow SDK attribute naming convention. - Returned objects may forbid setting attributes (read-only attributes). Mentioned changes are affecting Ansible modules (which rely on OpenStackSDK). Historically Ansible modules return to the Ansible engine whatever SDK returns to it. Under some conditions Ansible may decide to unset properties (if it decides it contain sensitive information). While this is correct SDK forbids setting of some attributes what leads to errors. This release is therefore marking incompatibility with OpenStack Ansible modules in R1.X.X and the work on fixing it is being done in R2.X.X of modules repository. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/r1-d4efe289ebf0cbcd.yaml0000664000175000017500000000377600000000000024074 0ustar00zuulzuul00000000000000--- prelude: > This is a final R1.0 release of the OpenStackSDK. A few technical issues caused us not to reach this milestone cleanly, therefore we decided to one more time explicitly log everything what should be considered as R1.0. For detailed list of changes please see individual release notes from 0.99.0 to 0.103.0. Most important changes are explicitly repeated here. There were issues with maintainability of multiple available access interfaces, which forced us to consider what we are able to maintain in the long run and what we can not. That means that certain things were dropped, which is why we are releasing this as a major release. R1.0 is considered as a first major release with corresponding promise regarding backwards-compatibility. features: - | Cloud layer is now consistently returning ``Resource`` class objects. Previously this was not always the case. - | API response caching is implemented deep inside the code which will minimize roundtrips for repeated requests. 
- | The majority of services were verified and adapted to the latest state of the API. - | Certain code reorganization to further help in code reduction has been made (metadata, tag and quota support moved to standalone common classes). upgrade: - | Cloud layer methods are returning ``Resource`` class objects instead of ``Munch`` objects. In some cases this cause renaming of the attributes. ``Resource`` class is ``Munch`` compatible and allows both dictionary and attribute base access. - | Some historical methods, which were never properly tested were dropped. deprecations: - | ``Munch`` is dropped as a dependency. The project has no releases since multiple years and was causing huge performance impact already during import. This has directly no negative imapct to SDK users (it now starts faster), but in the code we copied used ``Munch`` pieces. They are going to be consistently eliminated in next releases. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/rackspace-block-storage-v2-fe0dd69b9e037599.yaml0000664000175000017500000000032700000000000030211 0ustar00zuulzuul00000000000000--- upgrade: - | Rackspace Cloud's vendor profile has been updated to use v2 of the Block Storage API. This introduces an endpoint override for the service based on ``region_name`` and ``project_id``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/register-machine-72ac3e65a1ed55b1.yaml0000664000175000017500000000033500000000000026450 0ustar00zuulzuul00000000000000--- upgrade: - | The default behavior of the ``register_machine`` call has been modified to run cleaning by default, if enabled in Ironic. You can pass ``provision_state="enroll"/"manageable"`` to avoid it. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/remote-address-group-id-6291816888cb3de7.yaml0000664000175000017500000000026200000000000027474 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a regression sending an unsupported field ``remote_address_group_id`` when creating security groups with an older Neutron (introduced 0.53.0). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/remote-profile-100218d08b25019d.yaml0000664000175000017500000000050100000000000025635 0ustar00zuulzuul00000000000000--- features: - | Vendor profiles can now be fetched from an RFC 5785 compliant URL on a cloud, namely, ``https://example.com/.well-known/openstack/api``. A cloud can manage their own vendor profile and serve it from that URL, allowing a user to simply list ``https://example.com`` as the profile name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/remove-auto-container-527f1807605b42c0.yaml0000664000175000017500000000031400000000000027144 0ustar00zuulzuul00000000000000--- upgrade: - | ``openstack.connection.Connection.create_object`` no longer creates a container if one doesn't exist. It is the user's responsibility to create a container before using it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/remove-block-store-details-classes-158ab1f46655320a.yaml0000664000175000017500000000031600000000000031576 0ustar00zuulzuul00000000000000--- deprecations: - | Requesting volumes or backups with details from block_storage will return objects of classes Volume and Backup correspondingly, instead of VolumeDetail and BackupDetail. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/remove-cloud-caching-layer-2b0384870a45e8a3.yaml0000664000175000017500000000050400000000000030112 0ustar00zuulzuul00000000000000--- upgrade: - | The cloud-layer caching functionality has been removed in favour of the proxy-layer caching functionality first introduced in openstacksdk 1.0.0. This migration to proxy-layer caching was designed to be transparent to end-users and there should be no user-facing impact from this removal. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/remove-magnumclient-875b3e513f98f57c.yaml0000664000175000017500000000016700000000000027076 0ustar00zuulzuul00000000000000--- upgrade: - magnumclient is no longer a direct dependency as magnum API calls are now made directly via REST. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/remove-metric-fe5ddfd52b43c852.yaml0000664000175000017500000000021500000000000026071 0ustar00zuulzuul00000000000000--- upgrade: - | Removed the metric service. It is not an OpenStack service and does not have an entry in service-types-authority. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/remove-novaclient-3f8d4db20d5f9582.yaml0000664000175000017500000000022600000000000026616 0ustar00zuulzuul00000000000000--- upgrade: - All Nova interactions are done via direct REST calls. python-novaclient is no longer a direct dependency of openstack.cloud. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/remove-serverdetails-resource-f66cb278b224627d.yaml0000664000175000017500000000023100000000000031066 0ustar00zuulzuul00000000000000--- deprecations: - | Listing servers with details `servers(details=True)` will return instances of the Server class instead of ServerDetails. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/removed-deprecated-things-8700fe3592c3bf18.yaml0000664000175000017500000000021600000000000030125 0ustar00zuulzuul00000000000000--- upgrade: - | In anticipation of the upcoming 1.0 release, all the things that have been marked as deprecated have been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/removed-glanceclient-105c7fba9481b9be.yaml0000664000175000017500000000223100000000000027317 0ustar00zuulzuul00000000000000--- prelude: > The ``shade`` and ``os-client-config`` libraries have been merged in to openstacksdk. As a result, their functionality is being integrated into the sdk functionality, and in some cases is replacing exisiting things. The ``openstack.profile.Profile`` and ``openstack.auth.base.BaseAuthPlugin`` classes are no more. Profile has been replace by ``openstack.config.cloud_region.CloudRegion`` from `os-client-config `_ ``openstack.auth.base.BaseAuthPlugin`` has been replaced with the Auth plugins from keystoneauth. Service proxy names on the ``openstack.connection.Connection`` are all based on the official names from the OpenStack Service Types Authority. ``openstack.proxy.Proxy`` is now a subclass of ``keystoneauth1.adapter.Adapter``. Removed local logic that duplicates keystoneauth logic. This means every proxy also has direct REST primitives available. .. 
code-block:: python connection = connection.Connection() servers = connection.compute.servers() server_response = connection.compute.get('/servers') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/removed-meter-6f6651b6e452e000.yaml0000664000175000017500000000024300000000000025556 0ustar00zuulzuul00000000000000--- upgrade: - | Meter and Alarm services have been removed. The Ceilometer REST API has been deprecated for quite some time and is no longer supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/removed-profile-437f3038025b0fb3.yaml0000664000175000017500000000046500000000000026103 0ustar00zuulzuul00000000000000--- upgrade: - The Profile object has been replaced with the use of CloudRegion objects from openstack.config. - The openstacksdk specific Session object has been removed. - Proxy objects are now subclasses of keystoneauth1.adapter.Adapter. - REST interactions all go through TaskManager now. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/removed-profile-b033d870937868a1.yaml0000664000175000017500000000020100000000000026027 0ustar00zuulzuul00000000000000--- upgrade: - | ``openstack.profile.Profile`` has been removed. ``openstack.config`` should be used directly instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/removed-swiftclient-aff22bfaeee5f59f.yaml0000664000175000017500000000023000000000000027526 0ustar00zuulzuul00000000000000--- upgrade: - Removed swiftclient as a dependency. All swift operations are now performed with direct REST calls using keystoneauth Adapter. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/rename-base-proxy-b9fcb22d373864a2.yaml0000664000175000017500000000023600000000000026510 0ustar00zuulzuul00000000000000--- deprecations: - | `openstack.proxy.BaseProxy` has been renamed to `openstack.proxy.Proxy`. A ``BaseProxy`` class remains for easing transition. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/rename-resource-methods-5f2a716b08156765.yaml0000664000175000017500000000106500000000000027474 0ustar00zuulzuul00000000000000--- upgrade: - | ``openstack.resource.Resource.get`` has been renamed to ``openstack.resource.Resource.fetch`` to prevent conflicting with a ``dict`` method of the same name. While most consumer code is unlikely to call this method directly, this is a breaking change. - | ``openstack.resource.Resource.update`` has been renamed to ``openstack.resource.Resource.commit`` to prevent conflicting with a ``dict`` method of the same name. While most consumer code is unlikely to call this method directly, this is a breaking change. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/rename-service-force-down-6f462d62959a5315.yaml0000664000175000017500000000052100000000000027707 0ustar00zuulzuul00000000000000--- upgrade: - | compute.force_service_down function is renamed to update_service_forced_down to better fit the operation meaning. - | compute.v2.service.force_down is renamed to set_forced_down to fit the operation meaning. 
- | return of compute.service modification operations is changed to be the service itself ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/renamed-bare-metal-b1cdbc52af14e042.yaml0000664000175000017500000000013600000000000026710 0ustar00zuulzuul00000000000000--- upgrade: - Renamed bare-metal to baremetal to align with the official service type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/renamed-block-store-bc5e0a7315bfeb67.yaml0000664000175000017500000000021000000000000027133 0ustar00zuulzuul00000000000000--- upgrade: - The block_store service object has been renamed to block_storage to align the API with the official service types. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/renamed-cluster-743da6d321fffcba.yaml0000664000175000017500000000013400000000000026454 0ustar00zuulzuul00000000000000--- upgrade: - Renamed cluster to clustering to align with the official service type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/renamed-telemetry-c08ae3e72afca24f.yaml0000664000175000017500000000013100000000000027000 0ustar00zuulzuul00000000000000--- upgrade: - Renamed telemetry to meter to align with the official service type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/replace-appdirs-with-platformdirs-d3f5bcbe726b7829.yaml0000664000175000017500000000054400000000000031775 0ustar00zuulzuul00000000000000--- upgrade: - | The ``appdirs`` dependency is replaced by a requirement for ``platformdirs`` 3.0.0 or later. 
Users on macOS may need to move configuration files to ``*/Library/Application Support``. See its release notes for further details: https://platformdirs.readthedocs.io/en/latest/changelog.html#platformdirs-3-0-0-2023-02-06 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/request-stats-9d70480bebbdb4d6.yaml0000664000175000017500000000016500000000000026136 0ustar00zuulzuul00000000000000--- features: - | Added support for collecting and reporting stats on calls made to statsd and prometheus. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/resource-find-filter-by-name-e647e5c507ff4b6c.yaml0000664000175000017500000000047300000000000030625 0ustar00zuulzuul00000000000000--- other: - | ``openstack.resource.Resource.find`` now can use the database back-end to filter by name. If the resource class has "name" in the query parameters, this function will add this filter parameter in the "list" command, instead of retrieving the whole list and then manually filtering. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/resource2-migration-835590b300bef621.yaml0000664000175000017500000000066100000000000026705 0ustar00zuulzuul00000000000000--- upgrade: - | The ``Resource2`` and ``Proxy2`` migration has been completed. The original ``Resource`` and ``Proxy`` clases have been removed and replaced with ``Resource2`` and ``Proxy2``. deprecations: - | The ``shade`` functionality that has been merged in to openstacksdk is found in ``openstack.cloud`` currently. None of these interfaces should be relied upon as the merge has not yet completed. 
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=openstacksdk-4.0.0/releasenotes/notes/retrieve-detailed-view-for-find-proxy-methods-947a3280732c448a.yaml 22 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/retrieve-detailed-view-for-find-proxy-methods-947a3280732c448a0000664000175000017500000000076100000000000032666 0ustar00zuulzuul00000000000000--- features: - | The following proxy ``find_*`` operations will now retrieve a detailed resource by default when retrieving by name: * Block storage (v2) * ``find_volume`` * ``find_snapshot`` * ``find_backup`` * Block storage (v3) * ``find_volume`` * ``find_snapshot`` * ``find_backup`` * ``find_group`` * ``find_group_snapshot`` * Compute (v2) * ``find_image`` * ``find_server`` * ``find_hypervisor`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/revert-futurist-34acc42fd3f0e7f3.yaml0000664000175000017500000000042100000000000026474 0ustar00zuulzuul00000000000000--- upgrade: - | Removed the dependency on futurist, which isn't necessary. Users can still pass futurist executors if they want, as the API is the same, but if nothing is passed, ``concurrent.futures.ThreadPoolExecutor`` will be used as the default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/rework-compute-hypervisor-a62f275a0fd1f074.yaml0000664000175000017500000000022700000000000030337 0ustar00zuulzuul00000000000000--- features: - | Compute Hypervisor resource and functions are reworked to comply 2.88 microversion with deprecating misleading attributes. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/router-extraroute-atomic-1a0c84c3fd90ceb1.yaml0000664000175000017500000000013500000000000030267 0ustar00zuulzuul00000000000000--- features: - | Add support for methods of Neutron extension: ``extraroute-atomic``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/router_ext_gw-b86582317bca8b39.yaml0000664000175000017500000000016700000000000026000 0ustar00zuulzuul00000000000000--- fixes: - No longer fail in list_router_interfaces() if a router does not have the external_gateway_info key. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/sdk-helper-41f8d815cfbcfb00.yaml0000664000175000017500000000013500000000000025341 0ustar00zuulzuul00000000000000--- features: - Added helper method for constructing OpenStack SDK Connection objects. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/search_resource-b9c2f772e01d3b2c.yaml0000664000175000017500000000046700000000000026406 0ustar00zuulzuul00000000000000--- features: - | Add search_resources method implementing generic search interface accepting resource name (as "service.resource"), name_or_id and list of additional filters and returning 0 or many resources matching those. This interface is primarily designed to be used by Ansible modules. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/server-actions-microversion-support-f14b293d9c3d3d5e.yaml0000664000175000017500000000021300000000000032423 0ustar00zuulzuul00000000000000--- features: - | Server actions such as reboot and resize will now default to the latest microversion instead of 2.1 as before. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/server-create-error-id-66c698c7e633fb8b.yaml0000664000175000017500000000016500000000000027466 0ustar00zuulzuul00000000000000--- features: - server creation errors now include the server id in the Exception to allow people to clean up. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/server-security-groups-840ab28c04f359de.yaml0000664000175000017500000000024300000000000027640 0ustar00zuulzuul00000000000000--- features: - Add the `add_server_security_groups` and `remove_server_security_groups` functions to add and remove security groups from a specific server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/service_enabled_flag-c917b305d3f2e8fd.yaml0000664000175000017500000000031000000000000027332 0ustar00zuulzuul00000000000000--- fixes: - Keystone service descriptions were missing an attribute describing whether or not the service was enabled. A new 'enabled' boolean attribute has been added to the service data. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/session-client-b581a6e5d18c8f04.yaml0000664000175000017500000000030600000000000026112 0ustar00zuulzuul00000000000000--- features: - Added kwargs and argparse processing for session_client. deprecations: - Renamed simple_client to session_client. simple_client will remain as an alias for backwards compat. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/set-bootable-volume-454a7a41e7e77d08.yaml0000664000175000017500000000015500000000000026765 0ustar00zuulzuul00000000000000--- features: - Added a ``set_volume_bootable`` call to allow toggling the bootable state of a volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/shade-helper-568f8cb372eef6d9.yaml0000664000175000017500000000013100000000000025612 0ustar00zuulzuul00000000000000--- features: - Added helper method for constructing shade OpenStackCloud objects. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/shade-into-connection-81191fb3d0ddaf6e.yaml0000664000175000017500000000023000000000000027471 0ustar00zuulzuul00000000000000--- features: - | All of the methods formerly part of the ``shade`` library have been added to the `openstack.connection.Connection`` object. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/shade-location-b0d2e5cae743b738.yaml0000664000175000017500000000052100000000000026116 0ustar00zuulzuul00000000000000--- upgrade: - | The base ``Resource`` field ``location`` is no longer drawn from the ``Location`` HTTP header, but is instead a dict containing information about cloud, domain and project. The location dict is a feature of shade objects and is being added to all objects as part of the alignment of shade and sdk. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/snap-updated_at-a46711b6160e3a26.yaml0000664000175000017500000000012300000000000026042 0ustar00zuulzuul00000000000000--- features: - Added support for the updated_at attribute for volume snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/stack-update-5886e91fd6e423bf.yaml0000664000175000017500000000015400000000000025556 0ustar00zuulzuul00000000000000--- features: - Implement update_stack to perform the update action on existing orchestration stacks. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/started-using-reno-242e2b0cd27f9480.yaml0000664000175000017500000000006300000000000026616 0ustar00zuulzuul00000000000000--- other: - Started using reno for release notes. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/stateful-security-group-f32a78b9bbb49874.yaml0000664000175000017500000000011000000000000030003 0ustar00zuulzuul00000000000000--- features: - New stateful parameter can be used in security group ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/stop-using-tenant-id-42eb35139ba9eeff.yaml0000664000175000017500000000010500000000000027302 0ustar00zuulzuul00000000000000--- features: - | Stop sending tenant_id attribute to Neutron. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/story-2010784-21d23043155497f5.yaml0000664000175000017500000000221400000000000024650 0ustar00zuulzuul00000000000000--- upgrade: - | Many cloud administrators use universal cloud-wide credentials. This is supported in keystone via 'inherited' roles that can be applied cloud- or domain-wide. In previous releases, these credentials could not be usefully defined within ```clouds.yaml``` because ```clouds.yaml``` supports only specifying a single domain and project for auth purposes. This project or domain could not be overridden on the commandline. fixes: - | When some config settings are specified multiple times, the order of precendence has been changed to prefer command-line or env settings over those found in ```clouds.yaml```. The same reordering has been done when a setting is specified multiple times within ```clouds.yaml```; now a higher-level setting will take precedence over that specified within the auth section. 
Affected settings are: - ``domain_id`` - ``domain_name`` - ``user_domain_id`` - ``user_domain_name`` - ``project_domain_id`` - ``project_domain_name`` - ``auth-token`` - ``project_id`` - ``tenant_id`` - ``project_name`` - ``tenant_name`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/stream-object-6ecd43511dca726b.yaml0000664000175000017500000000014200000000000025756 0ustar00zuulzuul00000000000000--- features: - | Added ``stream_object`` method for getting object content in an iterator. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/stream-to-file-91f48d6dcea399c6.yaml0000664000175000017500000000011700000000000026101 0ustar00zuulzuul00000000000000--- features: - get_object now supports streaming output directly to a file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/strict-mode-d493abc0c3e87945.yaml0000664000175000017500000000035400000000000025413 0ustar00zuulzuul00000000000000--- features: - Added 'strict' mode, which is set by passing strict=True to the OpenStackCloud constructor. strict mode tells shade to only return values in resources that are part of shade's declared data model contract. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/strict-proxies-4a315f68f387ee89.yaml0000664000175000017500000000062700000000000026122 0ustar00zuulzuul00000000000000--- features: - | Added new option for Connection, ``strict_proxies``. When set to ``True``, Connection will throw a ``ServiceDiscoveryException`` if the endpoint for a given service doesn't work. 
This is useful for OpenStack services using sdk to talk to other OpenStack services where it can be expected that the deployer config is correct and errors should be reported immediately. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/support_stdin_image_upload-305c04fb2daeb32c.yaml0000664000175000017500000000032100000000000030705 0ustar00zuulzuul00000000000000--- features: - | Add support for creating image from STDIN (i.e. from OSC). When creating from STDIN however, no checksum verification is possible, and thus validate_checksum must be also set to False. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/swift-set-metadata-c18c60e440f9e4a7.yaml0000664000175000017500000000025100000000000026654 0ustar00zuulzuul00000000000000--- fixes: - | It is now possible to pass `metadata` parameter directly into the create_container, create_object object_store methods and will not be ignored. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/swift-upload-lock-d18f3d42b3a0719a.yaml0000664000175000017500000000035000000000000026503 0ustar00zuulzuul00000000000000--- fixes: - Fixed an issue where a section of code that was supposed to be resetting the SwiftService object was instead resetting the protective mutex around the SwiftService object leading to an exception of "__exit__" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/switch-coe-to-proxy-c18789ed27cc1d95.yaml0000664000175000017500000000023300000000000027032 0ustar00zuulzuul00000000000000--- features: - | Convert container_infrastructure_management cloud operations to rely fully on service proxy with all resource classes created. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/switch-nova-to-created_at-45b7b50af6a2d59e.yaml0000664000175000017500000000026700000000000030211 0ustar00zuulzuul00000000000000--- features: - The `created` field which was returned by the Nova API is now returned as `created_at` as well when not using strict mode for consistency with other models. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/switch-to-warnings-333955d19afc99ca.yaml0000664000175000017500000000040600000000000026732 0ustar00zuulzuul00000000000000--- upgrade: - | Warnings about deprecated behavior or deprecated/modified APIs are now raised using the ``warnings`` module, rather than the ``logging`` module. This allows users to filter these warnings or silence them entirely if necessary. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/task-manager-parameter-c6606653532248f2.yaml0000664000175000017500000000035000000000000027202 0ustar00zuulzuul00000000000000--- features: - | A new ``task_manager`` parameter to ``Connection`` has been added for passing a TaskManager object. This was present in shade and is used by nodepool, but was missing from the Connection constructor. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/toggle-port-security-f5bc606e82141feb.yaml0000664000175000017500000000060600000000000027341 0ustar00zuulzuul00000000000000--- features: - | Added a new property, 'port_security_enabled' which is a boolean to enable or disable port_secuirty during network creation. The default behavior will enable port security, security group and anti spoofing will act as before. 
When the attribute is set to False, security group and anti spoofing are disabled on the ports created on this network. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/unprocessed-2d75133911945869.yaml0000664000175000017500000000017600000000000025144 0ustar00zuulzuul00000000000000--- features: - | Supports fetching raw (unprocessed) introspection data from the bare metal introspection service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/unshelve-to-specific-host-84666d440dce4a73.yaml0000664000175000017500000000040300000000000030102 0ustar00zuulzuul00000000000000--- features: - | Add SDK support for Nova microversion 2.91. This microversion allows specifying a destination host to unshelve a shelve offloaded server. And availability zone can be set to None to unpin the availability zone of a server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/update-role-property-b16e902e913c7b25.yaml0000664000175000017500000000021700000000000027172 0ustar00zuulzuul00000000000000--- features: - | Added the newly supported ``description`` parameter and the missing ``domain_id`` parameter to ``Role`` resource. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/update_endpoint-f87c1f42d0c0d1ef.yaml0000664000175000017500000000046600000000000026475 0ustar00zuulzuul00000000000000--- features: - Added update_endpoint as a new function that allows the user to update a created endpoint with new values rather than deleting and recreating that endpoint. This feature only works with keystone v3, with v2 it will raise an exception stating the feature is not available. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/update_workflow-ecdef6056ef2687b.yaml0000664000175000017500000000010500000000000026535 0ustar00zuulzuul00000000000000features: - | Added ``update_workflow`` to the workflow proxy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/use-interface-ip-c5cb3e7c91150096.yaml0000664000175000017500000000125600000000000026233 0ustar00zuulzuul00000000000000--- fixes: - shade now correctly does not try to attach a floating ip with auto_ip if the cloud has given a public IPv6 address and the calling context supports IPv6 routing. shade has always used this logic to determine the server 'interface_ip', but the auto floating ip was incorrectly only looking at the 'public_v4' value to determine whether the server needed additional networking. upgrade: - If your cloud presents a default split IPv4/IPv6 stack with a public v6 and a private v4 address and you have the expectation that auto_ip should procure a v4 floating ip, you need to set 'force_ipv4' to True in your clouds.yaml entry for the cloud. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/use-proxy-layer-dfc3764d52bc1f2a.yaml0000664000175000017500000000042100000000000026366 0ustar00zuulzuul00000000000000--- upgrade: - | Networking functions of the cloud layer return now resource objects `openstack.resource`. While those still implement Munch interface and are accessible as dictionary modification of an instance might be causing issues (i.e. forbidden). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/v4-fixed-ip-325740fdae85ffa9.yaml0000664000175000017500000000046300000000000025300 0ustar00zuulzuul00000000000000--- fixes: - | Re-added support for `v4-fixed-ip` and `v6-fixed-ip` in the `nics` parameter to `create_server`. These are aliaes for `fixed_ip` provided by novaclient which shade used to use. The switch to REST didn't include support for these aliases, resulting in a behavior regression. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/validate-machine-dcf528b8f587e3f0.yaml0000664000175000017500000000021200000000000026435 0ustar00zuulzuul00000000000000--- deprecations: - | The ``OpenStackCloud.validate_node`` call was deprecated in favor of ``OpenStackCloud.validate_machine``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/vendor-add-betacloud-03872c3485104853.yaml0000664000175000017500000000006000000000000026550 0ustar00zuulzuul00000000000000--- other: - Add betacloud region for Germany ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/vendor-add-limestonenetworks-99b2ffab9fc23b08.yaml0000664000175000017500000000013000000000000031126 0ustar00zuulzuul00000000000000--- other: - | Add Limestone Networks vendor info for us-dfw-1 and us-slc regions ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/vendor-update-betacloud-37dac22d8d91a3c5.yaml0000664000175000017500000000006300000000000027741 0ustar00zuulzuul00000000000000--- other: - Update betacloud region for Germany ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/vendor-updates-f11184ba56bb27cf.yaml0000664000175000017500000000022400000000000026160 0ustar00zuulzuul00000000000000--- other: - Add citycloud regions for Buffalo, Frankfurt, Karlskrona and Los Angles - Add new DreamCompute cloud and deprecate DreamHost cloud ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/version-command-70c37dd7f880e9ae.yaml0000664000175000017500000000015500000000000026350 0ustar00zuulzuul00000000000000--- features: - The installed version can now be quickly checked with ``python -m openstack version``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/version-discovery-a501c4e9e9869f77.yaml0000664000175000017500000000076600000000000026621 0ustar00zuulzuul00000000000000--- features: - Version discovery is now done via the keystoneauth library. shade still has one behavioral difference from default keystoneauth behavior, which is that shade will use a version it understands if it can find one even if the user has requested a different version. This change opens the door for shade to start being able to consume API microversions as needed. upgrade: - keystoneauth version 3.2.0 or higher is required because of version discovery. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/vol-updated_at-274c3a2bb94c8939.yaml0000664000175000017500000000012100000000000026002 0ustar00zuulzuul00000000000000--- features: - Added support for the updated_at attribute for volume objects. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/volume-quotas-5b674ee8c1f71eb6.yaml0000664000175000017500000000027400000000000026066 0ustar00zuulzuul00000000000000--- features: - Add new APIs, OperatorCloud.get_volume_quotas(), OperatorCloud.set_volume_quotas() and OperatorCloud.delete_volume_quotas() to manage cinder quotas for projects and users././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/volume-types-a07a14ae668e7dd2.yaml0000664000175000017500000000015100000000000025675 0ustar00zuulzuul00000000000000--- features: - Add support for listing volume types. - Add support for managing volume type access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/volume-update-876e6540c8471440.yaml0000664000175000017500000000011400000000000025444 0ustar00zuulzuul00000000000000--- features: - | Added ``update_volume`` to the block storage proxy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/volume_connector-api-f001e6f5fc4d1688.yaml0000664000175000017500000000011500000000000027303 0ustar00zuulzuul00000000000000--- features: - | Adds support for the baremetal volume connector API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/wait-on-image-snapshot-27cd2eacab2fabd8.yaml0000664000175000017500000000044500000000000030014 0ustar00zuulzuul00000000000000--- features: - Adds a new pair of options to create_image_snapshot(), wait and timeout, to have the function wait until the image snapshot being created goes into an active state. 
- Adds a new function wait_for_image() which will wait for an image to go into an active state. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/wait-provision-state-no-fail-efa74dd39f687df8.yaml0000664000175000017500000000031700000000000030775 0ustar00zuulzuul00000000000000--- features: - | Adds an ability for the bare metal ``wait_for_nodes_provision_state`` call to return an object with nodes that succeeded, failed or timed out instead of raising an exception. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/wait_for_server-8dc8446b7c673d36.yaml0000664000175000017500000000013600000000000026307 0ustar00zuulzuul00000000000000--- features: - New wait_for_server() API call to wait for a server to reach ACTIVE status. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/wait_for_status_delete_callback_param-68d30161e23340bb.yaml0000664000175000017500000000057500000000000032535 0ustar00zuulzuul00000000000000--- features: - | The ``Resource.wait_for_status``, ``Resource.wait_for_delete``, and related proxy wrappers now accept a ``callback`` argument that can be used to pass a callback function. When provided, the wait function will attempt to retrieve a ``progress`` value from the resource in question and pass it to the callback function each time it iterates. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/wire-in-retries-10898f7bc81e2269.yaml0000664000175000017500000000042400000000000026053 0ustar00zuulzuul00000000000000--- features: - | Allows configuring Session's ``connect_retries`` and ``status_code_retries`` via the cloud configuration (options ``_connect_retries``, ``connect_retries``, ``_status_code_retries`` and ``status_code_retries``). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/workaround-transitive-deps-1e7a214f3256b77e.yaml0000664000175000017500000000100700000000000030401 0ustar00zuulzuul00000000000000--- fixes: - Added requests and Babel to the direct dependencies list to work around issues with pip installation, entrypoints and transitive dependencies with conflicting exclusion ranges. Packagers of shade do not need to add these two new requirements to shade's dependency list - they are transitive depends and should be satisfied by the other things in the requirements list. Both will be removed from the list again once the python client libraries that pull them in have been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/notes/xenapi-use-agent-ecc33e520da81ffa.yaml0000664000175000017500000000046000000000000026533 0ustar00zuulzuul00000000000000--- fixes: - | Updated the Rackspace vendor entry to use `"False"` for the value of `xenapi_use_agent` instead of `false`, because that's what the remote side expects. The recent update to use the Resource layer exposed the incorrect setting causing image uploads to Rackspace to fail. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6214948 openstacksdk-4.0.0/releasenotes/source/0000775000175000017500000000000000000000000020220 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/2023.1.rst0000664000175000017500000000020200000000000021471 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000021472 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000021472 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. 
release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6214948 openstacksdk-4.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000021646 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000024117 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6214948 openstacksdk-4.0.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000022355 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000024626 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/conf.py0000664000175000017500000002124400000000000021522 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # oslo.config Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. 
# # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/openstacksdk' openstackdocs_use_storyboard = False # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2017, Various members of the OpenStack Foundation' # Release notes are version independent. # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'shadeReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( 'index', 'shadeReleaseNotes.tex', 'Shade Release Notes Documentation', 'Shade Developers', 'manual', ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( 'index', 'shadereleasenotes', 'shade Release Notes Documentation', ['shade Developers'], 1, ) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( 'index', 'shadeReleaseNotes', 'shade Release Notes Documentation', 'shade Developers', 'shadeReleaseNotes', 'A client library for interacting with OpenStack clouds', 'Miscellaneous', ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/index.rst0000664000175000017500000000041700000000000022063 0ustar00zuulzuul00000000000000============================ openstacksdk Release Notes ============================ .. toctree:: :maxdepth: 1 unreleased 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000022034 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000021702 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000022247 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. 
release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000022074 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000022067 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000022073 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000015300000000000023100 0ustar00zuulzuul00000000000000============================ Current Series Release Notes ============================ .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000022276 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. 
release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/victoria.rst0000664000175000017500000000022000000000000022564 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000021400000000000022402 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/xena.rst0000664000175000017500000000020000000000000021675 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000021701 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/releasenotes/source/zed.rst0000664000175000017500000000017400000000000021536 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. 
release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/requirements.txt0000664000175000017500000000056600000000000017522 0ustar00zuulzuul00000000000000cryptography>=2.7 # BSD/Apache-2.0 decorator>=4.4.1 # BSD dogpile.cache>=0.6.5 # BSD iso8601>=0.1.11 # MIT jmespath>=0.9.0 # MIT jsonpatch!=1.20,>=1.16 # BSD keystoneauth1>=3.18.0 # Apache-2.0 netifaces>=0.10.4 # MIT os-service-types>=1.7.0 # Apache-2.0 pbr!=2.1.0,>=2.0.0 # Apache-2.0 platformdirs>=3 # MIT License PyYAML>=3.13 # MIT requestsexceptions>=1.2.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.1012437 openstacksdk-4.0.0/roles/0000775000175000017500000000000000000000000015353 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6214948 openstacksdk-4.0.0/roles/deploy-clouds-config/0000775000175000017500000000000000000000000021401 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/roles/deploy-clouds-config/README.rst0000664000175000017500000000000000000000000023056 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6214948 openstacksdk-4.0.0/roles/deploy-clouds-config/defaults/0000775000175000017500000000000000000000000023210 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/roles/deploy-clouds-config/defaults/main.yaml0000664000175000017500000000005400000000000025017 0ustar00zuulzuul00000000000000zuul_work_dir: "{{ zuul.project.src_dir }}" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 
mtime=1725296385.6214948 openstacksdk-4.0.0/roles/deploy-clouds-config/tasks/0000775000175000017500000000000000000000000022526 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/roles/deploy-clouds-config/tasks/main.yaml0000664000175000017500000000041200000000000024333 0ustar00zuulzuul00000000000000- name: Create OpenStack config dir ansible.builtin.file: dest: ~/.config/openstack state: directory recurse: true - name: Deploy clouds.yaml ansible.builtin.template: src: clouds.yaml.j2 dest: ~/.config/openstack/clouds.yaml mode: 0440 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6214948 openstacksdk-4.0.0/roles/deploy-clouds-config/templates/0000775000175000017500000000000000000000000023377 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/roles/deploy-clouds-config/templates/clouds.yaml.j20000664000175000017500000000004600000000000026066 0ustar00zuulzuul00000000000000--- {{ cloud_config | to_nice_yaml }} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6254966 openstacksdk-4.0.0/setup.cfg0000664000175000017500000000230000000000000016043 0ustar00zuulzuul00000000000000[metadata] name = openstacksdk summary = An SDK for building applications to work with OpenStack description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/openstacksdk/ python_requires = >=3.8 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 
3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 [files] packages = openstack [entry_points] console_scripts = openstack-inventory = openstack.cloud.cmd.inventory:main [mypy] show_column_numbers = true show_error_context = true ignore_missing_imports = true follow_imports = skip incremental = true check_untyped_defs = true warn_unused_ignores = false exclude = (?x)( doc | examples | releasenotes ) [mypy-openstack.tests.unit.*] ignore_errors = true [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/setup.py0000664000175000017500000000136500000000000015746 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools setuptools.setup(setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/test-requirements.txt0000664000175000017500000000056700000000000020500 0ustar00zuulzuul00000000000000coverage!=4.4,>=4.0 # Apache-2.0 ddt>=1.0.1 # MIT fixtures>=3.0.0 # Apache-2.0/BSD hacking>=3.1.0,<4.0.0 # Apache-2.0 jsonschema>=3.2.0 # MIT oslo.config>=6.1.0 # Apache-2.0 oslotest>=3.2.0 # Apache-2.0 prometheus-client>=0.4.2 # Apache-2.0 requests-mock>=1.2.0 # Apache-2.0 statsd>=3.3.0 stestr>=1.0.0 # Apache-2.0 testscenarios>=0.4 # Apache-2.0/BSD testtools>=2.2.0 # MIT ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6214948 openstacksdk-4.0.0/tools/0000775000175000017500000000000000000000000015367 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/tools/keystone_version.py0000664000175000017500000000533200000000000021352 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import pprint import sys from urllib import parse as urlparse import openstack.config def print_versions(r): if 'version' in r: for version in r['version']: print_version(version) if 'values' in r: for version in r['values']: print_version(version) if isinstance(r, list): for version in r: print_version(version) def print_version(version): if version['status'] in ('CURRENT', 'stable'): print( "\tVersion ID: {id} updated {updated}".format( id=version.get('id'), updated=version.get('updated') ) ) verbose = '-v' in sys.argv ran = [] for cloud in openstack.config.OpenStackConfig().get_all_clouds(): if cloud.name in ran: continue ran.append(cloud.name) # We don't actually need a compute client - but we'll be getting full urls # anyway. Without this SSL cert info becomes wrong. c = cloud.get_session_client('compute') endpoint = cloud.config['auth']['auth_url'] try: print(endpoint) r = c.get(endpoint).json() if verbose: pprint.pprint(r) except Exception as e: print(f"Error with {cloud.name}: {str(e)}") continue if 'version' in r: print_version(r['version']) url = urlparse.urlparse(endpoint) parts = url.path.split(':') if len(parts) == 2: path, port = parts else: path = url.path port = None stripped = path.rsplit('/', 2)[0] if port: stripped = f'{stripped}:{port}' endpoint = urlparse.urlunsplit( (url.scheme, url.netloc, stripped, url.params, url.query) ) print(f" also {endpoint}") try: r = c.get(endpoint).json() if verbose: pprint.pprint(r) except Exception: print("\tUnauthorized") continue if 'version' in r: print_version(r) elif 'versions' in r: print_versions(r['versions']) else: print(f"\n\nUNKNOWN\n\n{r}") else: print_versions(r['versions']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/tools/nova_version.py0000664000175000017500000000422200000000000020451 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import openstack.config ran = [] for cloud in openstack.config.OpenStackConfig().get_all_clouds(): if cloud.name in ran: continue ran.append(cloud.name) c = cloud.get_session_client('compute') try: raw_endpoint = c.get_endpoint() have_current = False endpoint = raw_endpoint.rsplit('/', 2)[0] print(endpoint) r = c.get(endpoint).json() except Exception: print("Error with %s" % cloud.name) continue for version in r['versions']: if version['status'] == 'CURRENT': have_current = True print( "\tVersion ID: {id} updated {updated}".format( id=version.get('id'), updated=version.get('updated') ) ) print("\tVersion Max: {max}".format(max=version.get('version'))) print( "\tVersion Min: {min}".format(min=version.get('min_version')) ) if not have_current: for version in r['versions']: if version['status'] == 'SUPPORTED': have_current = True print( "\tVersion ID: {id} updated {updated}".format( id=version.get('id'), updated=version.get('updated') ) ) print( "\tVersion Max: {max}".format(max=version.get('version')) ) print( "\tVersion Min: {min}".format( min=version.get('min_version') ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/tools/print-services.py0000664000175000017500000001117400000000000020722 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import importlib import warnings import os_service_types from openstack import _log from openstack import service_description _logger = _log.setup_logging('openstack') _service_type_manager = os_service_types.ServiceTypes() def make_names(): imports = ['from openstack import service_description'] services = [] for service in _service_type_manager.services: service_type = service['service_type'] if service_type == 'ec2-api': # NOTE(mordred) It doesn't make any sense to use ec2-api # from openstacksdk. The credentials API calls are all calls # on identity endpoints. continue desc_class = _find_service_description_class(service_type) st = service_type.replace('-', '_') if desc_class.__module__ != 'openstack.service_description': base_mod, dm = desc_class.__module__.rsplit('.', 1) imports.append(f'from {base_mod} import {dm}') else: dm = 'service_description' dc = desc_class.__name__ services.append( "{st} = {dm}.{dc}(service_type='{service_type}')".format( st=st, dm=dm, dc=dc, service_type=service_type ), ) # Register the descriptor class with every known alias. Don't # add doc strings though - although they are supported, we don't # want to give anybody any bad ideas. Making a second descriptor # does not introduce runtime cost as the descriptors all use # the same _proxies dict on the instance. 
for alias_name in _get_aliases(st): if alias_name[-1].isdigit(): continue services.append(f'{alias_name} = {st}') services.append('') print("# Generated file, to change, run tools/print-services.py") for imp in sorted(imports): print(imp) print('\n') print("class ServicesMixin:\n") for service in services: if service: print(f" {service}") else: print() def _get_aliases(service_type, aliases=None): # We make connection attributes for all official real type names # and aliases. Three services have names they were called by in # openstacksdk that are not covered by Service Types Authority aliases. # Include them here - but take heed, no additional values should ever # be added to this list. # that were only used in openstacksdk resource naming. LOCAL_ALIASES = { 'baremetal': 'bare_metal', 'block_storage': 'block_store', 'clustering': 'cluster', } all_types = set(_service_type_manager.get_aliases(service_type)) if aliases: all_types.update(aliases) if service_type in LOCAL_ALIASES: all_types.add(LOCAL_ALIASES[service_type]) all_aliases = set() for alias in all_types: all_aliases.add(alias.replace('-', '_')) return all_aliases def _find_service_description_class(service_type): package_name = f'openstack.{service_type}'.replace('-', '_') module_name = service_type.replace('-', '_') + '_service' class_name = ''.join( [part.capitalize() for part in module_name.split('_')] ) # We have some exceptions :( # This should have been called 'shared-filesystem' if service_type == 'shared-file-system': class_name = 'SharedFilesystemService' try: import_name = '.'.join([package_name, module_name]) service_description_module = importlib.import_module(import_name) except ImportError as e: # ImportWarning is ignored by default. This warning is here # as an opt-in for people trying to figure out why something # didn't work. 
warnings.warn( f"Could not import {service_type} service description: {str(e)}", ImportWarning, ) return service_description.ServiceDescription # There are no cases in which we should have a module but not the class # inside it. service_description_class = getattr(service_description_module, class_name) return service_description_class make_names() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/tox.ini0000664000175000017500000001266400000000000015553 0ustar00zuulzuul00000000000000[tox] minversion = 4.3.0 envlist = pep8,py3 [testenv] description = Run unit tests. passenv = OS_* OPENSTACKSDK_* setenv = LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true} OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true} OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true} deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt commands = stestr run {posargs} stestr slowest [testenv:functional{,-py38,-py39,-py310,-py311,-py312}] description = Run functional tests. # Some jobs (especially heat) takes longer, therefore increase default timeout # This timeout should not be smaller, than the longest individual timeout setenv = {[testenv]setenv} OS_TEST_TIMEOUT=600 OPENSTACKSDK_FUNC_TEST_TIMEOUT_LOAD_BALANCER=600 OPENSTACKSDK_EXAMPLE_CONFIG_KEY=functional OPENSTACKSDK_FUNC_TEST_TIMEOUT_PROJECT_CLEANUP=1200 commands = stestr --test-path ./openstack/tests/functional/{env:OPENSTACKSDK_TESTS_SUBDIR:} run --serial {posargs} stestr slowest # Acceptance tests are the ones running on real clouds [testenv:acceptance-regular-user] description = Run acceptance tests. 
# This env intends to test functions of a regular user without admin privileges # Some jobs (especially heat) takes longer, therefore increase default timeout # This timeout should not be smaller, than the longest individual timeout setenv = {[testenv]setenv} OS_TEST_TIMEOUT=600 OPENSTACKSDK_FUNC_TEST_TIMEOUT_LOAD_BALANCER=600 # OPENSTACKSDK_DEMO_CLOUD and OS_CLOUD should point to the cloud to test # Othee clouds are explicitly set empty to let tests detect absense OPENSTACKSDK_DEMO_CLOUD_ALT= OPENSTACKSDK_OPERATOR_CLOUD= commands = stestr --test-path ./openstack/tests/functional/{env:OPENSTACKSDK_TESTS_SUBDIR:} run --serial {posargs} --include-list include-acceptance-regular-user.txt stestr slowest [testenv:pep8] description = Run style checks. skip_install = true deps = pre-commit commands = pre-commit run --all-files --show-diff-on-failure [testenv:venv] description = Run specified command in a virtual environment with all dependencies installed. deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt -r{toxinidir}/doc/requirements.txt commands = {posargs} [testenv:debug] description = Run specified tests through oslo_debug_helper, which allows use of pdb. # allow 1 year, or 31536000 seconds, to debug a test before it times out setenv = OS_TEST_TIMEOUT=31536000 allowlist_externals = find commands = find . -type f -name "*.pyc" -delete oslo_debug_helper -t openstack/tests {posargs} [testenv:cover] description = Run unit tests and generate coverage report. setenv = {[testenv]setenv} PYTHON=coverage run --source openstack --parallel-mode commands = stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml [testenv:ansible] description = Run ansible tests. 
# Need to pass some env vars for the Ansible playbooks passenv = HOME USER ANSIBLE_VAR_* deps = {[testenv]deps} ansible commands = {toxinidir}/extras/run-ansible-tests.sh -e {envdir} {posargs} [testenv:docs] description = Build documentation in HTML format. deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = sphinx-build -W --keep-going -b html -j auto doc/source/ doc/build/html [testenv:pdf-docs] description = Build documentation in PDF format. deps = {[testenv:docs]deps} allowlist_externals = make commands = sphinx-build -W --keep-going -b latex -j auto doc/source/ doc/build/pdf make -C doc/build/pdf [testenv:releasenotes] description = Build release note documentation in HTML format. deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = sphinx-build -W --keep-going -b html -j auto releasenotes/source releasenotes/build/html [flake8] application-import-names = openstack # The following are ignored on purpose. It's not super worth it to fix them. # However, if you feel strongly about it, patches will be accepted to fix them # if they fix ALL of the occurances of one and only one of them. # E203 Black will put spaces after colons in list comprehensions # E501 Black takes care of line length for us # E704 Black will occasionally put multiple statements on one line # H238 New Style Classes are the default in Python3 # H301 Black will put commas after imports that can't fit on one line # H4 Are about docstrings and there's just a huge pile of pre-existing issues. # W503 Is supposed to be off by default but in the latest pycodestyle isn't. # Also, both openstacksdk and Donald Knuth disagree with the rule. Line # breaks should occur before the binary operator for readability. 
ignore = E203, E501, E704, H301, H238, H4, W503 import-order-style = pep8 show-source = True exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,openstack/_services_mixin.py [flake8:local-plugins] extension = O300 = checks:assert_no_setupclass O310 = checks:assert_no_deprecated_exceptions paths = ./openstack/_hacking [doc8] extensions = .rst, .yaml ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1725296385.6254966 openstacksdk-4.0.0/zuul.d/0000775000175000017500000000000000000000000015450 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/zuul.d/acceptance-jobs.yaml0000664000175000017500000001610600000000000021361 0ustar00zuulzuul00000000000000--- - semaphore: name: acceptance-cleura max: 1 - job: name: openstacksdk-acceptance-base parent: openstack-tox description: | Acceptance test of the OpenStackSDK on real clouds. .. zuul:jobsvar::openstack_credentials :type: dict This is expected to be a Zuul Secret with these keys: .. zuul:jobvar: auth :type: dict Dictionary with authentication information with mandatory auth_url and others. The structure mimics `clouds.yaml` structure. By default all jobs that inherit from here are non voting. attempts: 1 voting: false timeout: 3600 pre-run: - playbooks/acceptance/pre.yaml post-run: - playbooks/acceptance/post.yaml vars: tox_envlist: acceptance-regular-user tox_environment: OPENSTACKSDK_DEMO_CLOUD: acceptance OS_CLOUD: acceptance OS_TEST_CLOUD: acceptance # Acceptance tests for devstack are different from running for real cloud since # we need to actually deploy devstack first and API is available only on the # devstack host. - job: name: openstacksdk-acceptance-devstack parent: openstacksdk-functional-devstack description: Acceptance test of the OpenStackSDK on real clouds. 
attempts: 1 run: - playbooks/acceptance/run-with-devstack.yaml post-run: - playbooks/acceptance/post.yaml vars: tox_envlist: acceptance-regular-user tox_environment: OPENSTACKSDK_DEMO_CLOUD: acceptance OS_CLOUD: acceptance OS_TEST_CLOUD: acceptance auth_url: "https://{{ hostvars['controller']['nodepool']['private_ipv4'] }}/identity" secrets: - secret: credentials-devstack name: openstack_credentials - job: name: openstacksdk-acceptance-cleura parent: openstacksdk-acceptance-base description: Acceptance tests of the OpenStackSDK on Cleura semaphores: - name: acceptance-cleura secrets: - secret: credentials-cleura name: openstack_credentials pass-to-parent: true # Devstack secret is not specifying auth_url because of how Zuul treats secrets. # Auth_url comes extra in the job vars and is being used if no auth_url in the # secret is present. - secret: name: credentials-devstack data: auth: username: demo password: secretadmin project_domain_id: default project_name: demo user_domain_id: default region_name: RegionOne verify: false # Contact: tobias [xdot] rydberg [mat] citynetwork [xdot] eu - secret: name: credentials-cleura data: auth: auth_url: https://fra1.citycloud.com:5000 user_domain_name: !encrypted/pkcs1-oaep - B2+GBOl0HqQJ0umGR/8y6Y1SS+O6h7OK6rTa54797UavexKVxx2RZ144wPmW+IogX2QU2 tWtGBveQnZTpI19nxlnLmQQA+YSz8RIzJoFuStBmiITyCHQnvRJPc7kObjnZJLuoVwCT2 Rl3u1iGzJb/ZZvVDjvYH2ZW7a6aH+Ct7HfB+CGhvhETeoMAFDgb29QJ5U/T3OkVdTMwCY XDtdwg2JvoErd2gnNCqYDcIiOMO6lXKcc+35VQtGMGfoaUvu+iMlEi9pJqbdVd7qz5lgY AWBPG1mYt1mOaP8RRvzywhyRPnnnFgfUe2rf2ZozEUa7j4ObwXt7D8oRYXm+USEpk+YfD 9V3CvGvAgmPuuidGWwnZdPcNX/w/VW5p9oWRgJFYChb5+XCu7y0tFJX/usduZEY9/MvJs Iv0+OFf1TXc29qFqwGYVSyfimBroGFdYXmHSwK7wHJ1GUsdSRhQz4eYIdk+6c4LNx9JgO 5Z+3Q29tlh9WwuuQKE/JlKJ/1I9LC0RmyJyxSaiTLDiL+7J2O/hULmyZimbXVcYuXqDdo KAdPryYhmWWyBFkZfUa88GxwVf+WDLQqXhv+CDGRusbW2opVvv6p7NUwLh9PPOGnRLsS2 y1fZDVtz60ZMp8MQPACYjlzvc2lF5Z1Cvskr3O9KbT27V7AyLXmU+tbMrDLpC0= project_domain_name: !encrypted/pkcs1-oaep - 
B2+GBOl0HqQJ0umGR/8y6Y1SS+O6h7OK6rTa54797UavexKVxx2RZ144wPmW+IogX2QU2 tWtGBveQnZTpI19nxlnLmQQA+YSz8RIzJoFuStBmiITyCHQnvRJPc7kObjnZJLuoVwCT2 Rl3u1iGzJb/ZZvVDjvYH2ZW7a6aH+Ct7HfB+CGhvhETeoMAFDgb29QJ5U/T3OkVdTMwCY XDtdwg2JvoErd2gnNCqYDcIiOMO6lXKcc+35VQtGMGfoaUvu+iMlEi9pJqbdVd7qz5lgY AWBPG1mYt1mOaP8RRvzywhyRPnnnFgfUe2rf2ZozEUa7j4ObwXt7D8oRYXm+USEpk+YfD 9V3CvGvAgmPuuidGWwnZdPcNX/w/VW5p9oWRgJFYChb5+XCu7y0tFJX/usduZEY9/MvJs Iv0+OFf1TXc29qFqwGYVSyfimBroGFdYXmHSwK7wHJ1GUsdSRhQz4eYIdk+6c4LNx9JgO 5Z+3Q29tlh9WwuuQKE/JlKJ/1I9LC0RmyJyxSaiTLDiL+7J2O/hULmyZimbXVcYuXqDdo KAdPryYhmWWyBFkZfUa88GxwVf+WDLQqXhv+CDGRusbW2opVvv6p7NUwLh9PPOGnRLsS2 y1fZDVtz60ZMp8MQPACYjlzvc2lF5Z1Cvskr3O9KbT27V7AyLXmU+tbMrDLpC0= project_name: !encrypted/pkcs1-oaep - IRSHyf964g3q7vHY08reyx69cGDLG/+kkEnZ4fs4qiwBw1RL1wKW3r3Omi1PLXDHHCHfC jlRrwvZh80CzG3nqt94WSiASjn4XvZtCV0++UZxCkdEs/2SXN1YYpBGLqotM91NhQHCpo Xu6KD7U8ckZgjAQFzV/rF7pnFSvzb14PQqBiQ4Ei7nFyrg6sW20ratjC+pBboUORPvPjG wuY/lt8kRXYnPlI/oeFngXMl/WD7z5k0kLwUcg/z9x3uF6b6xozR8Vzjal13RR7FU5Tu7 T5Qr8uREPHlK8aU90XnNrlJqIAfIFuAlmZCeckIMlVqGjGBekI2W/zPXhL/SjR2SNeTIl SwKfInnT0SfGqKTAjgPJAocZSNppt4ql1EsS3Rdp8SQ0EGW7pXs73svexNRhh4k1m7gM1 54OoyS2wtMaTR3Q3L92ZuT2DdxmPbvXThbRO5P2g0yDpp/HuWkQyHq9b1tZD+p7akU7p+ g8fIQFKFueFP0T6XszQSPySjjaTZOWd0CQC2oTlivcf7oZ4etp22Zh7IDCXWLX39C2LkF XLBaEa9LRxn1UwJ2bz2nUPjqDsOz2nRskC9Yz0XOOEKMokJ4POj+uac1iRfAf+hAGd9uE 7rNIp/7oV5ABOimJ5bgCI1SWAsz2F1lRq+bulzbONLmWfPik52bo/elXTxRais= username: !encrypted/pkcs1-oaep - bTHRzdAYEKXeFhrU3sBRN19ygO2t2zzXdeuB4DQq7Q+7VW7Apo8Vo1eaqpqjUnpI2jPG+ DJSg0ZG3tUsnRwwKo3N8RzwFNWj5wcUEtHjmFgMmLBvlv9Jv6OeN7R7AH7b21agTMTvwz X7hGWbYSEgDLn80uNTwcm4GVA0mycXDtIvZ4lPiCGkUJYav9++YbGYzDyiy2pBgVU0r5G 7GTO+cHQWUw+LL/scBijL4khLIxiHNgUNNfgAYOI/JQ720DxXSDF30SN8fRy0H0jl54Gr w0exl9QPBjI+o+qvFKq2Bni8dTp96MaC8pDxP/1/R8mEMYD2Ei3Ame1dfeUz7OgrQfpQv xlDSE/sM2/g0PG3YlpG+aCllZ1el2qM/B+pyq5JXf26swp1RdjehvUSIi3gQaqkC3qpRt 2FgZDKdHW6PYRmRlCphS5WK1otdCQEvyJ+s4QB4PooMcD4rqAf5hURGd5zr/aajqmEgXX 
eJKeLQrQD+4yJWeopcq7a66R3LeL07Dko2LWWlL6adGeQ5yd3eIZK7zwObTVE64DSbXDs 3UI8U6Qa3EMlrfEk8TXcK1QW2EM1JFiPBSm9e8zojTtg/caAyROXgn1T9qv0FKMcJZrOo Qt+n7vv1wkCSUoUEQFIadcMUn5EoXeTcRbjAOsRFN/OOh6+4jyNTh17cOC5dkc= password: !encrypted/pkcs1-oaep - FbeRKkCs2YlDYm944EUuUbY2mVcTwSgE00gMZokmXR2WjKqRsuLFpkOe9opndwqV1tUyj mxAGizoGlzI+Lg8VnS47zShM+UqgaNzC148iY+WBuLXAEoxS3c9Gxz03Gm/Q2Tu6MJoCG OY8JvQkq+pjwkV61sIawTfQRTZkwjFO8F/viSOuF75PDZthY5SuMN5MEJ8B8Ska0WNbjw Edo623gZnyZsPvZwnqnP+yK0HW0smohKkvjHPZ5SGFiQ0G3eTSHaL5wrYWbkcZ5Gb4UgX x1edebv0ata0fZ8nhIwTrDIVe9icuijuV1ZkvHMGPvB50fkup4/QyObx6QUhL6D0mXaK5 fIq+dgrzkvcoODrwpXvBVxjNYnM+DBeMbN0V8d4vDvsRPsWCxIenETse1gD0PJyXx29br /Vild1xO1JnxoU469fl/gzdntyoV/QaLDteLKMFJISAFuVrcCEUz63s37iKAy6LnCtv/J PjciFvc2OR0cGUC/an3xtmqi18GWcWdinaBA0+OEnArdOdSc79MTZnMifICAeCQ3yiEnA 001hbBrRYTHgitpo4gYJOFMVufhcfvq6yB9wi3MqvpKP8wGH2SyNz7y5Gy9zbUgQFsRP7 2h3LRDRCVGYBVgBLD5mcIMn93HddOko8Q8RO8qVZM13R39dgGAi0KMEhF3bpjA= # We define additional project entity not to handle acceptance jobs in # already complex enough general project entity. - project: post-review: jobs: - openstacksdk-acceptance-devstack - openstacksdk-acceptance-cleura ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/zuul.d/functional-jobs.yaml0000664000175000017500000003766100000000000021446 0ustar00zuulzuul00000000000000--- # Definitions of functional jobs - job: name: openstacksdk-functional-devstack-minimum parent: devstack-tox-functional description: | Minimum job for devstack-based functional tests post-run: playbooks/devstack/post.yaml roles: # NOTE: We pull in roles from the tempest repo for stackviz processing. - zuul: opendev.org/openstack/tempest required-projects: # These jobs will DTRT when openstacksdk triggers them, but we want to # make sure stable branches of openstacksdk never get cloned by other # people, since stable branches of openstacksdk are, well, not actually # things. 
- name: openstack/openstacksdk override-branch: master - name: openstack/os-client-config override-branch: master timeout: 9000 vars: devstack_localrc: Q_ML2_PLUGIN_EXT_DRIVERS: qos,port_security Q_AGENT: openvswitch Q_ML2_TENANT_NETWORK_TYPE: vxlan Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch devstack_services: # OVN services ovn-controller: false ovn-northd: false ovs-vswitchd: false ovsdb-server: false q-ovn-metadata-agent: false # Neutron services q-agt: true q-dhcp: true q-l3: true q-metering: true q-svc: true # sdk doesn't need vnc access n-cauth: false n-novnc: false # sdk testing uses config drive only n-api-meta: false q-meta: false tox_environment: # Do we really need to set this? It's cargo culted PYTHONUNBUFFERED: 'true' # Is there a way we can query the localconf variable to get these # rather than setting them explicitly? OPENSTACKSDK_HAS_DESIGNATE: 0 OPENSTACKSDK_HAS_HEAT: 0 OPENSTACKSDK_HAS_MAGNUM: 0 OPENSTACKSDK_HAS_NEUTRON: 1 OPENSTACKSDK_HAS_SWIFT: 1 tox_install_siblings: false tox_envlist: functional zuul_copy_output: '{{ ansible_user_dir }}/stackviz': logs zuul_work_dir: src/opendev.org/openstack/openstacksdk - job: name: openstacksdk-functional-devstack-base parent: openstacksdk-functional-devstack-minimum description: | Base job for devstack-based functional tests vars: devstack_plugins: neutron: https://opendev.org/openstack/neutron devstack_local_conf: post-config: $CINDER_CONF: DEFAULT: osapi_max_limit: 6 - job: name: openstacksdk-functional-devstack parent: openstacksdk-functional-devstack-base description: | Run openstacksdk functional tests against a master devstack required-projects: - openstack/heat vars: devstack_localrc: DISABLE_AMP_IMAGE_BUILD: true Q_SERVICE_PLUGIN_CLASSES: qos,trunk # TODO(frickler): drop this once heat no longer needs it KEYSTONE_ADMIN_ENDPOINT: true devstack_plugins: heat: https://opendev.org/openstack/heat tox_environment: OPENSTACKSDK_HAS_HEAT: 1 devstack_services: neutron-qos: true neutron-trunk: true 
neutron-port-forwarding: true - job: name: openstacksdk-functional-devstack-networking parent: openstacksdk-functional-devstack description: | Run openstacksdk functional tests against a devstack with advanced networking services enabled. required-projects: - openstack/designate - openstack/octavia vars: configure_swap_size: 4096 devstack_local_conf: post-config: $OCTAVIA_CONF: DEFAULT: debug: true controller_worker: amphora_driver: amphora_noop_driver compute_driver: compute_noop_driver network_driver: network_noop_driver certificates: cert_manager: local_cert_manager devstack_localrc: Q_SERVICE_PLUGIN_CLASSES: qos,trunk devstack_plugins: designate: https://opendev.org/openstack/designate octavia: https://opendev.org/openstack/octavia devstack_services: designate: true octavia: true o-api: true o-cw: true o-hm: true o-hk: true neutron-dns: true s-account: false s-container: false s-object: false s-proxy: false h-eng: false h-api: false h-api-cfn: false tox_environment: OPENSTACKSDK_HAS_DESIGNATE: 1 OPENSTACKSDK_HAS_SWIFT: 0 OPENSTACKSDK_HAS_HEAT: 0 - job: name: openstacksdk-functional-devstack-networking-ext parent: openstacksdk-functional-devstack-networking description: | Run openstacksdk functional tests against a devstack with super advanced networking services enabled (VPNaas, FWaas) which still require ovs. 
required-projects: - openstack/neutron-fwaas - openstack/neutron-vpnaas - openstack/tap-as-a-service vars: INSTALL_OVN: False configure_swap_size: 4096 devstack_local_conf: post-config: $OCTAVIA_CONF: DEFAULT: debug: true controller_worker: amphora_driver: amphora_noop_driver compute_driver: compute_noop_driver network_driver: network_noop_driver certificates: cert_manager: local_cert_manager $NEUTRON_CONF: DEFAULT: router_distributed: True l3_ha: True "/$NEUTRON_CORE_PLUGIN_CONF": ovs: tunnel_bridge: br-tun bridge_mappings: public:br-ex $NEUTRON_L3_CONF: DEFAULT: agent_mode: dvr_snat agent: availability_zone: nova debug_iptables_rules: True $NEUTRON_DHCP_CONF: agent: availability_zone: nova devstack_localrc: Q_SERVICE_PLUGIN_CLASSES: qos,trunk,taas NETWORK_API_EXTENSIONS: "agent,binding,dhcp_agent_scheduler,external-net,ext-gw-mode,extra_dhcp_opts,quotas,router,security-group,subnet_allocation,network-ip-availability,auto-allocated-topology,timestamp_core,tag,service-type,rbac-policies,standard-attr-description,pagination,sorting,project-id,fwaas_v2,vpnaas,taas,tap_mirror" Q_AGENT: openvswitch Q_ML2_TENANT_NETWORK_TYPE: vxlan Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch IPSEC_PACKAGE: libreswan TAAS_SERVICE_DRIVER: TAAS:TAAS:neutron_taas.services.taas.service_drivers.taas_rpc.TaasRpcDriver:default devstack_plugins: designate: https://opendev.org/openstack/designate octavia: https://opendev.org/openstack/octavia neutron-fwaas: https://opendev.org/openstack/neutron-fwaas.git neutron-vpnaas: https://opendev.org/openstack/neutron-vpnaas.git tap-as-a-service: https://opendev.org/openstack/tap-as-a-service.git devstack_services: designate: true octavia: true o-api: true o-cw: true o-hm: true o-hk: true neutron-dns: true s-account: false s-container: false s-object: false s-proxy: false h-eng: false h-api: false h-api-cfn: false q-fwaas-v2: true taas: true tap_mirror: true tox_environment: OPENSTACKSDK_HAS_DESIGNATE: 1 OPENSTACKSDK_HAS_SWIFT: 0 OPENSTACKSDK_HAS_HEAT: 0 - 
job: name: openstacksdk-functional-devstack-tips parent: openstacksdk-functional-devstack description: | Run openstacksdk functional tests with tips of library dependencies against a master devstack. required-projects: - openstack/keystoneauth - openstack/openstacksdk - openstack/os-client-config vars: tox_install_siblings: true - job: name: openstacksdk-functional-devstack-magnum parent: openstacksdk-functional-devstack description: | Run openstacksdk functional tests against a master devstack with magnum required-projects: - openstack/magnum - openstack/python-magnumclient vars: devstack_plugins: magnum: https://opendev.org/openstack/magnum devstack_localrc: MAGNUM_GUEST_IMAGE_URL: https://tarballs.openstack.org/magnum/images/fedora-atomic-f23-dib.qcow2 MAGNUM_IMAGE_NAME: fedora-atomic-f23-dib devstack_services: s-account: false s-container: false s-object: false s-proxy: false tox_environment: OPENSTACKSDK_HAS_SWIFT: 0 OPENSTACKSDK_HAS_MAGNUM: 1 - job: name: openstacksdk-functional-devstack-ironic parent: openstacksdk-functional-devstack-minimum description: | Run openstacksdk functional tests against a master devstack with ironic required-projects: - openstack/ironic - openstack/ironic-python-agent-builder vars: devstack_localrc: OVERRIDE_PUBLIC_BRIDGE_MTU: 1400 IRONIC_BAREMETAL_BASIC_OPS: true IRONIC_BUILD_DEPLOY_RAMDISK: false IRONIC_CALLBACK_TIMEOUT: 600 IRONIC_DEPLOY_DRIVER: ipmi IRONIC_RAMDISK_TYPE: tinyipa IRONIC_VM_COUNT: 2 IRONIC_VM_LOG_DIR: '{{ devstack_base_dir }}/ironic-bm-logs' IRONIC_VM_SPECS_RAM: 1024 devstack_plugins: ironic: https://opendev.org/openstack/ironic devstack_services: c-api: false c-bak: false c-sch: false c-vol: false cinder: false s-account: false s-container: false s-object: false s-proxy: false n-api: false n-api-meta: false n-cauth: false n-cond: false n-cpu: false n-novnc: false n-obj: false n-sch: false nova: false placement-api: false dstat: false tox_environment: OPENSTACKSDK_HAS_IRONIC: 1 # NOTE(dtantsur): this job cannot 
run many regular tests (e.g. compute # tests will take too long), so limiting it to baremetal tests only. OPENSTACKSDK_TESTS_SUBDIR: baremetal zuul_copy_output: '{{ devstack_base_dir }}/ironic-bm-logs': logs - job: name: openstacksdk-ansible-functional-devstack parent: openstacksdk-functional-devstack description: | Run openstacksdk ansible functional tests against a master devstack using released version of ansible. vars: tox_envlist: ansible - job: name: openstacksdk-ansible-stable-2.8-functional-devstack parent: openstacksdk-ansible-functional-devstack description: | Run openstacksdk ansible functional tests against a master devstack using git stable-2.8 branch version of ansible. required-projects: - name: github.com/ansible/ansible override-checkout: stable-2.8 - name: openstack/openstacksdk override-checkout: master - name: openstack/devstack override-checkout: master vars: # test-matrix grabs branch from the zuul branch setting. If the job # is triggered by ansible, that branch will be devel which doesn't # make sense to devstack. Override so that we run the right thing. test_matrix_branch: master tox_install_siblings: true - job: name: openstacksdk-ansible-stable-2.9-functional-devstack parent: openstacksdk-ansible-functional-devstack description: | Run openstacksdk ansible functional tests against a master devstack using git stable-2.9 branch version of ansible. required-projects: - name: github.com/ansible/ansible override-checkout: stable-2.9 - name: openstack/openstacksdk override-checkout: master - name: openstack/devstack override-checkout: master vars: # test-matrix grabs branch from the zuul branch setting. If the job # is triggered by ansible, that branch will be devel which doesn't # make sense to devstack. Override so that we run the right thing. 
test_matrix_branch: master tox_install_siblings: true - job: name: openstacksdk-functional-devstack-masakari parent: openstacksdk-functional-devstack-minimum description: | Run openstacksdk functional tests against a master devstack with masakari required-projects: - openstack/masakari - openstack/masakari-monitors vars: devstack_plugins: masakari: https://opendev.org/openstack/masakari devstack_services: masakari-api: true masakari-engine: true tox_environment: OPENSTACKSDK_HAS_MASAKARI: 1 OPENSTACKSDK_TESTS_SUBDIR: instance_ha zuul_copy_output: '{{ devstack_base_dir }}/masakari-logs': logs - job: name: openstacksdk-functional-devstack-manila parent: openstacksdk-functional-devstack-minimum description: | Run openstacksdk functional tests against a master devstack with manila required-projects: - openstack/manila - openstack/openstacksdk vars: devstack_localrc: # Set up manila with a fake driver - makes things super fast and should # have no impact on the API MANILA_INSTALL_TEMPEST_PLUGIN_SYSTEMWIDE: false SHARE_DRIVER: manila.tests.share.drivers.dummy.DummyDriver MANILA_CONFIGURE_GROUPS: alpha,beta,gamma,membernet MANILA_CONFIGURE_DEFAULT_TYPES: true MANILA_SERVICE_IMAGE_ENABLED: false MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL: 1 MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL: 10 MANILA_REPLICA_STATE_UPDATE_INTERVAL: 10 MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS: 'snapshot_support=True create_share_from_snapshot_support=True revert_to_snapshot_support=True mount_snapshot_support=True' MANILA_ENABLED_BACKENDS: alpha,beta,gamma MANILA_OPTGROUP_alpha_driver_handles_share_servers: false MANILA_OPTGROUP_alpha_replication_domain: DUMMY_DOMAIN MANILA_OPTGROUP_alpha_share_backend_name: ALPHA MANILA_OPTGROUP_alpha_share_driver: manila.tests.share.drivers.dummy.DummyDriver MANILA_OPTGROUP_beta_driver_handles_share_servers: false MANILA_OPTGROUP_beta_replication_domain: DUMMY_DOMAIN MANILA_OPTGROUP_beta_share_backend_name: BETA MANILA_OPTGROUP_beta_share_driver: 
manila.tests.share.drivers.dummy.DummyDriver MANILA_OPTGROUP_gamma_driver_handles_share_servers: true MANILA_OPTGROUP_gamma_network_config_group: membernet MANILA_OPTGROUP_gamma_share_backend_name: GAMMA MANILA_OPTGROUP_gamma_share_driver: manila.tests.share.drivers.dummy.DummyDriver MANILA_OPTGROUP_gamma_admin_network_config_group: membernet MANILA_OPTGROUP_membernet_network_api_class: manila.network.standalone_network_plugin.StandaloneNetworkPlugin MANILA_OPTGROUP_membernet_network_plugin_ipv4_enabled: true MANILA_OPTGROUP_membernet_standalone_network_plugin_allowed_ip_ranges: 10.0.0.10-10.0.0.209 MANILA_OPTGROUP_membernet_standalone_network_plugin_gateway: 10.0.0.1 MANILA_OPTGROUP_membernet_standalone_network_plugin_mask: 24 MANILA_OPTGROUP_membernet_standalone_network_plugin_network_type: vlan MANILA_OPTGROUP_membernet_standalone_network_plugin_segmentation_id: 1010 devstack_plugins: manila: https://opendev.org/openstack/manila devstack_services: c-api: false c-bak: false c-sch: false c-vol: false cinder: false s-account: false s-container: false s-object: false s-proxy: false n-api: false n-api-meta: false n-cauth: false n-cond: false n-cpu: false n-novnc: false n-obj: false n-sch: false nova: false placement-api: false dstat: false tox_environment: OPENSTACKSDK_HAS_MANILA: 1 OPENSTACKSDK_TESTS_SUBDIR: shared_file_system - project-template: name: openstacksdk-functional-tips check: jobs: - openstacksdk-functional-devstack-tips gate: jobs: - openstacksdk-functional-devstack-tips ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/zuul.d/metal-jobs.yaml0000664000175000017500000000170600000000000020375 0ustar00zuulzuul00000000000000--- # Definitions of Ironic based jobs with a dedicated project entry to keep them # out of general entry. 
- job: name: metalsmith-integration-openstacksdk-src parent: metalsmith-integration-glance-netboot-cirros-direct required-projects: - openstack/openstacksdk - job: name: bifrost-integration-openstacksdk-src parent: bifrost-integration-tinyipa-ubuntu-jammy required-projects: - openstack/ansible-collections-openstack - openstack/openstacksdk - job: name: ironic-inspector-tempest-openstacksdk-src parent: ironic-inspector-tempest required-projects: - openstack/openstacksdk - project: check: jobs: # Ironic jobs, non-voting to avoid tight coupling - ironic-inspector-tempest-openstacksdk-src: voting: false - bifrost-integration-openstacksdk-src: voting: false - metalsmith-integration-openstacksdk-src: voting: false ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1725296355.0 openstacksdk-4.0.0/zuul.d/project.yaml0000664000175000017500000000277300000000000020013 0ustar00zuulzuul00000000000000--- # Central project entity. It pulls general templates and basic jobs. # functional-jobs, metal-jobs and acceptance-jobs are being # merged with this entity into singe one. 
- project: templates: - check-requirements - openstack-python3-jobs - openstacksdk-functional-tips - openstacksdk-tox-tips - os-client-config-tox-tips - osc-tox-unit-tips - publish-openstack-docs-pti - release-notes-jobs-python3 check: jobs: - openstack-tox-py312 - opendev-buildset-registry - nodepool-build-image-siblings: voting: false - dib-nodepool-functional-openstack-ubuntu-noble-src: voting: false - openstacksdk-functional-devstack - openstacksdk-functional-devstack-networking - openstacksdk-functional-devstack-networking-ext - openstacksdk-functional-devstack-magnum: voting: false - openstacksdk-functional-devstack-manila: voting: false - openstacksdk-functional-devstack-masakari: voting: false - openstacksdk-functional-devstack-ironic: voting: false - osc-functional-devstack-tips: voting: false - ansible-collections-openstack-functional-devstack: voting: false gate: jobs: - opendev-buildset-registry - openstacksdk-functional-devstack - openstacksdk-functional-devstack-networking - openstacksdk-functional-devstack-networking-ext