././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.866407 oslo.versionedobjects-3.4.0/0000775000175000017500000000000000000000000016064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/.coveragerc0000664000175000017500000000020500000000000020202 0ustar00zuulzuul00000000000000[run] branch = True source = oslo_versionedobjects omit = oslo_versionedobjects/tests/* [report] ignore_errors = True precision = 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/.mailmap0000664000175000017500000000013100000000000017500 0ustar00zuulzuul00000000000000# Format is: # # ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/.pre-commit-config.yaml0000664000175000017500000000252300000000000022347 0ustar00zuulzuul00000000000000# We from the Oslo project decided to pin repos based on the # commit hash instead of the version tag to prevend arbitrary # code from running in developer's machines. To update to a # newer version, run `pre-commit autoupdate` and then replace # the newer versions with their commit hash. default_language_version: python: python3 repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: 9136088a246768144165fcc3ecc3d31bb686920a # v3.3.0 hooks: - id: trailing-whitespace # Replaces or checks mixed line ending - id: mixed-line-ending args: ['--fix', 'lf'] exclude: '.*\.(svg)$' # Forbid files which have a UTF-8 byte-order marker - id: check-byte-order-marker # Checks that non-binary executables have a proper shebang - id: check-executables-have-shebangs # Check for files that contain merge conflict strings. 
- id: check-merge-conflict # Check for debugger imports and py37+ breakpoint() # calls in python source - id: debug-statements - id: check-yaml files: .*\.(yaml|yml)$ - repo: local hooks: - id: flake8 name: flake8 additional_dependencies: - hacking>=6.1.0,<6.2.0 language: python entry: flake8 files: '^.*\.py$' exclude: '^(doc|releasenotes|tools)/.*$' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/.stestr.conf0000664000175000017500000000007600000000000020340 0ustar00zuulzuul00000000000000[DEFAULT] test_path=./oslo_versionedobjects/tests top_path=./ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/.zuul.yaml0000664000175000017500000000105600000000000020027 0ustar00zuulzuul00000000000000- project: check: jobs: - oslo.versionedobjects-src-grenade-multinode templates: - check-requirements - lib-forward-testing-python3 - openstack-python3-jobs - periodic-stable-jobs - publish-openstack-docs-pti - release-notes-jobs-python3 - job: name: oslo.versionedobjects-src-grenade-multinode parent: grenade-multinode voting: false irrelevant-files: - ^(test-|)requirements.txt$ - ^setup.cfg$ required-projects: - opendev.org/openstack/oslo.versionedobjects ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452798.0 oslo.versionedobjects-3.4.0/AUTHORS0000664000175000017500000003762700000000000017153 0ustar00zuulzuul00000000000000Aaron Lee Aaron Lee Aaron Rosen Aaron Rosen Aarti Kriplani Adam Johnson Adam Spiers Ade Lee Aditi Raveesh Ahmad Hassan Akihiro MOTOKI Akihiro Motoki Akihiro Motoki Alessandro Pilotti Alessandro Tagliapietra Alex Gaynor Alex Glikson Alex Meade Alexander Bochkarev Alexander Tivelkov Alexei Kornienko Alexey Roytman Alexis Lee Alvaro Lopez Garcia Andreas Jaeger Andreas Jaeger Andrew Bogott Andrew Clay Shafer Andrew Laski Andrew Laski Andrew 
Melton Andrey Kurilin Andy McCrae Andy Smith Andy Southgate Anne Gentle Anthony Young Anton V. Yanchenko Anusree Arata Notsu Armando Migliaccio Armando Migliaccio Attila Fazekas Avishay Traeger Balazs Gibizer Belmiro Moreira Ben Nemec Ben Nemec Ben Swartzlander Boris Pavlovic Brad Hall Brant Knudson Brian Elliott Brian Elliott Brian Lamar Brian Waldon Brian Waldon Burt Holzman Cerberus Chang Bo Guo ChangBo Guo(gcb) Chaozhe.Chen Charles Short Chet Burgess Chiradeep Vittal Chris Behrens Chris Dent Chris Friesen Chris Yeoh Christian Berendt Christopher Lefelhocz Christopher Yeoh Chuck Short Chuck Short Clark Boylan Claudiu Belu Corey Bryant Cory Wright Cyril Roelandt Dan Berrange Dan Prince Dan Prince Dan Smith Dan Smith Dan Smith Dan Wendlandt Daniel Bengtsson Daniel P. Berrange Davanum Srinivas (dims) Davanum Srinivas Davanum Srinivas Dave Walker (Daviey) David Pravec David Ripton David Subiros David Xie Dean Troyer Deepak Garg Devananda van der Veen Devendra Modium Devin Carlen Devin Carlen Dinesh Bhor Dirk Mueller Dirk Müller Dmitry Borodaenko Donal Lafferty Doug Hellmann Doug Hellmann Doug Wiegley Drew Thorstensen Duncan McGreggor Ed Leafe Eldar Nugaev Eldar Nugaev Eoghan Glynn Eric Day Eric Guo Eric Windisch Eugene Kirpichov Eugene Nikanorov Eugeniya Kudryashova Ewan Mellor Feodor Tersin Flavio Percoco Gabe Westmaas Gary Kotton Gary Kotton Gevorg Davoian Ghanshyam Ghanshyam Mann Ghe Rivero Graham Hayes Grant Murphy Grzegorz Grasza Guoshuai Li Haiwei Xu Hans Lindgren He Yongli Hengqing Hu Hervé Beraud Hirofumi Ichihara Hisaharu Ishii Ian Wienand Ihar Hrachyshka Ilya Alekseyev Ilya Alekseyev Ionuț Arțăriși Irena Berezovsky Isaku Yamahata Jakub Ruzicka James Carey James E. Blair James E. 
Blair Jason Cannavale Jason Koelker Jason Kölker Javier Pena Jay Lau Jay Pipes Jesse Andrews Jesse Andrews Jesse Andrews Jian Wen Jian Wen Jim Fehlig Jimmy Bergman Joe Gordon Joe Gordon Joe Heck Joel Coffman Joel Moore joelbm24@gmail.com <> Johannes Erdfelt Johannes Erdfelt Johannes Erdfelt John Bresnahan John Garbutt John Garbutt John Griffith John Herndon John Perkins John Tran Josh Durgin Josh Kearney Josh Kearney Joshua Harlow Joshua Harlow Joshua Harlow Joshua McKenty Joshua McKenty Joshua McKenty Julian Sy Julian Sy Julien Danjou Julien Danjou Justin SB Justin Santa Barbara Justin Santa Barbara Justin Shepherd KIYOHIRO ADACHI Kai Qiang Wu Karen Noel Kei Masumoto Kei masumoto Keisuke Tagami Ken Pepple Ken'ichi Ohmichi Kenneth Giusti Kevin Benton Kevin L. Mitchell Koji Iida Krisztian Gacsal Kun Huang Kylin CG Launchpad Translations on behalf of nova-core <> Leandro I. Costantino Liam Kelleher Liang Chen Lianhao Lu Lorin Hochstein Luigi Toscano Luong Anh Tuan Lvov Maxim MORITA Kazutaka Mark McLoughlin Mark Washenberger Martin Hickey Maru Newby Masanori Itoh Masayuki Igawa Mate Lakat Matt Dietz Matt Joyce Matt Odden Matt Riedemann Matt Riedemann Matthew Booth Matthew Hooker Matthew Oliver Matthew Sherborne Mauro S. M. 
Rodrigues Mehdi Abaakouk Michael Gundlach Michael Still Michal Jastrzebski (inc0) Miguel Angel Ajo Mike Durnosvistov Mike Lundy Mike Pittaro Mike Scherbakov Mikhail Durnosvistov Mohammed Naser Moisés Guimarães de Medeiros Monsyne Dragon Monty Taylor Morgan Fainberg Moshe Levi MotoKen Muneyuki Noguchi NTT PF Lab Nachi Ueno Nachi Ueno Nachi Ueno Naveed Massjouni Neil Jerram Nicholas Kuechler Nick Bartos Nikola Dipanov Nikolay Sokolov Nirmal Ranganathan Octavian Ciuhandu Oleg Bondarev Ollie Leahy OpenStack Release Bot Paul Murray Pavel Kravchenco Pawel Koniszewski Peng Yong Phil Day Przemyslaw Czesnowicz Pádraig Brady Radoslav Gerganov Rafi Khardalian Renuka Apte Ricardo Carrillo Cruz Rick Clark Rick Harris Rick Harris Robert Collins Robert Pothier Rodolfo Alonso Hernandez Roman Bogorodskiy Roman Podoliaka Ronald Bradford Ronen Kat Rongze Zhu RongzeZhu Ruby Loo Rushi Agrawal Russell Bryant Ryan Lane Ryan Lane Ryan Moe Ryan Rossiter Ryu Ishimoto Sahid Orentino Ferdjaoui Sahid Orentino Ferdjaoui Salvatore Orlando Salvatore Orlando Sam Betts Sam Morrison Sandy Walsh Sandy Walsh Scott Moser Sean Chen Sean Dague Sean Dague Sean Dague Sean McCully Sean McGinnis Sean McGinnis Sergey Nikitin Sergey Skripnick Sergey Vilgelm Serhii Skrypnik Seyeong Kim Shane Wang Shlomi Sasson Shuangtai Tian Sirisha Devineni Sleepsonthefloor Solly Ross Soren Hansen Soren Hansen Stanislaw Pitucha Stephen Finucane Steve Martinelli Steven Kaufer Sulochan Acharya SuperStack Surojit Pathak Sylvain Bauza Takashi Kajinami Takashi Kajinami Takashi NATSUME Thang Pham Thierry Carrez Thomas Goirand Thuleau Édouard Tiago Mello Tim Simpson Todd Willey Todd Willey Tomoki Sekiyama Tony Breeds Tovin Seven Trey Morris Trey Morris Tushar Patil Unmesh Gurjar Unmesh Gurjar Victor Sergeyev Victor Stinner Vincent Hou Vishvananda Ishaya Vishvananda Ishaya Vladik Romanovsky Vu Cong Tuan Wangpan William Wolf William Wolf Xavier Queralt Yaguang Tang Yaguang Tang Yun Mao Yunhong Jiang Yuriy Taraday Yuriy Zveryanskyy Zed 
Shaw Zhi Yan Liu Zhi Yan Liu ZhiQiang Fan ZhijunWei Zhiteng Huang ZhongShengping Zhongyue Luo Zhongyue Luo andy brian-lamar caoyuan danwent danwent@gmail.com <> dengzhaosen fujioka yuuichi gecong1973 gengchc2 guohliu hartsocks hnyang ivan-zhu jacky06 jaypipes@gmail.com <> jichen jichenjc john-griffith liu-sheng liyingjun lizheming lzyeval masumotok matt.dietz@rackspace.com <> mdietz melanie witt melissaml pengyuwei ricolin root root ruichen s iwata sateesh shihanzhang shreeduth-awasthi shuangtai songwenping tengqm termie termie unicell vladimir.p wangqi yangyawei Édouard Thuleau ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/CONTRIBUTING.rst0000664000175000017500000000134200000000000020525 0ustar00zuulzuul00000000000000If you would like to contribute to the development of oslo's libraries, first you must take a look to this page: https://specs.openstack.org/openstack/oslo-specs/specs/policy/contributing.html If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/oslo.versionedobjects ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452798.0 oslo.versionedobjects-3.4.0/ChangeLog0000664000175000017500000055643100000000000017654 0ustar00zuulzuul00000000000000CHANGES ======= 3.4.0 ----- * reno: Update master for unmaintained/zed * Remove old excludes * Replace usage of pytz * Update master for stable/2024.1 * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria 3.3.0 ----- * reno: Update master for unmaintained/yoga * Bump hacking * Update python classifier in setup.cfg * coveragerc: Remove non-existent path * Update master for stable/2023.2 3.2.0 ----- * Bump bandit and make oslo.versionedobjects compatible with latest rules * Imported Translations from Zanata * Revert "Moves supported python runtimes from version 3.8 to 3.10" * Moves supported python runtimes from version 3.8 to 3.10 * Avoid TypeError when building for Sphinx 6.1.1 and above * Update master for stable/2023.1 * Imported Translations from Zanata 3.1.0 ----- * Add Python3 antelope unit tests * Update master for stable/zed 3.0.1 ----- * Python3.11: Do not use inspect.ArgSpec 3.0.0 ----- * Imported Translations from Zanata * Drop python3.6/3.7 support in testing runtime * Remove unnecessary unicode prefixes * Add Python3 zed unit tests * Update master for stable/yoga * setup.cfg: Replace dashes by underscores 2.6.0 ----- * Update python testing classifier * Add Python3 yoga unit tests * Update master for stable/xena 2.5.0 ----- * Replace deprecated inspect.getargspec * Do not patch inspect.argspec directly * Changed minversion in tox to 3.18.0 * Move flake8 as a pre-commit local target * Add Python3 xena unit tests * Update master for stable/wallaby * Dropping lower constraints references * Dropping lower constraints testing * Use TOX\_CONSTRAINTS\_FILE * Use 
py3 as the default runtime for tox 2.4.0 ----- * Replace md5 with oslo version * Adding pre-commit * Add Python3 wallaby unit tests * Update master for stable/victoria 2.3.0 ----- * [goal] Migrate testing to ubuntu focal * zuul: port the legacy multinode grenade job * Bump bandit version 2.2.0 ----- * Remove translation sections from setup.cfg 2.1.0 ----- * Stop to use the \_\_future\_\_ module * Cap jsonschema 3.2.0 as the minimal version * Switch to newer openstackdocstheme and reno versions * Imported Translations from Zanata * Update hacking for Python3 * Align contributing doc with oslo's policy * Bump default tox env from py37 to py38 * Add py38 package metadata * trivial: Mock warnings * Add release notes links to doc index * Add Python3 victoria unit tests * Update master for stable/ussuri 2.0.2 ----- * Use unittest.mock instead of third party mock 2.0.1 ----- * remove outdated header 2.0.0 ----- * Provide stable getargspec() behavior for method fingerprinting * Bump to hacking 2.x * Drop use of six * [ussuri][goal] Drop python 2.7 support and testing * Handle unregistered nested objects * gitignore: Ignore reno artefacts 1.37.0 ------ * Import ABCs from collections.abc * Migrate grenade jobs to py3 * tox: Keeping going with docs * Update master for stable/train 1.36.1 ------ * Add Python 3 Train unit tests * Resolve some issue with tox.ini, setup.cfg * Dropping the py35 testing * Replace git.openstack.org URLs with opendev.org URLs * Cap Bandit below 1.6.0, update sphinx requirement, uncap jsonschema 1.36.0 ------ * OpenDev Migration Patch * Fix deprecation warnings * Replace openstack.org git:// URLs with https:// * Update master for stable/stein 1.35.1 ------ * add python 3.7 unit test job * Allow lists to be generated from any non-string iterable * Update hacking version 1.35.0 ------ * Lower ObjectVersionChecker logging to DEBUG level * Add ListOfUUIDField * Use template for lower-constraints * Update mailinglist from dev to discuss * Clean up 
.gitignore references to personal tools 1.34.1 ------ * Imported Translations from Zanata * add lib-forward-testing-python3 test job * add python 3.6 unit test job * import zuul job settings from project-config * delete unused '=' * Update reno for stable/rocky * Switch to stestr 1.33.3 ------ * Add release notes link to README 1.33.2 ------ * fix tox python3 overrides * Make the hash of a defaulted set field stable * Add support version in exception output * Remove moxstubout usage * Remove stale pip-missing-reqs tox test * Trivial: Update pypi url to new url * set default python to python3 1.33.1 ------ * Don't force unicode strings for UUID coercion 1.33.0 ------ * Fixing UUID coerce function for unicode non uuid form * Updated from global requirements * add lower-constraints job * pypy is not checked at gate * Add bindep.txt file to prevent fallback to generic list * Updated from global requirements 1.32.0 ------ * Imported Translations from Zanata * Update links in README * Zuul: Remove project name * Imported Translations from Zanata * Zuul: Remove project name * Allow escalation of UUID validation warning to error * Update reno for stable/queens * Handle TZ change in iso8601 >=0.1.12 * Updated from global requirements * Updated from global requirements * Updated from global requirements 1.31.1 ------ * Treat doc warnings as errors 1.31.0 ------ * Fix wrong indent of releasenotes * Remove log translations * Add bandit to pep8 job * Updated from global requirements * Follow the new PTI for document build * Avoid tox\_install.sh for constraints support 1.30.0 ------ * Updated from global requirements * Remove setting of version/release from releasenotes * Updated from global requirements * Updated from global requirements * Updated from global requirements * Zuul: add file extension to playbook path 1.29.0 ------ * Imported Translations from Zanata * Migrate to zuulv3 - move legacy jobs to project * Updated from global requirements * Imported Translations from 
Zanata * Use newer location for iso8601 UTC 1.28.0 ------ * Updated from global requirements 1.27.0 ------ * Updated from global requirements * iso8601.is8601.Utc No Longer Exists * Updated from global requirements * Imported Translations from Zanata * Add a TimestampedObject mixin * Updated from global requirements * Update reno for stable/pike * Updated from global requirements 1.26.0 ------ * Updated from global requirements * Update URLs in documents according to document migration * Add unit test for unicode in object \_\_repr\_\_ * Fix the object is unhashable when inheriting the class "ComparableVersionedObject" 1.25.1 ------ * rearrange existing documentation to fit the new standard layout * switch from oslosphinx to openstackdocstheme * Enable warning-is-error in doc build 1.25.0 ------ * Updated from global requirements * Updated from global requirements * Remove pbr warnerrors in favor of sphinx check * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements 1.24.0 ------ * Fix string interpolation in ValueError * Updated from global requirements * Updated from global requirements * Updated from global requirements 1.23.0 ------ * Start adding some basic ovo examples (with an initial basic one) * Updated from global requirements 1.22.0 ------ * Updated from global requirements * [Fix gate]Update test requirement * Updated from global requirements * Remove support for py34 * Update reno for stable/ocata 1.21.0 ------ * Change Object schema type to be array instead of string * Add Constraints support * Replace six.iteritems() with .items() * Updated from global requirements 1.20.0 ------ 1.19.0 ------ * Fix a typo * Show team and repo badges on README * Updated from global requirements * Add reno for release notes management * fields: fix stringifying of objects when subclasses=True * Refactor get\_schema for patterned FieldTypes * Updated from global requirements * Fix 
recursive deepcopy * Updated from global requirements * Fix get\_schema() for Enum 1.18.0 ------ * Changed the home-page link * Updated from global requirements * Changed the home-page link * Updated from global requirements * JSON Schema get\_schema implementation for last few fields * Updated from global requirements * Add ObjectListBase concat methods * Fix documentation typo * Fix incorrect timestamp comment * Updated from global requirements 1.17.0 ------ * Add get\_schema for IPV6Address FieldType class * Fix remotable object change tracking 1.16.0 ------ * Updated from global requirements * JSON schema get\_schema implementation for more complex fields 1.15.0 ------ * Updated from global requirements * Adds new fields and field types * Fix to\_json\_schema() call 1.14.0 ------ * Updated from global requirements * Add Python 3.5 classifier and venv 1.13.0 ------ * Updated from global requirements * Imported Translations from Zanata * JSON schema get\_schema implementation for common fields * Updated from global requirements * JSON schema generation for versioned objects * Extend test\_hashes to allow extra info gathering * Updated from global requirements * Improved error message for Object.coerce * Imported Translations from Zanata 1.12.0 ------ * Imported Translations from Zanata 1.11.0 ------ * Updated from global requirements 1.10.0 ------ * Fix ComparableVersionedObject in python 3.4 * Updated from global requirements * Updated from global requirements * Add objectify decorator for readability 1.9.1 ----- 1.9.0 ----- * Updated from global requirements * Fix a typo in Enum error path * Fix issue with coercing valid\_values to a tuple * Replace safe\_utils.getcallargs with inspect.getcallargs * Imported Translations from Zanata * Remove direct dependency on babel * Imported Translations from Zanata * Introduce fixture to enforce sorted order for object changes * Expose object context thru a public property * Updated from global requirements * Fix 
compare\_obj() to obey missing/unset fields * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Add BaseEnumField valid\_values introspection * Use primitive type name in ValueError for Object field coerce failure * Update formatting for example statemachine field * Add a pci address field * Remove the executable bit from files * Updated from global requirements 1.7.0 ----- * Updated from global requirements * Updated from global requirements 1.6.0 ----- * Updated from global requirements * Updated from global requirements 1.5.0 ----- * Deprecate current UUIDField behaviour * Make sure functions in child object registries work * Fix messages in exceptions raised due to invalid state transitions * Update translation setup * Updated from global requirements * Add ability to pass args/kwargs to obj\_class init * Updated from global requirements * Added a state machine field * Use oslo\_utils for tupleizing versions * Add temporary registry pattern to VersionedObjectRegistry 1.4.0 ----- * Updated from global requirements * Imported Translations from Zanata * Updated from global requirements * Move compare\_obj to the fixture module for external consumption 1.3.0 ----- * Updated from global requirements * Fix MACAddress coerce method to validate string first * Updated from global requirements * Remove meaningless default value in \_\_get\_\_() 1.2.0 ----- * Added checking compat using manifest backports * Fixed nondeterministicness of extra\_data test * Updated from global requirements * Updated from global requirements * Updated from global requirements * Clean up directory openstack/common * Use method is\_compatible from olso\_utils.versionutils * Trival: Remove 'MANIFEST.in' 1.1.0 ----- * Add a callable to gather extra data for fps * Add testing for ObjectVersionChecker fixture * Leverage dict comprehension in PEP-0274 * Use version convert methods from oslo\_utils.versionutils 
1.0.0 ----- * Add netaddr-related field types * Updated from global requirements * Adds new MACAddressField * Fix the wrong reference for DeprecationWarning * cleanup tox.ini * Updated from global requirements 0.13.0 ------ * Updated from global requirements * Remove remote\_object\_calls from \_BaseTestCase * Ensure\_\_repr\_\_ return value is encoded * Cleanup docstring for obj\_tree\_get\_versions * Updated from global requirements * Imported Translations from Zanata * Add warnings for deprecated IndirectionAPI methods * Make tests properly use object\_class\_action\_versions() * Make class action calls use version manifest * Updated from global requirements 0.12.0 ------ * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements 0.11.0 ------ * Fix obj\_to\_primitive() when using manifests (list version) * Add SensitiveString field type * Fix obj\_to\_primitive() when used with a manifest * Move \_\_contains\_\_ from VersionedObjectDictCompat to VersionedObject * Fix coverage configuration and execution * Imported Translations from Zanata * make configuration options discoverable * Add shields.io version/downloads links/badges into README.rst * add pbr-generated release history * Change ignore-errors to ignore\_errors * Updated from global requirements * Added test for DateTimeField in obj\_get\_change * Fix missing value types for log message 0.10.0 ------ * Imported Translations from Zanata * ObjectVersionChecker fingerprint documentation 0.9.0 ----- * Updated from global requirements * Allow calling obj\_to\_primitive() with version\_manifest 0.8.0 ----- * Switch back to iso format on the wire * Save and re-raise exception * Check for obj\_relationships in List objects * Updated from global requirements * Make compat of object lists use manifest function * Add tests for subobject backporting by manifest * Make ObjectListBase a 
collections.Sequence * Switch from deprecated timeutils.isotime * Make direct call to object's obj\_from\_primitive * Add usage information * Imported Translations from Transifex * Updated from global requirements * Add validation on target\_version arg when calling obj\_to\_primitive 0.7.0 ----- * Fix error handling in the exception module on py3 * Imported Translations from Transifex * Updated from global requirements * Support using the version manifest for obj\_make\_compatible() * Updated from global requirements * Implement multi-version object backport method * Add a new DictOfListOfStrings type of field * Imported Translations from Transifex * Updated from global requirements * Add a new VersionPredicate type of field * Add obj\_tree\_get\_versions() utility method 0.6.0 ----- * Imported Translations from Transifex * Updated from global requirements * Updated from global requirements * Fix serializer supported version reporting in object\_backport * Remove dict assumption from remotable decorator * Updated from global requirements * Remove ununsed class in test.py * Replace 'hypervisor' with 'provided' version * Updated from global requirements * Compound fields now coerce their elements' values * Move fixture dependencies to extras * Updated from global requirements * Imported Translations from Transifex 0.5.2 ----- * Sort child versions before generating fingerprint * Imported Translations from Transifex * Manual update the requirements * Remove classifiers not supported * Updated from global requirements 0.5.1 ----- * Revert "Compound fields now coerce their elements' values" 0.5.0 ----- * Add oslo.config to requirements * Add tox target to find missing requirements * Updated from global requirements * fields: allow subclasses in ObjectField/ListOfObjectsField * Updated from global requirements * Fix "comparason" typo in doc for ComparableVersionedObject * Updated from global requirements * Compound fields now coerce their elements' values * fields: 
report what the wrong type was when coerce fails * Updated from global requirements * Decouple ObjectVersionChecker and VersionedObjectRegistry * Updated from global requirements 0.4.0 ----- * Remove unnecessary openstack-common.conf * Updated from global requirements 0.3.0 ----- * Imported Translations from Transifex * Imported Translations from Transifex * fields: introduce BaseEnumField to allow subclassing * fields: add a FlexibleBoolean field type * Enhance documentation * Add delattr support * Imported Translations from Transifex * Enhance dict compat of VersionedObjectDictCompat * Drop outdated notes and docstrings * Add field name to error messages in object type checking * Adds missing WebOb in requirements.txt * Add enum fieldtype field * Get the indirection\_api from the current VersionedObject class * Updated from global requirements * Imported Translations from Transifex 0.2.0 ----- * Uncap library requirements for liberty * Standardize setup.cfg summary for oslo libs * Update to the latest version of hacking * Copy the default value for field * Updated from global requirements * New config group for oslo\_versionedobjects 0.1.1 ----- * Sync from oslo-incubator * Properly serialize/deserialize arguments in fake indirection api * Allow passing serializer and indirection API objects to Fixture * Make serializer use the provided base class for the indirection api 0.1.0 ----- * Update README.rst * Remove serialize\_args(), per the work items * Allow subclasses to define the serialization namespaces * Sync default-setting change from Nova * Sync obj\_reset\_changes(recursive=) change from Nova * Remove already commented out and unused fixtures * Rename checks to fixture and update requirements * Cleanup comment and H803 hacking * Generalize the object relationships test * Generalize compatibility testing * Fixes for heat implementation * Add a test to ensure subclassibility of the object registry * Generalize object dependency change detection * Generalize 
object hash-based change detection * Generalize remote testing infrastructure * Add conditional object registration * Remove Nova objects module registration code * Generalize the indirection\_api interface * Disable some unstable tests until they are generalized * Fix py34 for iteritems() use in base.py * Remove internal items from public namespace * Rename utils -> \_utils * Set up translations * Use six.wraps for py2/py3 compat * Remove more Nova-isms * Compatibility with Python 3 * Remove references to Nova * Remove the ambiguous context argument on remotable methods * Replace metaclass-based registry with a decorator-based one * Rename tests/fixtures to test/obj\_fixtures to avoid import collisions * Fix initial test failures * clean up and get most test running * sync oslo-incubator modules * apply oslo-cookiecutter template * rename files * object: serialize set to list * Fix leaking exceptions from scheduler utils * Add flavor fields to Instance object * Revert "Raise if sec-groups and port id are provided on boot" * Use a workarounds option to disable rootwrap * Create a 'workarounds' config group * Revert "Adds keypair type database migration" * Adds keypair type database migration * Revert temporary hack to monkey patch the fake rpc timeout * Raise if sec-groups and port id are provided on boot * Add method for getting the CPU pinning constraint * Ignore warnings from contextlib.nested * Enable check for H238 rule * Call ComputeNode instead of Service for getting the nodes * increase fake rpc POLL\_TIMEOUT to 0.1s * Performance: leverage dict comprehension in PEP-0274 * Do not use deprecated assertRaisesRegexp() * speed up tests setting fake rpc polling timeout * Make numa\_usage\_from\_instances consider CPU pinning * Fix and re-gate on H306 * Move WarningsFixture after DatabaseFixture so emit once * Fix obj\_to\_primitive() expecting the dict interface methods * initialize objects with context in base object tests * Add WarningsFixture to only emit 
DeprecationWarning once in a test run * Update docstring for wrap\_exception decorator * Add numa\_node to PCIDevice * Nuke XML support from Nova REST API - Phase 2 * Remove unused methods in nova utils * Use get\_my\_ipv4 from oslo.utils * Reuse methods from netutils * Move metadata filtering logic to utils.py * objects: remove NovaObjectDictCompat from Tag object * extract RPC setup into a fixture * Add pci\_device\_pools to ComputeNode object * objects: add method to verify requested hugepages * hardware: add method to return requested memory page size * Reject non existent mock assert calls * objects: allow creation of objects without dict item compat * Fix base obj\_make\_compatible() handling ListOfObjectsField * Add obj\_as\_admin() to NovaPersistentObject * simplify database fixture to the features we use * extract the timeout setup as a fixture * move all conf overrides to conf\_fixture * move ServiceFixture and TranslationFixture * extract fixtures from nova.test to nova.test.fixtures * Objects: add in missing translation * objects: introduce numa pages topology as an object * Libvirt: Fsfreeze during live-snapshot of qemu/kvm instances * Add cn\_get\_all\_by\_host and cn\_get\_by\_host\_and\_node to ComputeNode * Add host field to ComputeNode * rename oslo.concurrency to oslo\_concurrency * Remove needless workaround in utils module * Remove except Exception cases * Adds global API version check for microversions * Implement microversion support on api methods * Add CPU pinning data to InstanceNUMACell object * Adds NUMA CPU Pinning object modeling * objects: Add several complex field types * Compute: Catch binding failed exception while init host * Break V2 XML Support * Added objects Tag and TagList * Switch to moxstubout and mockpatch from oslotest * factor out \_setup\_logging in test.py * extract \_setup\_timeouts in test.py * objects: introduce numa objects * Correct InvalidAggregateAction translation&format * Adds APIVersionRequest class for API 
Microversions * remove test.ReplaceModule from test.py * Added db API layer for CRUD operations on instance tags * Introduce a .z version element for backportable objects * Add count and limit\_check methods to quota object * Add obj\_set\_defaults() to NovaObject * Convert hardware.VirtCPUTopology to nova object * compute: rename hvtype.py to hv\_type.py * Replacement \`\_\` on \`\_LW\` in all LOG.warning part 1 * Replacement \`\_\` on \`\_LE\` in all LOG.exception * Exceptions: finish sentence with fullstop * Pass expected\_attrs to instance\_get\_active\_by\_window\_joined * Add update\_cells to BandwidthUsage.create() * Generalize dependent object backporting * GET servers API sorting compute/instance/DB updates * move all tests to nova/tests/unit * Fixed typos in nova.objects.base docstrings * Switch Nova to use oslo.concurrency * remove use of explicit lockutils invocation in tests * Revert "Switch Nova to use oslo.concurrency" * Switch Nova to use oslo.concurrency * Faster get\_attrname in nova/objects/base.py * Replacement \`\_\` on \`\_LE\` in all LOG.error * Add supported\_hv\_specs to ComputeNode object * Don't log every (friggin) migration version step during unit tests * Update NoMoreFixedIps message description * Use oslo.utils * console: introduce a new exception InvalidConnectionInfo * console: introduce a new exception InvalidToken * VMware: Remove tests for None in fake.\_db\_content['files'] * Fixes missing ec2 api address disassociate error on failure * add time to logging in unit tests * Revert "libvirt: support live migrate of instances with conf drives" * Handle volume bdm not found in lvm.get\_volume\_size * compute: tweaks to vm\_mode APIs to align with arch/hvtype * Remove stale code from ObjectListBase * virt: move assertPublicAPISignatures into base test class * compute: Add standard constants for hypervisor virt types * Add InstancePCIRequests object * Network: enable instance deletion when dhcp release fails * Move to oslo.db * Support 
image property for config drive * Add bandwidth usage object * virt: setup TCP chardevice in libvirt driver * console: add serial console module * Add instance\_extra table and related objects * Move and generalize decorator serialize\_args to nova.objects.base * Remove concatenation with translated messages * Add NetworkRequest object and associated list * Remove use of str on exceptions * libvirt: add validation of migration hostname * Add a Set and SetOfIntegers object fields * Make Object FieldType from\_primitive pass objects * Add QuotaError handling to servers rebuild API * libvirt: support live migrations of instances with config drives * Change error status code for out of quota to be 403 instead of 413 * Correct seconds of a day from 84400 to 86400 * Extend the docstring for obj\_make\_compatible() with examples * Add api extension for new network fields * Use real exceptions for network create and destroy * virt: helper for processing NUMA topology configuration * Make NovaObjectSerializer work with dicts * Split EC2 ID validator to validator per resource type * Create a Scheduler client library * docs - Fix errors,warnings from document generation * Enhance PCI whitelist * add log exception hints in some modules * Add ListOfDictOfNullableString field type * Network: interface attach and detach raised confusing exception * docs - Fix docstring issues * Add standard constants for CPU architectures * Fix and Gate on E265 * Turn periodic tasks off in all unit tests * Add valid method check for quota resources * Use oslo.i18n * Avoid possible timing attack in metadata api * Cleanup and gate on hacking E713 rule * Correct exception for flavor extra spec create/update * Improve shared storage checks for live migration * Fix and gate on H305 and H307 * Make compute api use util.check\_string\_length * Prevent max\_count > 1 and specified ip address as input * virt: add helper module for determining VCPU topology * Add \_\_repr\_\_ handler for NovaObjects * Use 
default rpc\_response\_timeout in unit tests * Replace nova.utils.cpu\_count() with processutils.get\_worker\_count() * Add Agent object * Avoid traceback logs from simple tenant usage extension * Catch ProcessExecutionError in revoke\_cert * Initialize objects field in ObjectsListBase class * Fix object change detection * Don't translate debug level logs in nova * Add testing for hooks * Use VIF details dictionary to get physical\_network * Add read\_only field attribute * Add missing translation support * Check if volume is bootable when creating an instance * Fix EC2 not found errors for volumes and snapshots * xenapi: move StorageError into global exception.py * Add unit test trap for object change detection * Enable flake8 F841 checking * Raise HTTPInternalServerError when boot\_from\_volume with cinder down * Fix nova/compute direct use of instance module objects * Address issues with objects of same name * Register objects in more services * Check object's field * Use Field in fixed\_ip * Improve conductor error cases when unshelving * objects: restore some datetime field comments * Rename NotAuthorized exception to Forbidden * Remove utils.reset\_is\_neutron() to avoid races * Fix up import of conductor * Use debug level logging in unit tests, but don't save them * Avoid the possibility of truncating disk info file * support local debug logging * Revert "Use debug level logging during unit tests" * libvirt: remove\_logical\_volumes should remove each separately * Nova utils: add in missing translation * Require admin context for interfaces on ext network * Persist image format to a file, to prevent attacks based on changing it * Add a decorator decorator that checks func args * Raise error on nova-api if missing subnets/fixed\_ips on networks/port * Refuse to block migrate instances with config drive * Make NovaObject report changed-ness of its children * Add security\_group\_rule to objects registry * Revert "Adding image multiple location support" * Use 
debug level logging during unit tests * Adding image multiple location support * Add a missing space in a log message * Sync the latest DB code from oslo-incubator * Add watchdog device support to libvirt driver * When a claim is rejected, explain why * Support IPv6 when booting instances * Enable flake8 H404 checking * Use oslo-common's logging fixture * libvirt: Enable custom video RAM setting * libvirt: host specific virtio-rng backend * VMware: fix the VNC port allocation * Make is\_neutron() thread-safe * Break out the meat of the object hydration process * Fix VirtualInterfaceMacAddressException message * Remove vi modelines * Port to oslo.messaging * Add FloatingIP object implementation * Add FixedIP Object implementation * Add block device mapping objects implementation * Make obj\_to\_primitive() handle netaddr types * Add Network object * Use (# of CPUs) workers by default * Add DNSDomain object * Small edits on help strings * Add sort() method to ObjectListBase * Add VirtualInterface object * Make exception message more friendly * Add preserve\_ephemeral option to rebuild * Refactor CIDR field to use netaddr.IPNetwork * Remove unused dict BYTE\_MULTIPLIERS * replace type() to isinstance() in nova * Adds new method nova.utils.get\_hash\_str * Use the full string for localisation * Fix interprocess locks when running unit-tests * ValueError should use '%' instead of ',' * Setting the xen vm device id on vm record * Rename instance\_type to flavor in nova.utils and nova.compute.utils * Make Serializer/Conductor able to backlevel objects * Require List objects to be able to backlevel their contents * Add IPAddress field type in object models * Correct uses of :params in docstrings * Libvirt: Making the video driver element configurable * Make obj\_from\_primitive() preserve version information * Make it possible to override test timeout value * Fix monkey\_patch docstring bug * Added a new scheduler metrics weight plugin * Misc typos in nova * Fix a tiny 
double quote matching in field obj model * Add API input validation framework * Add FloatField for objects * objects: declare some methods as static * Handle UnicodeEncodeError in validate\_integer * Clean up how test env variables are parsed * Rename InstanceType exceptions to Flavor * Added monitor (e.g. CPU) to monitor and collect data * Xenapi: Allow windows builds with xentools 6.1 and 6.2 * Refactor UnexpectedTaskStateError for handling of deleting instances * Move \`diff\_dict\` to compute API * Include name/level in unit test log messages * Nova-all: Replace basestring by six for python3 compatability * Pull system\_metadata for notifications on instance.save() * Add nova.db.migration.db\_initial\_version() * Apply six for metaclass * Reply with a meaningful exception, when libvirt connection is broken * Make Object FieldType take an object name instead of a class * Merging two mkfs commands * Add obj\_make\_compatible() * A nicer calling convention for object instantiation * Adding support for multiple hypervisor versions * Add CIDR field type * Adding Read-Only volume attaching support to Nova * Use \`versionutils.is\_compatible\` for Nova Objects * Fix NovaObject versioning attribute usage * Use the oslo fixture module * Make a note about Object deepcopy helper * Remove transitional callable field interface * Make the base object infrastructure use Fields * Migrate NovaPersistentObject and ObjectListBase to Fields * Move exception definitions out of db api * Make field object support transitional call-based interface * Add Field model and tests * Fix conductor's object change detection * Fixes typos in the files in the nova folder * Move \`utils.hash\_file\` -> \`imagecache.\_hash\_file\` * Remove \`utils.timefunc\` function * Remove \`utils.total\_seconds\` * Remove \`utils.get\_from\_path\` * Remove unused dict functions from utils * Prefix \`utils.get\_root\_helper\` with underscore * Remove \`utils.debug\` * Remove \`utils.last\_octet\` * Remove 
\`utils.parse\_mailmap\` * Remove unecessary \`get\_boolean\` function * Make Exception.format\_message aware of Messages * Fix incorrect exception raised during evacuate * Remove exceptions.Duplicate * Fixes modules with wrong file mode bits * Object cleanups * Add new-world Quota object * Improve "keypair data is invalid" error message * Don't use sudo to discover ipv4 address * Fix asymmetric view of object fields * Wrong arguments when calling safe\_utils.getcallargs() * Add key manager implementation with static key * Removed duplicated class in exception.py * VMware image clone strategy settings and overrides * Clean up duplicated change-building code in objects * Add Neutron port check for the creation of multiple instances * Remove unused exceptions * Fixes unexpected exception message in ProjectUserQuotaNotFound * Fixes unexpected exception message in PciConfigInvalidWhitelist * Add methods to get image metadata from instance * fix conversion type missing * Port to oslo.messaging.Notifier API * Generalize the \_make\_list() function for objects * PCI passthrough Libvirt vm config * Create mixin class for common DB fields * Add nova.utils.get\_root\_helper() * Inherit base image properties on instance creation * Port all rpcapi modules to oslo.messaging interface * xenapi: add support for auto\_disk\_config=disabled * Check ephemeral and swap size in the API * Add support for API message localization * Improve EC2 API error responses * Remove EC2 postfix from InvalidInstanceIDMalformedEC2 * Introduce InternalError EC2 error code * Introduce UnsupportedOperation EC2 error code * Introduce SecurityGroupLimitExceeded EC2 error code * Introduce IncorrectState EC2 error code * Introduce AuthFailure EC2 error code * Handle port over-quota when allocating network for instance * Introduce InvalidPermission.Duplicate EC2 error code * PCI devices resource tracker * Add PCI device filters support * Change prep\_resize paths to use objects * Make compute\_api 
confirm/revert resize use objects * Introduce Invalid\* EC2 error codes * Improve parameter related EC2 error codes * PCI alias support * Add PCI stats * Add PCI device object support * Pci Device DB support * PCI utils * Fix remove\_fixed\_ip test with CastAsCall * Clean up some unused wrap\_exception() stuff * Adding support for iSER transport protocol * Make API part of instance boot use new BDM format * VMware: Ensure Neutron networking works with VMware drivers * Remove deprecated CONF.fixed\_range * Fix message for server name with whitespace * Make InvalidInstanceIDMalformed an EC2 exception * maint: remove redundant default=None for config options * Enhance object inheritance * Make NovaObject.get() avoid lazy-load when defaulting * Add plug-in modules for direct downloads of glance locations * xenapi: Moving Glance fetch code into image/glance:download\_vhd * Add obj\_attr\_is\_set() method to NovaObject * Add ObjectActionFailed exception and make Instance use it * maint: remove unused exceptions * Remove the monkey patching of \_ into the builtins * Set lock\_path in tests * Add basic BDM format validation in the API layer * Add latest oslo DB support * Per-project-user-quotas for more granularity * Move \_validate\_int\_value controller func to utils * Raise exceptions when Spice/VNC are unavailable * Missed message -> msg\_fmt conversion * Remove locals() from various places * Avoid shadowing Exception 'message' attribute * Don't attach to multiple Quantum networks by default * Load cell data from a configuration file * xenapi:populating hypervisor version in host state * Change force\_dhcp\_release default to True * Add unique constraint to ConsolePool * Add unique constraint to AgentBuild * Make instance show and index use objects * Add unique constraints to Service * Add unique constraint to FixedIp * Exceptions raised by quantum validate\_networks result in 500 error * Fix and gate on E125 * Add unique constraints to Quota * Add unique constraint 
for security groups * Fix metadata access in prep for instance objects * xenapi: Remove vestigial \`compile\_metrics\` code * Add update() method to NovaObject for dict compatibility * Add obj\_to\_primitive() to recursively primitiveize objects * Remove broken config\_drive image\_href support * Better default for my\_ip if 8.8.8.8 is unreachable * Fix a couple typos in the nova.exception module * Make NovaObject support the 'in' operator * Add basic SecurityGroup model * Fix serialization of iterable types * Do not raise NEW exceptions * Fix importing InstanceInfoCache during register\_all() * xenapi: revisit error handling around calls to agent * Fill context on objects in lists * Make NovaObject support extra attributes in items() * Fix instance obj refresh() * Add unique constraints to Cell * Accept is\_public=None when listing all flavors * Remove unused cert db method * Organize limits units and per-units constants * Replace utils.to\_bytes() with strutils.to\_bytes() * Remove unused arg from make\_class\_properties.getter method * Fix obj\_load() in NovaObject base class * Backup and restore object registry for tests * Fix and enable H403 tests * xenapi: remove auto\_disk\_config check during resize * Nova instance group DB support * Replace functions in utils with oslo.fileutils * Enhance group handling in extract\_opts * BDM class and transformation functions * Use InstanceList object for init\_host * Use Instance Objects for Start/Stop * Add base mixin class for object lists * Add deleted flag to NovaObject base * Speeding up scheduler tests * Adds check that the core V3 API is loaded * Improve Keypair error messages in osapi * Import osapi\_v3/enabled option in nova/test * Make object actions pass positional arguments * Call scheduler for run\_instance from conductor * Make instance object tolerate isotime strings * Adds ability to black/whitelist v3 API extensions * Improve Python 3.x compatibility * Adds v3 API disable config option * Add base object 
model * In utils.tempdir, pass CONF.tempdir as an argument * Add missing conversion specifier to ServiceGroupUnavailable * Fix tests for sqlalchemy utils * Moved sample network creation out of unittest base class constructor * Make a few places tolerant of sys\_meta being a dict * Remove ImageTooLarge exception * Use strict=True instead of \`is\_valid\_boolstr\` * Mox should cleanup before stubs * xenapi: ensure vdi is not too big when resizing down * Cells: Don't allow active -> build * Use Oslo's \`bool\_from\_string\` * Hide lock\_prefix argument using synchronized\_with\_prefix() * Convert to using newly imported processutils * Convert to using oslo's execute() method * Reset migrating task state for MigrationError exceptions * Update to using oslo periodic tasks implementation * Refactor \_run\_instance() to unify control flow * Remove unused exception and variable from scheduler * Import and convert to oslo loopingcall * Remove unnecessary LOG initialisation * Move console scripts to entrypoints * Limit the checks for block device becoming available * Remove print statements * Return proper error message when network conflicts * 'injected\_files' should be base 64 encoded * Add a format\_message method to the Exceptions * set up FakeLogger for root logger * Don't include traceback when wrapping exceptions * Add quotas for fixed ips * Makes safe xml data calls raise 400 http error instead of 500 * Prevent rescue for volume-backed instances * Remove uses of instance['instance\_type'] from nova/compute * Remove parameters containing passwords from Notifications * Standarize ip validation along the code * instance\_info\_cache\_update creates wrongly * don't stack trace if long ints are passed to db * Adds retry on upload\_vhd for xapi glance plugin * Additional tests for safe parsing with minidom * Identify baremetal nodes by UUID * Wait for baremetal deploy inside driver.spawn * Add Nova quantum security group proxy * Use oslo-config-2013.1b4 * Add support for 
network adapter hotplug * Add a safe\_minidom\_parse\_string function * Default SG rules for the Security Group "Default" * create new cidr type for data storage * clean up missing whitespace after ':' * Canonizes IPv6 before insert it into the db * Allow VIF model to be chosen per image * Check the length of flavor name in "flavor-create" * Fix nova-compute use of missing DBError * Move floating ip db access to calling side * Implement ZooKeeper driver for ServiceGroup API * Remove strcmp\_const\_time * Use oslo database code * Default value of monkey\_patch\_modules is broken * Reimplement is\_valid\_ipv4() * Tweakify is\_valid\_boolstr() * Make system\_metadata update in place * Record instance actions and events * Handle compute node not available for live migration * Fixes "is not" usage * Code cleanup for rebuild block device mapping * validate specified volumes to boot from at the API layer * Make sure there are no unused import * Allow users to specify a tmp location via config * VMware Compute Driver OVF Support * Avoid db lookup in info\_from\_instance() * Add support for Option Groups in LazyPluggable * enforce server\_id can only be uuid or int * Add encryption method using an ssh public key * Create ports in quantum matching hypervisor MAC addresses * Reject user ports that have MACs the hypervisor cannot use * Cells: Add cells API extension * Keep self and context out of error notification payload * Move service\_down\_time to nova.service * fix N402 for rest of nova * New instance\_actions and events table, model, and api * Move global glance opts into nova.image.glance * fix N401 errors, stop ignoring all N4\* errors * Invert test stream capture logic for debugging * Refactor periodic tasks * Timeout individual tests after one minute * Cells: Add the main code * Add helper methods to nova.paths * Move global path opts in nova.paths * Removed unused imports * Enable nova exception format checking in tests * Parameterize database connection in test.py 
* Move baremetal database tests to fixtures * Add option to make exception format errors fatal * Remove fake\_tests opt from test.py * Add DBDuplicateEntry exception for unique constraint violations * Move TimeOverride to the general reusable-test-helper place * Remove the WillNotSchedule exception * Replace fixtures.DetailStream with fixtures.StringStream * Use testr to run nova unittests * Add general mechanism for testing api coverage * Fixes string formatting error * Move some opts into nova.utils * Properly scope password options * Move monkey patch config opts into nova.utils * Move all temporary files into a single /tmp subdir * Use fixtures library for nova test fixtures * Add agent build API support for list/create/delete/modify agent build * Make policy.json not filesystem location specific * Fix test suite to use MiniDNS * Add pluggable ServiceGroup monitoring APIs * Add SSL support to utils.generate\_glance\_url() * Truncate large console logs in libvirt * Move global fixture setup into nova/test.py * Xenapi: Don't resize down if not auto\_disk\_config * Cells: Re-add DB model and calls * Remove extra space in exception * Use CONF.import\_opt() for nova.config opts * Remove nova.config.CONF * Remove nova.flags * Allow group='foo' in self.flags() for tests * Remove gen\_uuid() * Isolate tests from the environment variable http\_proxy * Remove unused volume exceptions * Add missing exception NetworkDuplicated * Remove custom test assertions * Switch from FLAGS to CONF in tests * Removed two unused imports * Fixes a bug in nova.utils, due to Windows compatibility issues * Make utils.mkfs() set label when fs=swap * Remove out-of-date comment * make utils.mkfs() more general * Use uuidutils.is\_uuid\_like for uuid validation * Switch from FLAGS to CONF in misc modules * Move parse\_args to nova.config * sync deprecated log method from openstack-common * Increased MAC address range to reduce conflicts * Move to a more canonicalized output from qemu-img info * 
Use testtools as the base testcase class * Make nova-rootwrap optional * Migrate to fileutils and lockutils * Fix Incorrect Exception when metadata is over 255 characters * Remove deprecated root\_helper config * SanISCSIDriver SSH execution fixes * Fix bad Log statement in nova-manage * Move mkfs from libvirt.utils to utils * Dis-associate an auto-assigned floating IP should return proper warning * Proxy floating IP calls to quantum * ip\_protocol for ec2 security groups * Add TestCase.stub\_module to make stubbing modules easier * remove deprecated connection\_type flag * Remove TestCase.assertNotRaises * Fixes error message for flavor-create duplicate ID * Check that an image is active before spawning instances * Restore SIGPIPE default action for subprocesses * Fix marker pagination for /servers * Add lookup by ip via Quantum for metadata service * Adds new volume API extensions * Return 400 if create volume snapshot force parameter is invalid * Backport changes from Cinder to Nova-Volume * Check flavor id on resize * Stop fetch\_ca from throwing IOError exceptions * Stop lock decorator from leaving tempdirs in tests * Use volume driver specific exceptions * Implement paginate query use marker in nova-api * Fix synchronized decorator path cleanup * Fix creation of iscsi targets * Address race condition from concurrent task state update * Add a new exception for live migration * External locking for image caching * Correct utils.execute() to check 0 in check\_exit\_code * Implement project specific flavors API * Move ensure\_tree to utils * Remove unused permitted\_instance\_types * Provide a hint for missing EC2 image ids * Remove unused exceptions from nova/exception.py * Keep the ComputeNode model updated with usage * Makes sure tests don't leave lockfiles around * Revert per-user-quotas * Remove unused imports * Fix spelling typos * Allow nova to guess device if not passed to attach * Adding port attribute in network parameter of boot * Simple checks for 
instance user data * Config drive v2 * Uniqueness checks for floating ip addresses * Driver for IBM Storwize and SVC storage * Fix invalid exception format strings * Return 409 error if get\_vnc\_console is called before VM is created * Improve external locking on Windows * Solve possible race in semaphor creation * Adds per-user-quotas support for more detailed quotas management * Move root\_helper deprecation warning into execute * Simplify file hashing * Improve external lock implementation * Remove unused imports * Check instance lock in compute/api * Deprecate root\_helper in favor of rootwrap\_config * Clarify TooManyInstances exception message * Setting root passwd no longer fails silently * reduce debugging from utils.trycmd() * Refactor glance image service code * Convert fixed\_ips to using instance\_uuid * Inject instance metadata into xenstore * Fix wrong regex in cleanup\_file\_locks * Ensure 413 response for security group over-quota * Allow NoMoreFloatingIps to bubble up to FaultWrapper * Return 413 status on over-quota in the native API * General-host-aggregates part 1 * Refactor instance\_usage\_audit. 
Add audit tasklog * Expose over-quota exceptions via native API * Distinguish over-quota for volume size and number * Switch to common logging * Cleanup of image service code * Removes utils.logging\_error (no longer used) * Removes utils.fetch\_file (no longer used) * SM volume driver: DB changes and tests * added deprecated.warn helper method * Replaces functions in utils.py with openstack/common/timeutils.py * Add CPU arch filter scheduler support * Remove unused DB calls * Remove utils.deprecated functions * add unit tests for new virt driver loader * Rename image to image\_id * Convert consoles to use instance uuid * Migrate instance\_metadata to use a uuid to refer to instances * Adds \`disabled\` field for instance-types * fix the instance quota overlimit message * Revert "blueprint " * Unused imports cleanup (folsom-2) * blueprint * convert virt drivers to fully dynamic loading * Eliminate a race condition on instance deletes * Make sure an exception is logged when config file isn't found * Use cfg's new global CONF object * Grammar fixes * Use utils.utcnow rather than datetime.utcnow * Finish quota refactor * Use cfg's new behavior of reset() clearing overrides * Rearchitect quota checking to partially fix bug 938317 * Stop using nova.exception from nova.rpc * Make use of openstack.common.jsonutils * Alphabetize imports * Create an internal key pair API * Use ConfigOpts.find\_file() to find paste config * Remove instance Foreign Key in volumes table, replace with instance\_uuid * Remove old flagfile support * Defer image\_ref update to manager on rebuild * pylint cleanup * Replaces exceptions.Error with NovaException * Remove state altering in live-migration code * Find context arg by type rather than by name * Add instance\_system\_metadata modeling * Use ConfigOpts.find\_file() to locate policy.json * Pass context to notification drivers when we can * Use save\_and\_reraise\_exception() from common * Provide better quota error messages * adjust logging 
levels for utils.py * Use openstack.common.importutils * Moves \`usage\_from\_instance\` into nova.compute.utils * Add deleted\_at to instance usage notification * Renamed current\_audit\_period function to last\_completed\_audit\_period to clarify its purpose * Remove python-novaclient dependency from nova * Improved localization testing * Log kwargs on a failed String Format Operation * Run tools/hacking.py instead of pep8 mandatory * Delete fixed\_ips when network is deleted * Cleanup xenapi driver logging messages to include instance * Remove nova.rpc.impl\_carrot * fix TypeError with unstarted threads in nova-network * ensure atomic manipulation of libvirt disk images * Add periodic\_fuzzy\_delay option * Implement quota classes * Fixes bug 957708 * Make sqlite in-memory-db usable to unittest * Fix run/terminate race conditions * Workaround issue with greenthreads and lockfiles * various cleanups * Remove Virtual Storage Array (VSA) code * Refix mac change to work around libvirt issue * Add pybasedir and bindir options * Use a high number for our default mac addresses * Add adjustable offset to audit\_period * Clear created attributes when tearing down tests * fix restructuredtext formatting in docstrings that show up in the developer guide * Only raw string literals should be used with \_() * assertRaises(Exception, ...) 
considered harmful * Fixes for ec2 images * Retry download\_vhd with different glance host each time * Refactor spawn to use UndoManager * Fail gracefully when the db doesn't speak unicode * Add missing format string type on some exception messages * Add missing filters for new root commands * Use constant time string comparisons for auth * Rename zones table to cells and Instance.zone\_name to cell\_name * Add utils.tempdir() context manager for easy temp dirs * Call detach\_volume when attach fails * OS X Support fixed, bug 942352 * Adds temporary chown to sparse\_copy * Clean stale lockfiles on service startup : fixes bug 785955 * blueprint host-aggregates: xenapi implementation * Add exception SnapshotIsBusy to be handled as VolumeIsBusy * Add attaching state for Volumes * Escape apostrophe in utils.xhtml\_escape() (lp#872450) * Partial fix for bug 919051 * Support non-UTC timestamps in changes-since filter * Adding traceback to async faults * Allow file logging config * Removed zones from api and distributed scheduler * Prevent Duplicate VLAN IDs * Remove unnecessary constructors for exceptions * Don't allow EC2 removal of security group in use * Replace ApiError with new exceptions * Standardize logging delaration and use * Retry on network failure for melange GET requests * Extend glance retries to show() as well * Remove the last of the gflags shim layer * Use named logger when available * Removes constraints from instance and volume types * Backslash continuations (misc.) 
* Fix support for --flagfile argument * Allows nova to read files as root * Re-run nova-manage under sudo if unable to read conffile * Move cfg to nova.openstack.common * blueprint nova-image-cache-management phase1 * Optionally disable file locking * Add support for pluggable l3 backends * lockfile.FileLock already appends .lock * Ties quantum, melange, and nova network model * Fix VPN ping packet length * Remove utils.runthis() * Implementation of new Nova Volume driver for SolidFire ISCSI SAN * Raise 400 if bad kepair data is provided * Refactor away the flags.DEFINE\_\* helpers * blueprint host-aggregates: OSAPI/virt integration, via nova.compute.api * Blueprint xenapi-provider-firewall and Bug #915403 * Create nova cert worker for x509 support * Add nova.exception.InvalidRPCConnectionReuse * KVM and XEN Disk Management Parity * Handle error in associate floating IP (bug 845507) * ComputeNode Capacity support * scheduler host\_manager needs service for filters * Rename 'zone' to 'domain.' 
* Implements blueprint vnc-console-cleanup * blueprint host-aggregates * Add policy checking to nova.network.api.API * Implement BP untie-nova-network-models * First implementation of bp/live-migration-resource-calc * Add policy checks to Compute.API * catch InstanceInvalidState in more places * Add @utils.deprecated() * Refactors utils.load\_cached\_file * Adds simple policy engine support * Workaround bug 852095 without importing mox * Bug #912858: test\_authors\_up\_to\_date does not deal with capitalized names properly * Adds workaround check for mox in to\_primitive * PEP8 type comparison cleanup * Adds running\_deleted\_instance\_reaper task * PEP8 remove direct type comparisons * Clean up pylint errors in top-level files * Ensure generated passwords meet minimum complexity * Fixing novaclient\_converter NameError * Add an API for associating floating IPs with DNS entries * 'except:' to 'except Exception:' as per HACKING * Add exit\_code, stdout, stderr etc to ProcessExecutionException * Bug#898257 abstract out disk image access methods * Make UUID format checking more correct * Document return type from utils.execute() * Fixes bug 723235 * Update utils.execute so that check\_exit\_code handles booleans. 
Fixes LP bug #904560 * Fixes bug 887402 * Refactor vm\_state and task\_state checking * Vm state management and error states * Moves find config to utils because it is useful * fixed\_ips by vif does not raise * Log it when we get a lock * Adds network model and network info cache * First steps towards consolidating testing infrastructure * remove duplicate netaddr in nova/utils * Implement resize down for XenAPI * Fix RPC responses to allow None response correctly * removed logic of throwing exception if no floating ip * Fixes bug 888649 * snapshot/backup in compute manager to use uuids * Follow hostname RFCs * Log the URL to an image\_ref and not just the ID * Verify security group parameters * Refactor of QuotaError * More spelling fixes inside of nova * Refactor logging\_error into utils * Exception cleanup in scheduler * exception.KeypairNotFound usage correction * Fixes lp883279 * Log original dropped exception when a new exception occurs * Improve the liveness checking for services * Repartition and resize disk when marked as managed * Remove unused flag\_overrides from TestCase * Xenapi driver can now generate swap from instance\_type * Adds more usage data to Nova's usage notifications * Remove AoE, Clean up volume code * Include original exception in ClassNotFound exception * Enable admin access to EC2 API server * moved floating ip db access and sanity checking from network api into network manager added floating ip get by fixed address added fixed\_ip\_get moved floating ip testing from osapi into the network tests where they belong * Moving admin actions to extension * Snapshots/backups can no longer happen simultaneously. Tests included * Accept message as sole argument to NovaException * Raise InsufficientFreeMemory * Add minDisk and minRam to OSAPI image details * This patch adds flavor filtering, specifically the ability to flavor on minRam, minDisk, or both, per the 1.1 OSAPI spec * Add next links for server lists in OSAPI 1.1. 
This adds servers\_links to the json responses, and an extra atom:link element to the servers node in the xml response * Update exception.wrap\_exception so that all exceptions (not just Error and NovaException types) get logged correctly * Merging trunk * This patch adds instance progress which is used by the OpenStack API to indicate how far along the current executing action is (BUILD/REBUILD, MIGRATION/RESIZE) * Merging trunk * Fixes lp:855115 -- issue with disassociating floating ips * Renumbering instance progress migration * Fixing tests * Keystone support in Nova across Zones * trunk merge fixup * Adds an 'alternate' link to image views per 3.10 and 3.11 of http://docs.openstack.org/cactus/openstack-compute/developer/openstack-compute-api-1.1/content/LinksReferences.html * Merging trunk * Instance deletions in Openstack are immediate. This can cause data to be lost accidentally * Makes sure ips are moved on the bridge for nodes running dnsmasq so that the gateway ip is always first * clean up based on cerberus review * Remove keystone middlewares * Merged trunk * merged trunk * Merging trunk * merge trunk, fix conflicts * Fixed unit tests with some minor refactoring * merge from trunk * convert images that are not 'raw' to 'raw' during caching to node * Add iptables filter rules for dnsmasq (lp:844935) * merge with trunk r1601 * merged with trunk * Reverted some changes to instance\_get\_all\_by\_filters() that was added in rev 1594. An additional argument for filtering on instance uuids is not needed, as you can add 'uuid: uuid\_list' into the filters dictionary. Just needed to add 'uuid' as an exact\_match\_filter. 
This restores the filtering to do a single DB query * merged trunk and resolved conflict * Adds the ability to automatically confirm resizes after the \`resize\_confirm\_window\` (0/disabled by default) * PEP8 cleanup * \* Remove the foreign key and backrefs tying vif<->instance \* Update instance filtering to pass ip related filters to the network manager \* move/update tests * Merging trunk * merge with trunk * Merged trunk * merge the sknurt * Fixes the handling of snapshotting in libvirt driver to actually use the proper image type instead of using raw for everything. Also cleans up an unneeded flag. Based on doude's initial work * merge with trunk * Some Linux systems can also be slow to start the guest agent. This branch extends the windows agent timeout to apply to all systems * Fix a bug that would make spawning new instances fail if no port/protocol is given (for rules granting access for other security groups) * Merging trunk * Authorize to start a LXC instance withour, key, network file to inject or metadata * Update the v1.0 rescue admin action and the v1.1 rescue extension to generate 'adminPass'. Fixes an issue where rescue commands were broken on XenServer. lp#838518 * merge the trunks * Fixes libvirt rescue to use the same strategy as xen. Use a new copy of the base image as the rescue image. It leaves the original rescue image flags in, so a hand picked rescue image can still be used if desired * merge the trunks * Merged trunk * I am using iputils-arping package to send arping command. 
You will need to install this package on the network nodes using apt-get command apt-get install iputils-arping * Removed sudo from the arguments * merge from trunk * Merged trunk * Update exception.wrap\_exception so that all exceptions (not just Error and NovaException types) get logged correctly * trunk merge * updates Exception.NoMoreFixedIps to subclass NovaException instead of Error * NoMoreFixedIps now subclasses NovaException instead of Error * it merges the trunk; or else it gets the conflicts again * This makes the OS api extension for booting from volumes work. The \_get\_view\_builder method was replaced in the parent class, but the BootFromVolume controller was not updated to use the new method * Merged trunk * Adding flavor extra data extension * Merged from trunk and resolved conflicts * Merged trunk * The 1.1 API specifies that two vendor content types are allowed in addition to the standard JSON and XML content types * Adding progress * merge trunk * merge trunks * resolve conflicts / merge with trunk revno 1569 * Fixes an issue where 'invalid literal for int' would occur when listing images after making a v1.1 server snapshot (with a UUID) * merge the trunk * trunk merge * This branch changes XML Serializers and their tests to use lxml.etree instead of minidom * - remove translation of non-recognized attributes to user metadata, now just ignored - ensure all keys are defined in image dictionaries, defaulting to None if glance client doesn't provide one - remove BaseImageService - reorganize some GlanceImageService tests * we're back * merging trunk; resolving conflicts * Some arches dont have dmidecode, check to see if libvirt is capable of running rather getInfo of the arch its running on * fixups * parent merge * bug fixes * merging trunk * trunk merge * When vpn=true in allocate ip, it attempts to allocate the ip that is reserved in the network. Unfortunately fixed\_ip\_associate attempts to ignore reserved ips. 
This fix allows to filter reserved ip address only when vpn=True * Stock zones follows a fill-first methodology—the current zone is filled with instances before other zones are considered. This adds a flag to nova to select a spread-first methodology. The implementation is simply adding a random.shuffle() prior to sorting the list of potential compute hosts by weights * Pass reboot\_type (either HARD or SOFT) to the virt layers from the API * merging trunk * pull-up from trunk; move spread\_first into base\_scheduler.py * trunk merge * Merged trunk * merged rbp * adds a fake\_network module to tests to generate sensible network info for tests. It does not require using the db * Merged trunk * child zone queries working with keystone now * Added docstring to explain usage of reserved keyword argument * One more bug fix to make zones work in trunk. Basic problem is that in novaclient using the 1.0 OSAPI, servers.create() takes an ipgroups argument, but when using the 1.1 OSAPI, it doesn't, which means booting instances in child zones won't work with OSAPI v1.0. This fix works around that by using keyword arguments for all the arguments after the flavor, and dropping the unused ipgroups argument * Fixes the reroute\_compute decorator in the scheduler API so that it properly: * Fix lp:844155 * Changing a behavior of update\_dhcp() to write out dhcp options file. This option file make dnsmasq offer a default gateway to only NICs of VM belonging to a network that the first NIC of VM belongs to. So, first NIC of VM must be connected to a network that a correct default gateway exists in. 
By means of this, VM will not get incorrect default gateways * merged trunk * merging trunk * merging trunk * merged trunk * Make weigh\_hosts() return a host per instance, instead of just a list of hosts * Merged trunk * pull-up from trunk * pull-up from trunk * pull-up from trunk * This code contains contains a new NetworkManager class that can leverage Quantum + Melange * create a new exception ZoneRequestError to use for returning errors when zone requests couldn't complete * merge trunk * pull-up from trunk * Fixes a case where if a VIF is returned with a NULL network it might not be able to be deleted. Added test case for that fix * Merged trunk * merged trunk * An AMI image without ramdisk image should start * At present, the os servers.detail api does not return server.user\_id or server.tenant\_id. This is problematic, since the servers.detail api defaults to returning all servers for all users of a tenant, which makes it impossible to tell which user is associated with which server * merged trunk * trunk merge * meeging trunk * Merged trunk * Added list of security groups to the newly added extension (Createserverext) for the Create Server and Get Server detail responses * merged trunk * Fixes a small bug which causes filters to not work at all. Also reworks a bit of exception handling to allow the exception related to the bug to propagate up * Fixed review comments * pull-up from trunk * Merged trunk * Glance can now perform its own authentication/authorization checks when we're using keystone * Resolved conflicts and fixed pep8 errors * trunk merge * pull-up from trunk * - implements changes-since for servers resource - default sort is now created\_at desc for instances * merging trunk * Fixes NotFound exceptions to show the proper instance id in the ec2 api * Accept keypair when you launch a new server. 
These properties would be stored along with the other server properties in the database (like they are currently for ec2 api) * merge trunk, fix tests * merge trunk * Simple usage extension for nova. Uses db to calculate tenant\_usage for specified time periods * Fix for LP Bug #838251 * merged trunk * Fixed and improved the way instance "states" are set. Instead of relying on solely the power\_state of a VM, there are now explicitly defined VM states and VM task states which respectively define the current state of the VM and the task which is currently being performed by the VM * Implements lp:798876 which is 'switch carrot to kombu'. Leaves carrot as the default for now... decision will be made later to switch the default to kombu after further testing. There's a lot of code duplication between carrot and kombu, but I left it that way in preparation for ripping carrot out later and to keep minimal changes to carrot * Disassociated previously associated floating ips when calling network\_api.associate\_floating\_ip. Also guard against double-association in the network.manager * trunk merge * merged trunk * fix FloatingIpAlreadyInUse to use correct string pattern, convert ApiErrors to 400 responses * merged trunk * The exception 'RamdiskNotFoundForImage' is no longer used * disassociate floating ips before re-associating, and prevent re-association of already associated floating ips in manager * merged trunk * Merged trunk * Adds assertIn and assertNotIn support to TestCase for compatibility with python 2.6 This is a very minimal addition which doesn't require unittest2 * support the extra optional arguments for msg to assertIn and assertNotIn * fix for assertIn and assertNotIn use which was added in python 2.7. this makes things work on 2.6 still * merge trunk * use 'uuid' field in networks table rather than 'bridge'. Specify project\_id when creating instance in unit test * Virtual Storage Array (VSA) feature. 
- new Virtual Storage Array (VSA) objects / OS API extensions / APIs / CLIs - new schedulers for selecting nodes with particular volume capabilities - new special volume driver - report volume capabilities - some fixes for volume types * merged trunk * merge trunk * merged with rev.1499 * VSA code redesign. Drive types completely replaced by Volume types * merged trunk * Merged trunk * Added: - volume metadata - volume types - volume types extra\_specs * merged trunk * Merged trunk * Once a network is associated with project, I can’t delete this network with ‘nova-manage network delete’. As you know, I can delete network by scrubbing the project with ‘nova-manage project scrub’. However it is too much. The cause of this problem is there is no modify command of network attribute * merged with volume types (based on rev.1490). no code rework yet * merged with volume\_types. no code refactoring yet * merged with nova 1490 * merged trunk * Merged from trunk * The notifiers API was changed to take a list of notifiers. Some people might want to use more than one notifier so hopefully this will be accepted into trunk * merge trunk, fix tests * Fix for trying rebuilds when instance is not active * merged with rev.1485 * Merged trunk * Commit with test data in migration * Merged from trunk * Fix not found exceptions to properly use ec2\_ips for not found * Merged from upstream * I added notifications decorator for each API call using monkey\_patching. By this merge, users can get API call notification from any modules * Fixes bug that causes 400 status code when an instance wasn't attached to a network * Merged from upstream * merging trunk * Removed blank line * Merged with trunk * Fixed typo and docstring and example class name * Merged trunk * This branch does the final tear out of AuthManager from the main code. 
The NoAuth middlewares (active by default) allow a user to specify any user and project id through headers (os\_api) or access key (ec2\_api) * merge trunk, resolve conflicts, fix tests * Our goal is to add optional parameter to the Create server OS 1.0 and 1.1 API to achieve following objectives:- * Fixes bug 831627 where nova-manage does not exit when given a non-existent network address * initial cut on volume type APIs * Merged from trunk, resolved conflicts and fixed broken unit tests due to changes in the extensions which now include ProjectMapper * Fixed conflict with branch * merged trunk * Added Test Code, doc string, and fixed pip-requires * Merged trunk * Merged from upstream * merged trunk * implemented tenant ids to be included in request uris * Upstream merge * Merged trunk * merge with trunk * Adds accessIPv4 and accessIPv6 to servers requests and responses as per the current spec * Fixes utils.to\_primitive (again) to handle modules, builtins and whatever other crap might be hiding in an object * Added OS APIs to associate/disassociate security groups to/from instances * add/remove security groups to/from the servers as server actions * Merged from trunk * Assorted fixes to os-floating-ips to make it play nicely with an in-progress novaclient implementation, as well as some changes to make it more consistent with other os rest apis. Changes include: * Merged trunk * Merged from trunk and fixed review comments * Fixed review comments * Fixed typo * merged trunk * Merged with trunk * merge from trunk * Added monkey patching notification code function * Next round of prep for keystone integration * Merged from trunk * Fixes primitive with builtins, modules, etc * merged trunk * merge with trunk * Added uuid column in virtual\_interfaces table, and an OpenStack extension API for virtual interfaces to expose these IDs. 
Also set this UUID as one of the external IDs in the OVS vif driver * merge * Merged trunk * merged trunk * Currently, rescue/unrescue is only available over the admin API. Non-admin tenants also need to be able to access this functionality. This patch adds rescue functionality over an API extension * Makes all of the binary services launch using the same strategy.  \* Removes helper methods from utils for loading flags and logging  \* Changes service.serve to use Launcher  \* Changes service.wait to actually wait for all the services to exit  \* Changes nova-api to explicitly load flags and logging and use service.serve \* Fixes the annoying IOError when /etc/nova/nova.conf doesn't exist * merged trunk * added volume metadata. Fixed test\_volume\_types\_extra\_specs * merge trunk * Fixes lp828207 * Accept binary user\_data in radix-64 format when you launch a new server using OSAPI. This user\_data would be stored along with the other server properties in the database. Once the VM instance boots you can query for the user-data to do any custom installation of applications/servers or do some specific job like setting up networking route table * Change the call name * merged trunk * Merged with trunk * first cut on types & extra-data (only DB work, no tests) * merge from trunk * Merged trunk * Fixed several logical errors in the scheduling process. Renamed the 'ZoneAwareScheduler' to 'AbstractScheduler', since the zone-specific designation is no longer relevant. Created a BaseScheduler class that has basic filter\_hosts() and weigh\_hosts() capabilities. 
Moved the filters out of one large file and into a 'filters' subdirectory of nova/scheduler * Merged trunk * merged trunk * Merged with trunk and fixed broken testcases * merged with nova-1450 * Make all services use the same launching strategy * Merged trunk * merge from trunk * Merged trunk * merge trunk * Resolved conflicts and merged with trunk * Added uuid for networks and made changes to the Create server API format to accept network as uuid instead of id * I'm taking Thierry at his word that I should merge early and merge often :) * Allow local\_gb size to be 0. libvirt uses local\_gb as a secondary drive, but XenServer uses it as the root partition's size. Now we support both * Merged trunk * merge from trunk * Use netaddr's subnet features to calculate subnets * merge from trunk * Updated the EC2 metadata controller so that it returns the correct value for instance-type metadata * merge the trunk * Merged with upstream * merge with trunk * Validate the size of VHD files in OVF containers * Merged trunk * Merged trunk * Merged trunk * merge trunk * Adding kvm-block-migration feature * merge trunk, remove \_validate\_cidrs and replace functionality with a double for loop * Merged with trunk * Merged trunk * Add durable flag for rabbit queues * merged trunk * Merged trunk * Added ability too boot VM from install ISO. System detects an image of type iso. Images is streamed to a VDI and mounted to the VM. 
Blank disk allocated to VM based on instance type * Add source-group filtering * added logic to make the creation of networks (IPv4 only) validation a bit smarter: - detects if the cidr is already in use - detects if any existing smaller networks are within the range of requested cidr(s) - detects if splitting a supernet into # of num\_networks && network\_size will fit - detects if requested cidr(s) are within range of already existing supernet (larger cidr) * Fix v1.1 /servers/ PUT request to match API documentation by returning 200 code and the server data in the body * have NetworkManager generate MAC address and pass it to the driver for plugging. Sets the stage for being able to do duplicate checks on those MACs as well * merge trunk, fix conflict frim dprince's branch to remove hostname from bin/nova-dhcpbridge * merge in trunk, resolving conflicts with ttx's branch to switch from using sudo to run\_as\_root=True * remerge trunk * Merged with trunk and fixed broken unit testcases * merged rev1418 and fixed code so that less than 1G image can be migrated * merge from trunk * merge from trunk * Merged trunk * Allows for a tunable number of SQL connections to be maintained between services and the SQL server using new configuration flags. 
Only applies when using the MySQLdb dialect in SQLAlchemy * Merged trunk * Merged trunk * merged trunk * Merged with trunk * Support for management of security groups in OS API as a new extension * Merged with trunk * Check compressed image size and PEP8 cleanup * merge from trunk * merged with 1416 * merged trunk * \* Removes rogue direct usage of subprocess module by proper utils.execute calls \* Adds a run\_as\_root parameter to utils.execute, that prefixes your command with FLAG.root\_helper (which defaults to 'sudo') \* Turns all sudo calls into run\_as\_root=True calls \* Update fakes accordingly \* Replaces usage of "sudo -E" and "addl\_env" parameter into passing environment in the command (allows it to be compatible with alternative sudo\_helpers) \* Additionally, forces close\_fds=True on all utils.execute calls, since it's a more secure default * Fixed broken unit testcases * merge from trunk * tenant\_id -> project\_id * These fixes are the result of trolling the pylint violations here * merge trunk * Merged with trunk * Merged with trunk * merge from trunk * merged with nova-1411 * This adds the servers search capabilities defined in the OS API v1.1 spec.. and more for admins * merged trunk * Update the OSAPI v1.1 server 'createImage' and 'createBackup' actions to limit the number of image metadata items based on the configured quota.allowed\_metadata\_items that is set * Rename sudo\_helper FLAG into root\_helper * Initial validation for ec2 security groups name * Command args can be a tuple, convert them to list * Fix usage of sudo -E and addl\_env in dnsmasq/radvd calls, remove addl\_env support, fix fake\_execute allowed kwargs * Use close\_fds by default since it's good for you * Fix ajaxterm's use of shell=True, prevent vmops.py from running its own version of utils.execute * With this branch, boot-from-volume can be marked as completed in some sense. 
The remaining is minor if any and will be addressed as bug fixes * Added xml schema validation for extensions resources. Added corresponding xml schemas. Added lxml dep, which is needed for doing xml schema validation * Fixing a bug in nova.utils.novadir() * Adds the ability to read/write to a local xenhost config. No changes to the nova codebase; this will be used only by admin tools that have yet to be created * Merged trunk * nova.exception.wrap\_exception will re-raise some exceptions, but in the process of possibly notifying that an exception has occurred, it may clobber the current exception information. nova.utils.to\_primitive in particular (used by the notifier code) will catch and handle an exception clobbering the current exception being handled in wrap\_exception. Eventually when using the bare 'raise', it will attempt to raise None resulting a completely different and unhelpful exception * Import sys as well * Resolve conflicts and fixed broken unit testcases * This branch adds additional capability to the hosts API extension. The new options allow an admin to reboot or shutdown a host. 
I also added code to hide this extension if the --allow-admin-api is False, as regular users should have no access to host API calls * Adds OS API 1.1 support * another trunk merge * Merged trunk * Merged trunk * merged with 1383 * Updated with code changes on LP * Merged trunk * Save exception and re-raise that instead of depending on thread local exception that may have been clobbered by intermediate processing * Sync trunk * Sync trunk * Added possibility to mark fixed ip like reserved and unreserved * Glance Image Service now understands how to use glance client to paginate through images * Implemented @test.skip\_unless and @test.skip\_if functionality in nova/test.py * merged with 1382 * Updates v1.1 servers/id/action requests to comply with the 1.1 spec * merging trunk * glance image service pagination * trunk merge * Add run\_as\_root parameter to utils.execute, uses new sudo\_helper FLAG to prefix command * Remove spurious direct use of subprocess * Trunk contained PEP8 errors. Fixed * Trunk merge * merged trunk * merged with nova trunk * utilized functools.wraps * tests and merge with trunk * merged trunk * For nova-manage network create cmd, added warning when size of subnet(s) being created are larger than FLAG.network\_size, in attempt to alleviate confusion. For example, currently when 'nova-manage network create foo 192.168.0.0/16', the result is that it creates a 192.168.0.0/24 instead without any indication to why * Remove instances of the "diaper pattern" * There was a recent change to how we should flip FLAGS in tests, but not all tests were fixed. This covers the rest of them. I also added a method to test.UnitTest so that FLAGS.verbose can be set. This removes the need for flags to be imported from a lot of tests * Merged in the power action changes * Fixed rescue/unrescue since the swap changes landed in trunk. 
Minor refactoring (renaming callback to \_callback since it's not used here) * another merge * Removed temporary debugging raise * Merged trunk * Merged trunk * Added xenhost config get/setting * remove storing original flags verbosity * remove set\_flags\_verbosity.. it's not needed * Merged trunk * Update the OS API servers metadata resource to match the current v1.1 specification - move /servers//meta to /servers//metadata - add PUT /servers//metadata * merged trunk * Sync with latest tests * Moves code restarting instances after compute node reboot from libvirt driver to compute manager; makes start\_guests\_on\_host\_boot flag global * Moved server actions tests to their own test file. Updated stubbing and how flags are set to be in line with how they're supposed to be set in tests * merging trunk * Nova uses instance\_type\_id and flavor\_id interchangeably when they are almost always different values. This can often lead to an instance changing instance\_type during migration because the values passed around internally are wrong. This branch changes nova to use instance\_type\_id internally and flavor\_id in the API. This will hopefully avoid confusion in the future * Fixed rescue and unrescue * Conditionals were not actually running the tests when they were supposed to. Renamed example testcases * Remove instances of the "diaper pattern" * Initial version * switch FLAGS.\* = in tests to self.flags(...) 
remove unused cases of FLAGS from tests modified test.TestCase's flags() to allow multiple overrides added missing license to test\_rpc\_amqp.py * more cleanup of API tests regarding FLAGS * Merged trunk * Merged trunk * Merged trunk and fixed conflicts to make tests pass * Yet another conflict resolved * merged from trunk * merged from trunk * merge trunk * Resolved pep8 errors * merging trunk * Merged trunk * Fixes lp819523 * Fix for bug #798298 * Merged trunk * Add support for 300 Multiple Choice responses when no version identifier is used in the URI (or no version header is present) * Merged trunk * Glance has been updated for integration with keystone. That means that nova needs to forward the user's credentials (the auth token) when it uses the glance API. This patch, combined with a forth-coming patch for nova\_auth\_token.py in keystone, establishes that for nova itself and for xenapi; other hypervisors will need to set up the appropriate hooks for their use of glance * Added changes from mini server * merge from trunk * merge the trunk * Merged trunk * merged trunk * Merged trunk * Merged from lab * merge from trunk * Moves image creation from POST /images to POST /servers//action * Merged trunk * pull-up from trunk/fix merge conflict * pull-up from trunk * Removing the xenapi\_image\_service flag in favor of image\_service * Merged trunk * merge from trunk * While we currently trap JSON encoding exceptions and bail out, for error notification it's more important that \*some\* form of the message gets out. So, we take complex notification payloads and convert them to something we know can be expressed in JSON * Better error handling for resizing * merged trunk rev1348 * merged with nova trunk * Added @test.skip\_unless and @test.skip\_if functionality. 
Also created nova/tests/test\_skip\_examples.py to show the skip cases usage * merge trunk, resolve conflict in net/manater.py in favor of vif-plug * initial commit of vif-plugging for network-service interfaces * Merged trunk * merged from trunk * merge with trunk, resolve conflicts * merge from trunk * Resync to trunk * merging * FlavorNotFound already existed, no need to create another exception * Created exceptions for accepting in OSAPI, and handled them appropriately * Merged with trunk * Merged trunk * merging trunk * pull-up from trunk and conflict resolution * merge trunk * Round 1 of changes for keystone integration. \* Modified request context to allow it to hold all of the relevant data from the auth component. \* Pulled out access to AuthManager from as many places as possible \* Massive cleanup of unit tests \* Made the openstack api fakes use fake Authentication by default * pull-up from trunk * merged trunk * This change creates a minimalist API abstraction for the nova/rpc.py code so that it's possible to use other queue mechanisms besides Rabbit and/or AMQP, and even use other drivers for AMQP rather than Rabbit. The change is intended to give the least amount of interference with the rest of the code, fixes several bugs in the tests, and works with the current branch. 
I also have a small demo driver+server for using 0MQ which I'll submit after this patch is merged * made the whole instance handling thing optional * pull-up from trunk; fix problem obscuring context module with context param; fix conflicts and no-longer-skipped tests * --Stolen from https://code.launchpad.net/~cerberus/nova/lp809909/+merge/68602 * Use the util.import\_object to import a module * merged trunk and fix time call * merge trunk * merged trunk * added instance support to to\_primitive and tests * merge with trunk * Adds XML serialization for servers responses that match the current v1.1 spec * merging trunk * merge trunk * Updated deserialization of POST /servers in the OSAPI to match the latest v1.1 spec * pull-up from trunk * merge trunk * merge from trunk * merge to trunk * merged with nova-1336 * merged trunk * updates handling of arguments in nova-manage network create. updates a few of the arguments to nova-manage and related help. updates nova-manage to raise proper exceptions * Fail silently * merge trunk * Fixed conflict * Merged with trunk and fixed broken unit test cases * merged trunk * Fixes typo in attach volume * merged trunk * added ipv6 requirements to nova-manage network create. changed --network to --fixed\_range\_v4 * updated nova-manage create network. better help, handling of required args, and exceptions. 
Also updated FLAG flat\_network\_bridge to default to None * add invalid device test and make sure NovaExceptions don't get wrapped * merge from trunk * pull-up from trunk * Makes security group rules with the newer version of the ec2 api and correctly supports boto 2.0 * merging parent branch servers-xml-serialization * merged recent trunk * merge with trunk * Resolved conflicts with trunk * Implements a simplified messaging abstraction with the least amount of impact to the code base * merging parent branch lp:~rackspace-titan/nova/osapi-create-server * Updates to the compute API and manager so that rebuild, reboot, snapshots, and password resets work with the most recent versions of novaclient * merging trunk; resolving conflicts * merged from trunk * merged trunk * merging trunk * pull-up from trunk * Updates /servers requests to follow the v1.1 spec. Except for implementation of uuids replacing ids and access ips both of which are not yet implemented. Also, does not include serialized xml responses * merged trunk * merge from trunk * merged trunk * I'm sorry, for my fail with rebasing. Any way previous branch grew to many other futures, so I supersede it. 1. Used optparse for parsing arg string 2. Added decorator for describe method params 3. Added option for assigning network to certain project. 4. Added field to "network list" for showing which project owns network * Moved the VIF network connectivity logic('ensure\_bridge' and 'ensure\_vlan\_bridge') from the network managers to the virt layer. In addition, VIF driver class is added to allow customized VIF configurations for various types of VIFs and underlying network technologies * merge with trunk, resolve conflicts * fixing merge conflict * merge from trunk * merged with 1320 * merged trunk * This fixes the xml serialization of the /extensions and /extensions/foo resources. 
Add an ExtensionsXMLSerializer class and corresponding unit tests * merge with trunk, resolve conflicts * Merged with 1306 + fix for dns change * merge with 1305 * Adds ability to set DNS entries on network create. Also allows 2 dns servers per network to be specified * merged trunk * Merged Dan's branch * Merged trunk * merge with trunk, resolve conflicts * merge ryu's branch * change context to maintain exact time, store roles, use ids instead of objects and use a uuid for request\_id * Resolved conflict with trunk * merge trunk * This fixes issues with invalid flavorRef's being passed in returning a 500 instead of a 400, and adds tests to verify that two separate cases work * merge from trunk * Perform fault wrapping in the openstack WSGI controller. This allows us to just raise webob Exceptions in OS API controllers with the appropriate explanations set. This resolves some inconsistencies with exception raising and returning that would cause HTML output to occur when faults weren't being handled correctly * Merged with trunk which includes ha-net changes * Fixes lp813006 * Fixes lp808949 - "resize doesn't work with recent novaclient" * merge with trunk * Merged trunk * Merged lp:~~danwent/nova/network-refactoring * Adds HA networking (multi\_host) option to networks * merge ryu's branch * Merged trunk * merged trunk * network api release\_floating\_ip method will now check to see if an instance is associated to it, prior to releasing * Fixes lp809587 * Merged with trunk * merged from trunk * Merged trunk * Merged with trunk * merged trunk * merged trunk * fixed reviewer's comment. 1. ctxt -> context, 2. erase unnecessary exception message from nova.sccheduler.driver * merged trunk * This change adds the basic boot-from-volume support to the image service * Merged with trunk * pep8'd * Some basic validation for creating ec2 security groups. (LP: #715443) * VSA: first cut. 
merged with 1279 * Tests passing again * Updated with some changes from manual testing * merging trunk * Adds greater configuration flexibility to rate limiting via api-paste.ini. In particular: * merge with trunk * - Present ip addresses in their actual networks, not just a static public/private - Floating ip addresses are grouped into the networks with their associated fixed ips - Add addresses attribute to server entities * merge with trunk, resolve conflicts * Existing Windows agent behaves differently than the Unix agents and require some workarounds to operate properly. Fixes are going into the Windows agent to make it behave better, but workarounds are needed for compatibility with existing installed base * Merged with trunk and fixed pep errors * merging trunk * pull-up from trunk, while we're at it * Merged with Trunk * Updated responses for GET /images and GET /images/detail to respect the OSAPI v1.1 spec * merge * merge from trunk * Extends the exception.wrap\_exception decorator to optionally send an update to the notification system in the event of a failure * trunk merge * merging trunk * merged branch lp:~rackspace-titan/nova/images-response-formatting * merged trunk * merge with trunk * Starting part of multi-nic support in the guest. Adds the remove\_fixed\_ip code, but is incomplete as it needs the API extension that Vek is working on * merged trunk * fix reviewer's comment * fixed marshalling problem to cast\_compute.. * This doesn't actually fix anything anymore, as the wsgi\_refactor branch from Waldon took care of the issue. 
However, a couple rescue unit tests would have caught this originally, so I'm proposing this to include those * Merged with Trunk * add optional parameter networks to the Create server OS API * Made xen plugins rpm noarch * Set the proper return code for server delete requests * merging trunk * minor tweaks * Adds an extension which makes add\_fixed\_ip() available through an OpenStack extension * Fix the bug 800759 * pre trunk merge * fix conflict * Fixed up an incorrect key being used to check Zones * merged trunk * Make the instance migration calls available via the API * Merged trunk * image/fake: added teardown method * merge with trunk * pull-up from trunk * pull-up from trunk * Merging issues * implemented clean-up logic when VM fails to spawn for xenapi back-end * Adds the os-hosts API extension for interacting with hosts while performing maintenance. This differs from the previous merge prop as it uses a RESTful design instead of GET-based actions * stricter zone\_id checking * trunk merge * Merged trunk * Updated the links container for flavors to be compliant with the current spec * merged trunk * Add a socket server responding with an allowing flash socket policy for all requests from flash on port 843 to nova-vncproxy * Pull-up from trunk (post-multi\_nic) * merged trunk * merged trunk * merged trunk * merged trunk * merged trunk * merged trunk * merged trunk * First round of changes for ha-flatdhcp * fixed a bug which prevents suspend/resume after block-migration * after trunk merge * Added the GroupId param to any pertinent security\_group methods that support it in the official AWS API * Fixed the case where an exception was thrown when trying to get a list of flavors via the api yet there were no flavors to list * fix up tests * review fixes * added multi-nic support * trunk merge with migration renumbering * Child Zone Weight adjustment available when adding Child Zones * trunk merge * merge trunk * merged trunk * Windows instances will often take 
a few minutes setting up the image on first boot and then reboot. We should be more patient for those systems as well check if the domid changes so we can send agent requests to the current domid * - add metadata container to /images/detail and /images/ responses - update xml serialization to encode image entities properly * merging trunk * trunk merge * done and done * phew ... working * compute\_api.get\_all should be able to recurse zones (bug 744217). Also, allow to build more than one instance at once with zone\_aware\_scheduler types. Other cleanups with regards to zone aware scheduler.. * fix issue of recurse\_zones not being converted to bool properly add bool\_from\_str util call add test for bool\_from\_str slight rework of min/max\_count check * merged trunk * pulled in koelkers test changes * merge with trey * Merged trunk * merged trunk, fixed the floating\_ip fixed\_ip exception stupidity * trunk merge * Implement backup with rotation and expose this functionality in the OS API * Merged trunk * adopt merge * moved to wrap\_exception approach * moved migration again & trunk merge * moved to wrap\_exception decorator * Merged trunk * merging trunk * This adds system usage notifications using the notifications framework. These are designed to feed an external billing or similar system that subscribes to the nova feed and does the analysis * Refactored usage generation * Re-worked some of the WSGI and WSGIService code to make launching WSGI services easier, less error prone, and more testable. Added tests for WSGI server, new WSGI loader, and modified integration tests where needed * Merged trunk * pep8 fix * Adds support for "extra specs", additional capability requirements associated with instance types * resync with trunk * remerged trunk * Added floating IP support in OS API * merge with trey * trunk merge, getting fierce. 
* Merged trunk * Added nova.version to utils.py * Pulled trunk, merged boot from ISO changes * review issues fixed * merge with trunk * Upstream merge * merging trunk; adding error handling around image xml serialization * Fix for bug #788265. Remove created\_at, updated\_at and deleted\_at from instance\_type dict returned by methods in sqlalchemy API * PEP8 fix * Merged from trunk * Add api methods to delete provider firewall rules * Removes the usage of the IPy module in favor of the netaddr module * merged * trunk merged. conflicts resolved * added disassociate method to tests * some tests and refactoring * Trunk merge fixes * Merging trunk * Refactored backup rotate * Merged from trunk * Merged with trunk * Unwind last commit, force anyjson to use our serialization methods * Created Bootstrapper to handle Nova bootstrapping logic * trunk merge * This adds a way to create global firewall blocks that apply to all instances in your nova installation * merge from trunk * proper xml serialization for images * Add xml serialization for all /images//meta and /images//meta/ responses * trunk merge and migration bump * Merged markwash's fixes * Merged trunk * Merged from trunk * This catches the InstanceNotFound exception on create, and ignores it. This prevents errors in the compute log, and causes the server to not be built (it should only get InstanceNotFound if the server was deleted right after being created). This is a temporary fix that should be fixed correctly once no-db-messaging stuff is complete * merging trunk * Adding backup rotation * fix some issues with flags and logging * api/ec2, boot-from-volume: an unit test for describe instances * merge with trunk * trunk merge * merge from trunk * This branch adds support to the xenapi driver for updating the guest agent on creation of a new instance. 
This ensures that the guest agent is running the latest code before nova starts configuring networking, setting root password or injecting files * merge from trunk * merge with trey * fixed reviewer's comment. 1. adding dest-instance-dir deleting operation to nova.compute.manager, 2. fix invalid raise statement * Merged trunk * added adjust child zone test * tests working again * updated the exceptions around virtual interface creation, updated flatDHCP manager comment * more trunks * another trunk merge * This patch adds support for working with instances by UUID in addition to integer IDs * Merging trunk, fixing conflicts * Cleanup and addition of tests for WSGI server * merged rev trunk 1198 * Introduced Loader concept, for paste decouple * Cleaned up nova-api binary and logging a bit * General cleanup and refactor of a lot of the API/WSGI service code * Adding tests for is\_uuid\_like * Implements a portion of ec2 ebs boot. What's implemented - block\_device\_mapping option for run instance with volume (ephemeral device and no device isn't supported yet) - stop/start instance * updated fixed ip and floating ip exceptions * Merging trunk * renamed VirtualInterface exception and extend NovaException * bunch of docstring changes * Removes nova/image/local.py (LocalImageService) * Increased error message readability for the OpenStack API * merging trunk * Upstream merge * Rename: intance\_type\_metadata -> instance\_type\_extra\_specs * merged trunk * merge with trey * Merged trunk * Prep-work to begin on reroute\_compute * Adding uuid test * merge with nova trunk * The Xen driver supports running instances in PV or HVM modes, but the method it uses to determine which to use is complicated and doesn't work in all cases. The result is that images that need to use HVM mode (such as FreeBSD 64-bit) end up setting a property named 'os' set to 'windows' * Phew ... ok, this is the last dist-scheduler merge before we get into serious testing and minor tweaks. 
The heavy lifting is largely done * Changed requests with malformed bodies to return a HTTP 400 Bad Request instead of a HTTP 500 error * merged trunk * mp fixes * merged trunk rev 1178 * merge with trey * Created new exception for handling malformed requests Wrote tests Raise httpBadRequest on malformed request bodies * - fixes bug that prevented custom wsgi serialization * merging trunk, fixing pep8 * This fixes the server\_metadata create and update functions that were returning req.body (as a string) instead of body (deserialized body dictionary object). It also adds checks where appropriate to make sure that body is not empty (and return 400 if it is). Tests updated/added where appropriate * merging trunk * trunk merge * merge trunk * block migration feature added * ec2 api method allocate\_address ; raises exception.NoFloatingIpsDefined instead of UnknownError when there aren't any floating ips available * Allows Nova to talk to multiple Glance APIs (without the need for an external load-balancer). Chooses a random Glance API for each request * added new exception more descriptive of not having available floating addresses avail for allocation * trunk merge and ec2 tests fixed * tests working after merge-3 update * Pull-up from multi\_nic * merged koelkers tests branch * Merging trunk * Merged trunk * Fix merge conflict * merged trunk again * Now forwards create instance requests to child zones. Refactored nova.compute.api.create() to support deferred db entry creation * MySQL database tables are currently using the MyISAM engine. Created migration script nova/db/sqlalchemy/migrate\_repo/versions/021\_set\_engine\_mysql\_innodb.py to change all current tables to InnoDB * merged trunk again * Cleaned up some pylint errors * removed network\_info shims in vmops * trunk merge * merge trunk * Cleaned up some of the larger pylint errors. 
Set to ignore some lines that pylint just couldn't understand * pep8 * Make libvirt snapshotting work with images that don't have an 'architecture' property * run\_instances will check image for 'available' status before attempting to create a new instance * merged from trunk * This branch allows marker and limit parameters to be used on image listing (index and detail) requests. It parses the parameters from the request, and passes it along to the glance\_client, which can now handle these parameters. Essentially all of the logic for the pagination is handled in glance, we just pass along the correct parameters and do some error checking * merge from trunk, resolved conflicts * Update the OSAPI images controller to use 'serverRef' for image create requests * merge with trey * Merged trunk * merge trunk * merge with trunk * This branch removes nwfilter rules when instances are terminated to prevent resource leakage and serious eventual performance degradation. Without this patch, launching instances and restarting nova-compute eventually become very slow * merge with trunk * resolve conflicts with trunk * Update migrate script version to 22 * trunk merge after 2b hit * Distributed Scheduler developer docs * merged trunk again * paramiko is not installed into the venv, but is required by smoketests/base.py. Added paramiko to tools/pip-requires * Changes all uses of utcnow to use the version in utils. This is a simple wrapper for datetime.datetime.utcnow that allows us to use fake values for tests * Set pylint to ignore correct lines that it could not determine were correct, due to the means by which eventlet.green imported subprocess Minimized the number of these lines to ignore * LDAP optimization and fix for one small bug caused huge performance leak. 
Dashboard's benchmarks showed overall x22 boost in page request completion time * Adds LeastCostScheduler which uses a series of cost functions and associated weights to determine which host to provision to * trunk merge * Merged with trunk * This change set adds the ability to create new servers with an href that points to a server image on any glance server (not only the default one configured). This means you can create a server with imageRef = http://glance1:9292/images/3 and then also create one with imageRef = http://glance2:9292/images/1. Using the old way of passing in an image\_id still works as well, and will use the default configured glance server (imageRef = 3 for instance) * merged trunk * Tests that all exceptions can be raised properly, and fix the couple of instances where they couldn't be constructed due to typos * merge trunk... yay.. * make all uses of utcnow use our testable utils.utcnow * Fixing conflicts * Tests to assure all exceptions can be raised as well as fixing NotAuthorized * This adds the ability to publish nova errors to an error queue * Sudo chown the vbd device to the nova user before streaming data to it. This resolves an issue where nova-compute required 'root' privs to successfully create nodes with connection\_type=xenapi * Bugfix #780784. 
KeyError when creating custom image * merged from trunk * fix pep8 issue from merge * - move osapi-specific wsgi code from nova/wsgi.py to nova/api/openstack/wsgi.py - refactor wsgi modules to use more object-oriented approach to wsgi request handling: - Resource object steps up to original Controller position - Resource coordinates deserialization, dispatch to controller, serialization - serialization and deserialization broken down to be more testable/flexible * merge from trunk * Merged from trunk * Adds hooks for applying ovs flows when vifs are created and destroyed for XenServer instances * Fixing a bunch of conflicts * Incremented version of migration script to reflect changes in trunk * Basic hook-up to HostFilter and fixed up the passing of InstanceType spec to the scheduler * Resolving conflict and finish test\_images * merge * Merged trunk * Merged trunk and fixed conflicts * added pause/suspend implementation to nova.virt.libvirt\_conn * Added the filtering of image queries with image metadata. This is exposing the filtering functionality recently added to Glance. Attempting to filter using the local image service will be ignored * This enables us to create a new volume from a snapshot with the EC2 api * Add vnc\_keymap flag, enable setting keymap for vnc console and fix bug #782611 * Rebased to trunk rev 1120 * trunk merge * Cleaned up text conflict * pep8 fixes * Handle the case when a v1.0 api tries to list servers that contain image hrefs * merge trunk * merge from trunk * This adds a volume snapshot support with the EC2 api * Fixing pep8 problems * Adding accessor methods for instance type metadata * trunk merge * Adds the ability to make a call that returns multiple times (a call returning a generator). 
This is also based on the work in rpc-improvements + a bunch of fixes Vish and I worked through to get all the tests to pass so the code is a bit all over the place * merge with dietz * Virt tests passing while assuming the old style single nics * merge trunk * Essentially adds support for wiring up a swap disk when building * Merged trunk * branch 2a merge (including trunk) * trunk merge * merging trunk * merge with dietz * Renamed image\_ref variables to image\_href. Since the convention is that x\_ref vars may imply that they are db objects * Added test skipper class * cleanup the code for merging * lots of fixes for rpc and extra imports * almost everything working with fake\_rabbit * merge with dietz * Fixing divergence * Merged trunk * Fixed the mistyped line referred to in bug 787023 * Merged trunk and resolved conflicts * Merged with trunk * Several changes designed to bring the openstack api 1.1 closer to spec - add ram limits to the nova compute quotas - enable injected file limits and injected file size limits to be overridden in the quota database table - expose quota limits as absolute limits in the openstack api 1.1 limits resource - add support for controlling 'unlimited' quotas to nova-manage * During the API create call, the API would kick off a build and then loop in a greenthread waiting for the scheduler to pick a host for the instance. 
After API would see a host was picked, it would cast to the compute node's set\_admin\_password method * Merged upstream * merged trunk * Merged trunk * Created new libvirt directory, moved libvirt\_conn.py to libvirt/connection.py, moved libvirt templates, broke out firewall and network utilities * merge against 2a * trunk merge * New tests added * merged recent trunk * merged recent trunk * eventlet.spawn\_n() expects the function and arguments, but it expects the arguments unpacked since it uses \*args * merge with trey * merge trunk * moved auto assign floating ip functionality from compute manager to network manager * Fixes some minor doc issues - misspelled flags in zones doc and also adds zones doc to an index for easier findability * Synchronise with Diablo development * zone1 merge * merged from trunk * Renaming service\_image\_id vars to image\_id to reduce confusion. Also some minor cleanup * get rid of all mention of drivers ... it's filter only now * merge trunk * merge branch lp:~rackspace-titan/nova/ram-limits * Rebased to trunk rev 1101 * merge from trunk * moved utils functions into nova/image/ * Trunk merge * Fix bug #744150 by starting nova-api on an unused port * Removing utils.is\_int() * merge trunk * merging trunk * Merged with trunk * print information about nova-manage project problems * merge from trunk * This is the groundwork for the upcoming distributed scheduler changes. Nothing is actually wired up here, so it shouldn't break any existing code (and all tests pass) * Merging trunk * Get rid of old virt/images.py functions that are no longer needed. Checked for any loose calls to these functions and found none. All tests pass for me * Update OSAPI v1.1 extensions so that it supports RequestExtensions. ResponseExtensions were removed since the new RequestExtension covers both use cases. This branch also removes some of the odd serialization code in the RequestExtensionController that converted dictionary objects into webob objects. 
RequestExtension handlers should now always return proper webob objects * foo * Fixed some tests * merge with trunk * Added an EC2 API endpoint that'll allow import of public key. Prior, api only allowed generation of new keys * Add new flag 'max\_kernel\_ramdisk\_size' to specify a maximum size of kernel or ramdisk so we don't copy large files to dom0 and fill up /boot/guest * Merged with trunk * merge from trunk * Merged trunk and resolved horrible horrible conflicts * Merging trunk * minor cleanup, plus had to merge because of diverged-branches issue * merge from trunk * merge lp:nova * default to port 80 if it isnt in the href/uri * skeleton of forwarding calls to child zones * merge trunk * Adding FlagNotSet exception * Implements a basic mechanism for pushing notifications out to interested parties. The rationale for implementing notifications this way is that the responsibility for them shouldn't fall to Nova. As such, we simply will be pushing messages to a queue where another worker entirely can be written to push messages around to subscribers * Merging trunk * fix pep8 issues * fixed QuotaTestCases * fixed ComputeTestCase tests * made ImageControllerWithGlanceServiceTests pass * get integrated server\_tests passing * Removed all utils.import\_object(FLAGS.image\_service) and replaced with utils.get\_default\_image\_service() * added is\_int function to utils * Pep8 fixes * updates to utils methods, initial usage in images.py * added util functions to get image service * Adding fill first cost function * Fixes the naming of the server\_management\_url in auth and tests * Merging in Sandy's changes adding Noop Cost Fn with tests * merged trunk * Fixes improper attribute naming around instance types that broke Resizes * Convert instance\_type\_ids in the instances table from strings to integers to enable joins with instance\_types. 
This in particular fixes a problem when using postgresql * merge lp:nova * Re-pull changed notification branch * failure conditions are being sent back properly now * Migrate quota schema from hardcoded columns to a key-value approach. The hope is that this change would make it easier to change the quota system without future schema changes. It also adds the concept of quotas that are unlimited * Added missing flavorRef and imageRef checks in the os api xml deserialization code along with tests * This branch splits out the IPv6 address generation into pluggable backends. A new flag named ipv6\_backend specifies which backend to use * Review changes and merge from trunk * merge trunk * Adds proper error handling for images that can't be found and a test for deregister image * Merging in trunk * I'm assuming that openstack doesnt work with python < 2.6 here (which I read somewhere on the wiki). This patch will check to make sure python >= 2.6 is installed, and also allow it to work with python 2.7 (and greater in the future) * merge lp:nova * XenAPI was not implemented to allow for multiple simultaneous XenAPI requests. A single XenAPIConnection (and thus XenAPISession) is used for all queries. XenAPISession's wait\_for\_task method would set a self.loop = for looping calls to \_poll\_task until task completion. Subsequent (parallel) calls to wait\_for\_task for another query would overwrite this. 
XenAPISession.\_poll\_task was pulled into the XenAPISession.wait\_for\_task method to avoid having to store self.loop * Merged trunk * Merging in Sandy's changes * volume/driver: implement basic snapshot * merge trunk * trunk merge * merge trunk * merged from trunk * if a LoopingCall has canceled the loop, break out early instead of sleeping any more than needed * merged from trunk * merged from trunk * merge from trunk * 1 Set default paths for nova.conf and api-paste.ini to /etc/nova/ 2 Changed countryName policy because https://bugs.launchpad.net/nova/+bug/724317 still affected * merge from trunk and update .mailmap file * Merged trunk * Fixes an issue with conversion of images that was introduced by exception refactoring. This makes the exceptions when trying to locate an ec2 id clearer and also adds some tests for the conversion methods * make sure proper exceptions are raised for ec2 id conversion and add tests * merge trunk * Merged trunk * Abstract out IPv6 address generation to pluggable backends * Merged trunk * extracted xenserver capability reporting from dabo's dist-scheduler branch and added tests * Enable RightAWS style signature checking using server\_string without port number, add test cases for authenticate() and a new helper routine, and fix lp753660 * Set root password upon XenServer instance creation * trunk merge * fix mismerge by 1059 * volume/driver: implement basic snapshot/clone * Host Filtering for Distributed Scheduler (done before weighing) * Rebased to trunk rev 1057 * merge from trunk * Simple fix for this issue. Tries to raise an exception passing in a variable that doesn't exist, which causes an error * Merged trunk * merge from trunk * Sanitize get\_console\_output results. See bug #758054 * Merged trunk * terminology: no more plug-ins or queries. 
They are host filters and drivers * merge prop fixes * merge with trunk * merge from trunk * Merged with current trunk * Merged trunk * tests and better driver loading * Adding OSAPI v1.1 limits resource * Adding support for server rebuild to v1.0 and v1.1 of the Openstack API * looking for default flagfile * merging trunk * merging trunk * Merged trunk * Merged trunk * ensure create image conforms to OS API 1.1 spec * merge updates from trunk * merged from trunk * merging trunk; resolving conflicts; fixing issue with ApiError test failing since r1043 * Implement get\_host\_ip\_addr in the libvirt compute driver * merging trunk; resolving conflicts * ApiError 'code' arg set to None, and will only display a 'code' as part of the str if specified * merging trunk * Final cleanup of nova/exceptions.py in my series of refactoring branches * Uses memcached to cache roles so that ldap is actually usable * Rebased to trunk rev 1035 * Added more unit-test for multi-nic-nova libvirt * further cleanup of nova/exceptions.py * merge with trunk and resolve conflicts * Refactoring usage of nova.exception.NotFound * merging trunk * Refactoring the usage of nova.exception.Duplicate * Rebased to trunk rev 1030 * merged from trunk * merging trunk * Merged trunk and fixed simple exception conflict * merging trunk * Refactoring nova.exception.Invalid usage * Use runtime XML instead of VM creation time XML for createXML() call in order to ensure volumes are attached after RebootInstances as a workaround, and fix bug #747922 * Rebased to trunk rev 1027, and resolved a conflict in nova/virt/libvirt\_conn.py * Rebased to trunk rev 1027 * clarifies error when trying to add duplicate instance\_type names or flavorids via nova-manage instance\_type * merge trunk * Rework completed. Added test cases, changed helper method name, etc * merge trunk, resolved conflict * merge trunk * Provide option of auto assigning floating ip to each instance. 
Depend on auto\_assign\_floating\_ip boolean flag value. False by default * Restore volume state on migration failure to fix lp742256 * Fixes cloudpipe to get the proper ip address * merging trunk * Fix bug with content-type and small OpenStack API actions refactor * merge with trunk * merge trunk * merged trunk * Merged trunk and fixed api servers conflict * Addressing exception.NotFound across the project * Fixed network\_info creation in libvirt driver. Now creating same dict as in xenapi driver * rebase trunk * Rebased to trunk rev 1015 * Utility method reworked, etc * Docstring cleanup and formatting (nova dir). Minor style fixes as well * merge trunk * cleanups per code review * docstring cleanup, nova dir * merge with trunk * Rebased to trunk rev 1005 * Merged trunk * trunk merged * merging lp:~rackspace-titan/nova/exceptions-refactor-invalid * Round 1 of pylint cleanup * Implement quotas for the new v1.1 server metadata controller * pep8 fix * refactoring usage of exception.Duplicate errors * merging lp:~rackspace-titan/nova/exceptions-refactor-invalid * one last i18n string * Merged trunk * multi-line string spacing * moving dynamic i18n to static * Add support for creating a snapshot of a nova volume with euca-create-snapshot * Add support for creating a snapshot of a nova volume with euca-create-snapshot * trunk merged * use 'is not None' instead of '!= None' * Support admin password when specified in server create requests * merge lp:nova and resolve conflicts * use 'is not None' instead of '!= None' * trunk merged * Rebased to trunk rev 995 * Rebased to trunk rev 995 * merge trunk * trunk merged. 
conflict resolved * Add additional logging for WSGI and OpenStack API authentication * Merged trunk * merging trunk * adding documentation & error handling * correcting tests; pep8 * initial roundup of all 'exception.Invalid' cases * Updated following to Rick's comments * Blushed up a little bit * Merged lp:~rackspace-titan/nova/server\_metadata\_quotas as a prereq * Merged trunk * Merged trunk * merge trunk * Rebase to trunk rev 937 * merge trunk * Rebased to trunk rev 973 * merge trunk * resolved lazy\_match conflict between bin/nova-manage instance and instance\_type by moving instance subcommand under vm command. documented vm command in man page. removed unused instance\_id from vm list subcommand * Rebased to trunk rev 971 * Rebased to trunk rev 971 * There is a race condition when a VDI is mounted and the device node is created. Sometimes (depending on the configuration of the Linux distribution) nova loses the race and will try to open the block device before it has been created in /dev * merge trunk * removes log command from nova-manage as it no longer worked in multi-log setup * corrects incorrect openstack api responses for metadata (numeric/string conversion issue) and image format status (not uppercase) * Implement a mechanism to enforce a configurable quota limit for image metadata (properties) within the OS API image metadata controller * merge trunk * merge trunk * Fixes issues with describe instances due to improperly set metadata * Added support for listing addresses of a server in the openstack api. Now you can GET \* /servers/1/ips \* /servers/1/ips/public \* /servers/1/ips/private Supports v1.0 json and xml. 
Added corresponding tests * This fixes how the metadata and addresses collections are serialized in xml responses * merged trunk * merged trunk and resolved conflict * Update instances table to use instance\_type\_id instead of the old instance\_type column which represented the name (ex: m1.small) of an instance type * Remove and from AllocateAddress response, and fix bug #751176 * Blush up a bit * Rebased to trunk rev 949 * Rebased to trunk rev 949 * pep8 cleanup * ApiError code should default to None, and will only display a code if one exists. Prior was output an 'ApiError: ApiError: error message' string, which is confusing * merged trunk * Merged trunk * Support providing an XML namespace on the XML output from the OpenStack API * Merged with trunk, fixed up test that wasn't checking namespace * Enable RightAWS style signing on server\_string without port number portion * Improved unit tests Fixed docstring formatting * Only create ca\_path directory if it does not already exist * merged trunk * merged trunk * Moved 'name' from to , corrected and fixes bug # 750482 * Rebased to trunk 942 * merge trunk * merging trunk * Declares the flag for vncproxy\_topic in compute.api * fixes incorrect case of OpenStack API status response * merge trunk * Added synchronize\_session parameter to a query in fixed\_ip\_disassociate\_all\_by\_timeout() and fix #735974 * Rebased to trunk 930 * merge trunk * Add a change password action to /servers in openstack api v1.1, and associated tests * merge lp:nova * Merged with Waldon * adding 'building' power state; testing for 409 from OSAPI when rebuild requested on server being rebuild * Rebased to trunk rev 925 * Merged with trunk (after faults change to return correct content-type) * OpenStack API faults have been changed to now return the appropriated Content-Type header * Merged with trunk * Merged with trunk * Merged trunk * merge trunk * merged trunk * The VNC Proxy is an OpenStack component that allows users of Nova to 
access their instances through a websocket enabled browser (like Google Chrome) * Support for volumes in the OpenStack API * Merged with trunk * This branch adds support for linux containers (LXC) to nova. It uses the libvirt LXC driver to start and stop the instance * Glance used to return None when a date field wasn't set, now it returns ''. Glance used to return dates in format "%Y-%m-%dT%H:%M:%S", now it returns "%Y-%m-%dT%H:%M:%S.%f" * Adds support for versioned requests on /images through the OpenStack API * Merged trunk * Added VLAN networking support for XenAPI * Merged with trunk * Merged trunk * merge trunk * merged from trunk * merge lp:nova * merge trunk * merge trunk * Merged trunk * merge with trunk * merge lp:nova * Mixins for tests confuse pylint no end, and aren't necessary... you can stop the base-class from being run as a test by prefixing the class name with an underscore * Merged with trunk * merge trunk * merge trunk, fixed conflicts * merge trunk addressing Trey's comments * Merged with trunk, resolved conflicts & code-flicts * merged trunk * merge trunk * merge lp:nova * Adding links container to openstack api v1.1 servers entities * Merged trunk * Merged trunk * merging trunk * merge trunk * Merged trunk and fixed broken/conflicted tests * - add a "links" container to versions entities for Openstack API v1.1 - add testing for the openstack api versions resource and create a view builder * merging trunk * This is basic network injection for XenServer, and includes: * merging trunk * Implement image metadata controller for the v1.1 OS API * merging trunk * merging trunk, resolving conflicts * Add a "links" container to flavors entities for Openstack API v1.1 * merge trunk * merge trunk * merging trunk and resolving conflicts * Implement metadata resource for Openstack API v1.1. 
Includes: -GET /servers/id/meta -POST /servers/id/meta -GET /servers/id/meta/key -PUT /servers/id/meta/key -DELETE /servers/id/meta/key * merge trunk, add unit test * merge trunk * merge trunk addressing reviewer's comments * Support for markers for pagination as defined in the 1.1 spec * merge trunk * Ports the Tornado version of an S3 server to eventlet and wsgi, first step in deprecating the twistd-based objectstore * Merged with trunk Updated net injection for xenapi reflecting recent changes for libvirt * Support for markers for pagination as defined in the 1.1 spec * port the objectstore tests to the new tests * update test base class to monkey patch wsgi * merge trunk * Implementation of blueprint hypervisor-vmware-vsphere-support. (Link to blueprint: https://blueprints.launchpad.net/nova/+spec/hypervisor-vmware-vsphere-support) * Adds serverId to OpenStack API image detail per related\_image blueprint * Implement API extensions for the Openstack API. Based on the Openstack 1.1 API the following types of extensions are supported: * Merging trunk * Adds unit test coverage for XenAPI Rescue & Unrescue * libvirt driver multi\_nic support. 
In this phase libvirt can work with and without multi\_nic support, as in multi\_nic support for xenapi: https://code.launchpad.net/~tr3buchet/nova/xs\_multi\_nic/+merge/53458 * Merging trunk * Merged trunk * style and spacing fixed * Merged with trunk, fix problem with behaviour of (fake) virt driver when instance doesn't reach scheduling * In this branch we are forwarding incoming requests to child zones when the requested resource is not found in the current zone * trunk merge * Fixes a bug that was causing tests to fail on OS X by ensuring that greenthread sleep is called during retry loops * Merged trunk * Fix some errors that pylint found in nova/api/openstack/servers.py * Merged trunk * Pylint 'Undefined variable' E0602 error fixes * Merged with trunk * Merged trunk and resolved conflict in nova/db/sqlalchemy/api.py * Merged with trunk * Aggregates capabilities from Compute, Network, Volume to the ZoneManager in Scheduler * merged trunk r864 * merging trunk r864 * trunk merged. conflicts resolved * Merged trunk * merge trunk * Small refactor * merging trunk r863 * Merged trunk * trunk merge * merge trunk * merge trunk * Pass a fake timing source to live\_migration\_pre in every test that expectes it to fail, shaving off a whole minute of test run time * merge trunk * Poll instance states periodically, so that we can detect when something changes 'behind the scenes' * Merged with conflict and resolved conflict (with my own patch, no less) * Merged with trunk * Added a mechanism for versioned controllers for openstack api versions 1.0/1.1. 
Create servers in the 1.1 api now supports imageRef/flavorRef instead of imageId/flavorId * Merged trunk * Offers the ability to run a periodic\_task that sweeps through rescued instances older than 24 hours and forcibly unrescues them * Merged trunk * merge trunk * Merged with lp:nova, fixed conflicts * Move all types of locking into utils.synchronize decorator * Better method name * small fix * Added docstring * Updates the previously merged xs\_migration functionality to allow upsizing of the RAM and disk quotas for a XenServer instance * Fix lp735636 by standardizing the format of image timestamp properties as datetime objects * migration gateway\_v6 to network\_info * fix utils.execute retries for osx * Merged trunk * Automatically unrescue instances after a given timeout * trunk merge * trunk merged * Merged trunk * Merged trunk * merged with trunk Updated xenapi network injection for IPv6 Updated unit tests * merge trunk * merge trunk * Merging trunk * Merged with lp:nova * Merged with lp:nova * Filtering images by user\_id now * Added space in between # and TODO in #TODO * Enable flat manager support for ipv6 * Adding a talk bubble to the nova.openstack.org site that points readers to the 2011.1 site and the docs.openstack.org site - similar to the swift.openstack.org site. I believe it helps people see more sites are available, plus they can get to the Bexar site if they want to. Going forward it'll be nice to use this talk bubble to point people to the trunk site from released sites * Test the login behavior of the OpenStack API. 
Uncovered bug732866 * trunk merge * Renamed check\_instance -> check\_isinstance to make intent clearer * Pep8 fix * Merging trunk * Adding BASE\_IMAGE\_ATTRS to ImageService * Resolved conflicts * Remove unused global semaphore * Addressed reviewer's comments * Merged trunk * merging trunk r843 * merging trunk r843 * merging trunk r843 * Make synchronized decorator not leak semaphores, at the expense of not being truly thread safe (but safe enough for Eventlet style green threads) * merge trunk * Make synchronized support both external (file based) locks as well as internal (semaphore based) locks. Attempt to make it native thread safe at the expense of never cleaning up semaphores * merge with trunk * xenapi support for multi\_nic. This is a phase of multi\_nic which allows xenapi to work as is and with multi\_nic. The other virt driver(s) need to be updated with the same support * merge lp:nova * wrap and log errors getting image ids from local image store * merge lp:nova * merging trunk * Provide more useful exception messages when unable to load the virtual driver * Openstack api 1.0 flavors resource now implemented to match the spec * merging trunk r837 * zones3 and trunk merge * trunk merge * merge with trunk * merge trunk * merge trunk * merge trunk * fixes nova-manage instance\_type compatibility with postgres db * Make smoketests' exit code reveal whether they were succesful * merge trunk * Merged trunk * merge lp:nova * merge trunk * Cleanup of FakeAuthManager * Re-implementation (or just implementation in many cases) of Limits in the OpenStack API. Limits is now available through /limits and the concept of a limit has been extended to include arbitrary regex / http verb combinations along with correct XML/JSON serialization. Tests included * merge with trunk * Merged trunk * merge trunk * merge trunk * merge trunk * Make "ApiError" the default error code for ApiError instances, rather than "Unknown." 
* Merged trunk * Merged dependant branch lp:~rackspace-titan/nova/openstack-api-versioned-controllers * refactored out middleware, now it's a decorator on service.api * Fix a couple of things that assume that libvirt == kvm/qemu * fix for lp712982, and likely a variety of other dashboard error handling issues. This fix simply causes the default error code for ApiError to be 'ApiError' rather than 'Unknown', which makes dashboard handle the error gracefully, and makes euca error output slightly prettier * Make utils.execute not overwrite std{in,out,err} args to Popen on retries. Make utils.execute reject unknown kwargs * merged trunk, merged qos, slight refactor regarding merges * - general approach for openstack api versioning - openstack api version now preserved in request context - added view builder classes to handle os api responses - added imageRef and flavorRef to os api v1.1 servers - modified addresses container structure in os api v1.1 servers * merge * Add support for network QoS (ratelimiting) for XenServer. Rate is pulled from the flavor (instance\_type) when constructing a vm * Improved exception handling * merging parent branch lp:~bcwaldon/nova/osapi-flavors-1\_1 * merging parent branch lp:~rackspace-titan/nova/openstack-api-version-split * merged trunk * merge trunk * Merged trunk * merge with trunk. moved scheduler\_manager into manager. 
fixed tests * Remerge trunk * moved scheduler API check into db.api decorator * MErge trunk * Log the use of utils.synchronized * Fix lp727225 by adding support for personality files to the openstack api * merge lp:nova and resolve conflicts * Merging trunk * Don't generate insecure passwords where it's easy to use urandom instead * merge trunk * merge trunk * added new class Instances for managaging instances added new method list in class Instances: * first pass openstack redirect working * Merged with trunk (and brian's previous fixes to fake auth) * Add logging to lock check * Merged trunk * Use random.SystemRandom for easy secure randoms, configurable symbol set by default including mixed-case * merge lp:nova * Fixed bugs in bug fix (plugin call) * exception fixup * merged with trunk and removed conflicts * Merging trunk * Merged with trunk. Had to hold bazaar's hand as it got lost again * Clarify the logic in using 32 symbols * Don't generate insecure passwords where it's easy to use urandom instead * Fixing API per spec, to get unit-tests to pass * merge trunk * Initial implementation of refresh instance states * Adding instance\_id as Glance image\_property * removed conflicts and merged with trunk * committing to share * NTT's live-migration branch, merged with trunk, conflicts resolved, and migrate file renamed * merge trunk * merge trunk * Make nova-dhcpbridge output lease information in dnsmasq's leasesfile format * Merged my doc changes with trunk * Make utils.execute not overwrite std{in,out,err} args to Popen on retries. 
Make utils.execute reject unknown kwargs * merge trunk * Merged with trunk * merged with latest trunk and removed unwanted files * Use a consistent naming scheme for XenAPI variables * fixed conflicts after merging with trunk with 787 * Replace raw SQL calls through session.execute() with SQLAlchemy code * Merged trunk * This change adds the ability to boot Windows and Linux instances in XenServer using different sets of vm-params * merge trunk * Changes the output of status in describe\_volumes from showing the user as the owner of the volume to showing the project as the owner * merge trunk * Adds in multi-tenant support to openstack api. Allows for multiple accounts (projects) with admin api for creating accounts & users * remerge trunk (again). fix issues caused by changes to deserialization calls on controllers * merge from trunk.. * Merged trunk * merge trunk * merge lp:nova * merge trunk * Add a new IptablesManager that takes care of all uses of iptables * PEP8 * merge lp:nova * merge trunk * Introduces the ZoneManager to the Scheduler which polls the child zones and caches their availability and capabilities * merge trunk * merge lp:nova and add stub image service to quota tests as needed * merged to trunk rev781 * Modifies S3ImageService to wrap LocalImageService or GlanceImageService. It now pulls the parts out of s3, decrypts them locally, and sends them to the underlying service. It includes various fixes for image/glance.py, image/local.py and the tests * merged trunk * Merged trunk * merge lp:nova * merge, resolve conflicts, and update to reflect new standard deserialization function signature * Fixes doc build after execvp patch * - Content-Type and Accept headers handled properly - Content-Type added to responses - Query extensions no long cause computeFaults - adding wsgi.Request object - removing request-specific code from wsgi.Serializer * Fixes bug 726359. 
Passes unit tests * merge lp:nova, fix conflicts, fix tests * merge lp:nova and resolve conflicts * Update the create server call in the Openstack API so that it generates an 'adminPass' and calls set\_admin\_password in the compute API. This gets us closer to parity with the Cloud Servers v1.0 spec * Merged trunk * execvp passes pep8 * merge trunk * Add a decorator that lets you synchronise actions across multiple binaries. Like, say, ensuring that only one worker manipulates iptables at a time * merge lp:nova * Fixes bug #729400. Invalid values for offset and limit params in http requests now return a 400 response with a useful message in the body. Also added and updated tests * Fixes uses of process\_input * merged trunk r771 * remerge trunk * merge lp:nova and resolve conflicts * merge trunk * Merged with trunk Updated exception handling according to spawn refactoring * execvp: unit tests pass * merged to trunk rev 769 * execvp: almost passes tests * Refactoring nova-api to be a service, so that we can reuse it in unit tests * merge trunk * Fixes lp730960 - mangled instance creation in virt drivers due to improper merge conflict resolution * Use disk\_format and container\_format in place of image type * Merging trunk * Fix the bug where fakerabbit is doing a sort of prefix matching on the AMQP routing key * merge trunk * merged trunk * Remerged trunk. fixed conflict * This fix is an updated version of Todd's lp720157. Adds SignatureVersion checking for Amazon EC2 API requests, and resolves bug #720157 * execvp * Merged trunk * Log failed command execution if there are more retry attempts left * Implementation for XenServer migrations. There are several places for optimization but I based the current implementation on the chance scheduler just to be safe. Beyond that, a few features are missing, such as ensuring the IP address is transferred along with the migrated instance. This will be added in a subsequent patch. 
Finally, everything is implemented through the Openstack API resize hooks, but actual resizing of the instance RAM and hard drive space is not yet implemented * Merged with current trunk * Resolving excess conflicts due to criss-cross in branch history * Rebased to nova revision 761 * \* Updated readme file with installation of suds-0.4 through easy\_install. \* Removed pass functions \* Fixed pep8 errors \* Few bug fixes and other commits * merged trunk * merge trunk * remove ensure\_b64\_encoding * Merged to trunk rev 759 * Merged trunk rev 758 * merge lp:nova * Refactor wsgi.Serializer away from handling Requests directly; now require Content-Type in all requests; fix tests according to new code * Merged with Trunk * This fix changes a tag contained in the DescribeKeyPairs response from to so that Amazon EC2 access libraries which does more strict syntax checking can work with Nova * Remerged trunk, fixed a few conflicts * merged to trunk rev757 * Merged to rev 757 * merges dynamic instance types blueprint (http://wiki.openstack.org/ConfigureInstanceTypesDynamically) and bundles blueprint (https://blueprints.launchpad.net/nova/+spec/flavors) * merged trunk * Very simple change checking for < 0 values in "limit" and "offset" GET parameters. If either are negative, raise a HTTPBadRequest exception. Relevant tests included * Fixes Bug #715424: nova-manage : create network crashes when subnet range provided is not enough , if the network range cannot fit the parameters passed, a ValueError is raised * Provide the ability to rescue and unrescue a XenServer instance * merged trunk * merging trunk * Merged trunk * Fixed pep8 issues, applied jaypipes suggestion * Rebased to nova revision 752 * Use functools.wraps to make sure wrapped method's metadata (docstring and name) doesn't get mangled * merge from trunk * Merged trunk * merged to trunk rev 752 * Rebased at lp:nova 759 * 1. merged trunk rev749 2. 
rpc.call returns '/' as '\/', so nova.compute.manager.mktmpfile, nova.compute.manager.confirm.tmpfile, nova.scheduler.driver.Scheduler.mounted\_on\_same\_shared\_storage are modified followed by this changes. 3. nova.tests.test\_virt.py is modified so that other teams modification is easily detected since other team is using nova.db.sqlalchemy.models.ComputeService * This branch implements the openstack-api-hostid blueprint: "Openstack API support for hostId" * replaced ugly INSTANCE\_TYPE constant with (slightly less ugly) stubs * Add a lock\_path flag for lock files * merge trunk * Adds VHD build support for XenServer driver * Merging trunk to my branch. Fixed a conflict in servers.py * Merging trunk * 1) merge trunk 2) removed preconfigure\_xenstore 3) added jkey for broadcast address in inject\_network\_info 4) added 2 flags: 4.1) xenapi\_inject\_image (default True) This flag allows for turning off data injection by mounting the image in the VDI (agreed with Trey Morris) 4.2) xenapi\_agent\_path (default /usr/bin/xe-update-networking) This flag specifies the path where the agent should be located. It makes sense only if the above flag is True. If the agent is found, data injection is not performed * merge trunk * Add utils.synchronized decorator to allow for synchronising method entrance across multiple workers on the same host * execute: shell=True removed * Rebased to Nova revision 749 * merge with zones2 fixes and trunk * trunk merge * trunk merge, pip-requires and novatools to novaclient changes * Fixes FlatDHCP by making it inherit from NetworkManager and moving some methods around * merged trunk * Merged trunk * Merging trunk, conflicts fixed * Rebased at lp:nova 740 * merged with trunk * Merging trunk, small fixes * Make tests start with a clean database for every test * merge trunk * merge trunk * previous trunk merge * merge clean db * merged trunk * merge trunk * Merged trunk * Support HP/LeftHand SANs. 
We control the SAN by SSHing and issuing CLIQ commands. Also improved the way iSCSI volumes are mounted: try to store the iSCSI connection info in the volume entity, in preference to doing discovery. Also CHAP authentication support * merge trunk * Merged with trunk * Adds colors to output of tests and cleans up run\_tests.py * Merged with trunk * merged upstream * merged trunk * Helper function that supports XPath style selectors to traverse an object tree e.g * Rename minixpath\_select to get\_from\_path * Cope when we pass a non-list to xpath\_select - wrap it in a list * Fixes existing smoketests and splits out sysadmin tests from netadmin tests * Created mini XPath implementation, to simplify mapping logic * merged upstream * Fixes and optimizes filtering for describe\_security\_groups. Also adds a unit test * merged trunk * use flags for sqlite db names and fix flags in dhcpbridge * merged trunk * merged trunk * Initial support for per-instance metadata, though the OpenStack API. Key/value pairs can be specified at instance creation time and are returned in the details view. Support limits based on quota system * Merged trunk * merged trunk * move db creation into fixtures and clean db for each test * merged trunk * Merged with trunk, including manual conflict resolution in nova/virt/disk.py and nova/virt/xenapi/vmops.py * Fix DescribeRegion answer by introducing '{ec2,osapi}\_listen' flags instead of overloading {ec2,osapi}\_host. Get rid of paste\_config\_to\_flags, bin/nova-combined. Adds debug FLAGS dump at start of nova-api * Merged trunk * no, really fix lp721297 this time * Fixes various issues regarding verbose logging and logging errors on import * merged trunk * Some quick test cleanups, first step towards standardizing the way we start services in tests * merged to trunk rev709. NEEDS to be fixed based on 3rd reviewer's comment * Fixed based on reviewer's comment. 1. DB schema change vcpu/memory/hdd info were stored into Service table. 
but reviewer pointed out to me creating new table is better since Service table has too much columns * update based on prereq branch * fixed newline and moved import fake\_flags into run\_tests where it makes more sense * Merged with head * remove keyword argument, per review * add a start\_service method to our test baseclass * Merged with trunk * switch to explicit call to logging.setup() * merged trunk * Adds translation catalogs and distutils.extra glue code that automates the process of compiling message catalogs into .mo files * Merged trunk * PEP-8 fixes * merged with nova trunk revision #706 * get rid of initialized flag * move the fake initialized into fake flags * fixes for various logging errors and issues * Introduce IptablesManager in linux\_net. Port every use of iptables in linux\_net to it * Merging trunk to my branch. Fixed conflicts in Authors file and .mailmap * Merging trunk * fixed based on reviewer's comment. 1. erase wrapper function(remove/exists/mktempfile) from nova.utils. 2. nova-manage service describeresource(->describe\_resource) 3. nova-manage service updateresource(->update\_resource) 4. erase "my mistake print" statement * merged trunk * Merged with trunk * Added support for feature parity with the current Rackspace Cloud Servers practice of "injecting" files into newly-created instances for configuration, etc. However, this is in no way restricted to only writing files to the guest when it is first created * Uncommitted changes using the wrong author, and re-committing under the correct author * Added http://mynova/v1.0/zones/ api options for add/remove/update/delete zones. child\_zones table added to database and migration. Changed novarc vars from CLOUD\_SERVERS\_\* to NOVA\_\* to work with novatools. 
See python-novatools on github for help testing this * merge with zone phase 1 * merged lp:~jk0/nova/dynamicinstancetypes * changed from 003-004 migration * Merged trunk * Hi guys * Rebased at lp:nova 688 * Update the Openstack API so that it returns 'addresses' * I have a bug fix, additional tests for the \`limiter\` method, and additional commenting for a couple classes in the OpenStack API. Basically I've just tried to jump in somewhere to get my feet wet. Constructive criticism welcome * added labels to networks for use in multi-nic added writing network data to xenstore param-list added call to agent to reset network added reset\_network call to openstack api * Merged trunk * Rebased at lp:nova 687 * Merging trunk * Merged to trunk * Use RotatingFileHandler instead of FileHandler * Use a threadpool for handling requests coming in through RPC * Merged trunk * Merging trunk part 1 * merge with trunk * merging trunk back in; updating Authors conflict * Merged lp:nova * The proposed fix puts a VM which fails to spawn in a (new) 'FAILED' power state. It does not perform a clean-up. This because the user needs to know what has happened to the VM he/she was trying to run. Normally, API users do not have access to log files. In this case, the only way for the user to know what happened to the instance is to query its state (e.g.: doing euca-describe-instances). If we perform a complete clean-up, no information about the instance which failed to spawn will be left * Use eventlet.green.subprocess instead of standard subprocess * fixed merge conflict * better filtering * Use eventlet.green.subprocess instead of standard subprocess * merged recent version. 
no conflict, no big/important change to this branch * merge jk0 branch (with trunk merge) which added additional columns for instance\_types (which are openstack api specific) * merging with trunk * Merged trunk * Modified S3ImageService to return the format defined in BaseService to allow EC2 API's DescribeImages to work against Glance * Merged trunk * Merged trunk * Merged to trunk and fixed merge conflict in Authors * trunk merge * 1. Merged to rev654(?) 2. Fixed bug continuous request. if user continuouslly send live-migration request to same host, concurrent request to iptables occurs, and iptables complains. This version add retry for this issue * Added support for 'SAN' style volumes. A SAN's big difference is that the iSCSI target won't normally run on the same host as the volume service * Fix PEP8 violations * Automates the setup for FlatDHCP regardless of whether the interface has an ip address * merge with lp:nova * merge source and remove ifconfig * Catching all socket errors in \_get\_my\_ip, since any socket error is likely enough to cause a failure in detection * added INSTANCE\_TYPES to test for compatibility with current tests * Moved ssh\_execute to utils; moved comments to docstring * Fixes for Vish & Devin's feedback * Fixes https://bugs.launchpad.net/nova/+bug/681417 * merging * Fixed PEP8 test problems, complaining about too many blank lines at line 51 * Merged trunk * 1. Discard nova-manage host list Reason: nova-manage service list can be replacement. Changes: nova-manage * Adds security group output to describe\_instances * Use firewall\_driver flag as expected with NWFilterFirewall. This way, either you use NWFilterFirewall directly, or you use IptablesFirewall, which creates its own instance of NWFilterFirewall for the setup\_basic\_filtering command. 
This removes the requirement that LibvirtConnection would always need to know about NWFirewallFilter, and cleans up the area where the flag is used for loading the firewall class * Added a test that checks for localized strings in the source code that contain position-based string formatting placeholders. If found, an exception message is generated that summarizes the problem, as well as the location of the problematic code. This will prevent future trunk commits from adding localized strings that cannot be properly translated * Makes sure all instance and volume commands that raise not found are changed to show the ec2\_id instead of the internal id * Fixed formatting issues in current codebase * Fixes NotFound messages in api to show the ec2\_id * Fix Bug #703037. ra\_server is None * merge trunk * Changed method signature of create\_network * merged r621 * Merged with http://bazaar.launchpad.net/~vishvananda/nova/lp703037 * Merged trunk * Simple little changes related to openstack api to work better with glance * This branch updates docs to reflect the db sync addition. It additionally adds some useful errors to nova-manage to help people that are using old guides. It wraps sqlalchemy errors in generic DBError. Finally, it updates nova.sh to use current settings * merged trunk * Fixes issue with SNATTING chain not getting created or added to POSTROUTING when nova-network starts * Fix for bug #702237 * another trunk merge * This patch: * Trunk merged * merge with trunk * Fixes project and role checking when a user's naming attribute is not uid * Merged with r606 * Fixed merge conflict * Localized strings that employ formatting should not use positional arguments, as they prevent the translator from re-ordering the translated text; instead, they should use mappings (i.e., dicts). 
This change replaces all localized formatted strings that use more than one formatting placeholder with a mapping version * merged ntt branch * merged branch to name net\_manager.create\_networks args * Fix describe\_regions by changing renamed flags. Also added a test to catch future errors * Merged trunk * merged trunk * merged trunk fixed whitespace in rst * wrap sqlalchemy exceptions in a generic error * Resolved trunk merge conflicts * Change default log formats so that:  \* they include a timestamp (necessary to correlate logs)  \* no longer display version on every line (shorter lines)  \* use [-] instead of [N/A] (shorter lines, less scary-looking)  \* show level before logger name (better human-readability) * Merged with rev597 * Fixes issue with describe\_instances requiring an admin context * Added changes to make errors and recovery for volumes more graceful: * Merged trunk * merged trunk changes * merged trunk * merge vish's changes (which merged trunk and fixed a pep8 problem) * merged trunkand fixed conflicts and pep error * get\_my\_linklocal raises exception * Completed first pass at converting all localized strings with multiple format substitutions * Allows moving from the Austin-style db to the Bexar-style * merge from upstream and fix small issues * merged to trunk rev572 * Merged trunk * The live\_migration branch ( https://code.launchpad.net/~nttdata/nova/live-migration/+merge/44940 ) was not ready to be merged * merge from upstream to fix conflict * Trunk merge * Merged trunk * Implement support for streaming images from Glance when using the XenAPI virtualization backend, as per the bexar-xenapi-support-for-glance blueprint * Works around the app-armor problem of requiring disks with backing files to be named appropriately by changing the name of our extra disks * merged trunk * Add refresh\_security\_group\_\* methods to nova/virt/fake.py, as FakeConnection is the reference for documentation and method signatures that should be implemented 
by virt connection drivers * Merged trunk * Risk of Regression: This patch don’t modify existing functionlities, but I have added some. 1. nova.db.service.sqlalchemy.model.Serivce (adding a column to database) 2. nova.service ( nova-compute needes to insert information defined by 1 above) * Fixed error message in get\_my\_linklocal * Merged trunk * Merged with trunk revno 572 * Change where paste.deploy factories live and how they are called. They are now in the nova.wsgi.Application/Middleware classes, and call the \_\_init\_\_ method of their class with kwargs of the local configuration of the paste file * Further decouple api routing decisions and move into paste.deploy configuration. This makes paste back the nova-api binary * Merged trunk * The Openstack API requires image metadata to be returned immediately after an image-create call * merge trunk * Merging trunk * Merged trunk * merged trunk rev569 * Adds a developer interface with direct access to the internal inter-service APIs and a command-line tool based on reflection to interact with them * merge from upstream * merge from upstream * This branch fixes two outstanding bugs in compute. It also fixes a bad method signature in network and removes an unused method in cloud * Re-removes TrialTestCase. 
It was accidentally added in by some merges and causing issues with running tests individually * merged trial fix again * undo accidental removal of fake\_flags * merged lp:~vishvananda/nova/lp703012 * remove TrialTestCase again and fix merge issues * Merged trunk * Merged with trunk revno 565 * Implements the blueprint for enabling the setting of the root/admin password on an instance * OpenStack Compute (Nova) IPv4/IPv6 dual stack support http://wiki.openstack.org/BexarIpv6supportReadme * Merged to rev.563 * This change introduces support for Sheepdog (distributed block storage system) which is proposed in https://blueprints.launchpad.net/nova/+spec/sheepdog-support * merge from upstream: * Merged with r562 * This modifies libvirt to use CoW images instead of raw images. This is much more efficient and allows us to use the snapshotting capabilities available for qcow2 images. It also changes local storage to be a separate drive instead of a separate partition * merged trunk * Merged with r561 * Merging Trunk * Fixed based on the comments from code review. Merged to trunk rev 561 * Add a new method to firewall drivers to tell them to stop filtering a particular instance. Call it when an instance has been destroyed * merged to trunk rev 561 * merge trunk rev560 * Get reviewed and fixed based on comments. Merged latest version * merged trunk * Fixed missing \_(). Fixed to follow logging to LOG changes. Fixed merge miss (get\_fixed\_ip was moved away). Update some missing comments * merge from upstream and fix leaks in console tests * Merging trunk * Merging trunk, small fixes * cleaned up prior merge mess * Merged with r551 * Fixed syntax errors * Merged with trunk * Added support of availability zones for compute. models.Service got additional field availability\_zone and was created ZoneScheduler that make decisions based on this field. 
Also replaced fake 'nova' zone in EC2 cloud api * Had to abandon the other branch (~annegentle/nova/newscript) because the diffs weren't working right for me. This is a fresh branch that should be merged correctly with trunk. Thanks for your patience. :) * Merged with 549 * Change command to get link local address Remove superfluous code * This branch adds web based serial console access. Here is an overview of how it works (for libvirt): * Merged with r548 * Fixed for pep8 Remove temporary debugging * changed exception class * Changing DN creation to do searches for entries * merge trunk, fix conflict * Read Full Spec for implementation details and notes on how to boot an instance using OS API. http://etherpad.openstack.org/B2RK0q1CYj * Fixed a number of issues with the iptables firewall backend: \* Port specifications for firewalls come back from the data store as integers, but were compared as strings. \* --icmp-type was misspelled as --icmp\_type (underscore vs dash) \* There weren't any unit tests for these issues * merged trunk changes * Merging trunk * Trunk merge and conflcts resolved * Implementation of xs-console blueprint (adds support for console proxies like xvp) * Changed shared\_ip\_group detail routing * Fixes the metadata forwarding to work by default * Adds support to nova-manage to modify projects * merge trunk * re-merged in trunk to correct conflict * merged changes from trunk * Add a new firewall backend for libvirt, based on iptables * Moved get\_my\_ip into flags because that is the only thing it is being used for and use it to set a new flag called my\_ip * merged from upstream and made applicable changes * merged trunk changes * Fixes issue in trunk with downloading s3 images for instance creation * Wrap logs so we can: \* use a "context" kwarg to track requests all the way through the system \* use a custom formatter so we get the data we want (configurable with flags) \* allow additional formatting for debug statements for easer 
debugging \* add an AUDIT level, useful for noticing changes to system components \* use named logs instead of the general logger where it makes sesnse * Merged with trunk * merged changes from trunk * Merging trunk * Removing some FIXMEs * Reserving image before uploading * merge * another merge with trunk to remedy instance\_id issues * Include date in API action query * This branch implements lock functionality. The lock is stored in the compute worker database. Decorators have been added to the openstack API actions which alter instances in any way * merged trunk * Got the basic 'set admin password' stuff working * Merged trunk * merged trunk changes * Introduces basic support for spawning, rebooting and destroying vms when using Microsoft Hyper-V as the hypervisor. Images need to be in VHD format. Note that although Hyper-V doesn't accept kernel and ramdisk separate from the image, the nova objectstore api still expects an image to have an associated aki and ari. You can use dummy aki and ari images -- the hyper-v driver won't use them or try to download them. Requires Python's WMI module * merged trunk changes * Fixed trunk merge conflicts as spotted by dubs * This addition to the docs clarifies that it is a requirement for contributors to be listed in the Authors file before their commits can be merged to trunk * merge trunk * another merge from trunk to the latest rev * pulled changes from trunk added console api to openstack api * This branch contains the internal API cleanup branches I had previously proposed, but combined together and with all the UUID key replacement ripped out. This allows multiple REST interfaces (or other tools) to use the internal API directly, rather than having the logic tied up in the ec2 cloud.py file * merged trunk changes * Created a XenAPI plugin that will allow nova code to read/write/delete from xenstore records for a given instance. 
Added the basic methods for working with xenstore data to the vmops script, as well as plugin support to xenapi\_conn.py * missing \_() * Added xenstore plugin changed * merged changes from trunk * merged from trunk * Merged trunk * Merged trunk changes * Some Bug Fix * Merged and fiexed conflicts with r515 * Final few log tweaks, i18n, levels, including contexts, etc * Apply logging changes as a giant patch to work around the cloudpipe delete + add issue in the original patch * Fixing merge conflicts with new branch * merged in trunk changes * Uses paste.deploy to make application running configurable. This includes the ability to swap out middlewares, define new endpoints, and generally move away from having code to build wsgi routers and middleware chains into a configurable, extensible method for running wsgi servers * Add burnin support. Services are now by default disabled, but can have instances and volumes run on them using availability\_zone = nova:HOSTNAME. This lets the hardware be put through its paces without being put in the generally available pool of hardware. There is a 'service' subcommand for nova-manage where you can enable, disable, and list statuses of services * pep8 fixes * Several documentation corrections and formatting fixes * merge in trunk * merged latest trunk * merge trunk * merge trunk * merged in trunk and xenstore-plugin changes * Merged trunk * Merged trunk * Merged trunk * Merged trunk * 最新バージョンにマージ。変更点は以下の通り。 Authorsに自分の所属を追加 utils.pyのgenerate\_uidがおかしいのでインスタンスIDがオーバーフローしていたが、 その処理を一時撤廃。後で試験しなおしとすることにした。 * Merged trunk * Make InstanceActions and live diagnostics available through the Admin API * merge trunk * merge trunk * Cleans up the output of run\_tests.sh to look closer to Trial * Merged trunk * This patch is beginning of XenServer snapshots in nova. It adds: * merge recent revision(version of 2010/12/28) Change: 1. Use greenthread instead of defer at nova.virt.libvirt\_conn.live\_migration. 2. 
Move nova.scheduler.manager.live\_migration to nova.scheduler.driver 3. Move nova.scheduler.manager.has\_enough\_resource to nova.scheduler.driver 4. Any check routine in nova-manage.instance.live\_migration is moved to nova.scheduler.driver.schedule\_live\_migration * Merging trunk * fixed merge conflict with trunk * Merged trunk * merge * Changes and error fixes to help ensure basic parity with the Rackspace API. Some features are still missing, such as shared ip groups, and will be added in a later patch set * Merged with trunk * merge trunk * merge from trunk * This commit introduces scripts to apply XenServer host networking protections * merge from upstream and fix conflicts * Merging trunk * Merged trunk * merge trunk * I've added suspend along with a few changes to power state as well. I can't imagine suspend will be controversial but I've added a new power state for "suspended" to nova.compute.power\_states which libvirt doesn't use and updated the xenapi power mapping to use it for suspended state. I also updated the mappings in nova.api.openstack.servers to map PAUSED to "error" and SUSPENDED to "suspended". Thoughts there are that we don't currently (openstack API v1.0) use pause, so if somehow an instance were to be paused an error occurred somewhere, or someone did something in error. Either way asking the xenserver host for the status would show "paused". Support for more power states needs to be added to the next version of the openstack API * fix bug #lp694311 * Added stack command-line tool * Cleans up nova.api.openstack.images and fix it to work with cloudservers api. Previously "cloudservers image-list" wouldn't work, now it will. There are mappings in place to handle s3 or glance/local image service. In the future when the local image service is working, we can probably drop the s3 mappings * Merging trunk * Merged trunk * Merging trunk, fixing failed tests * Merged trunk * merge trunk * Fixed after Jay's review. 
Integrated code from Soren (we now use the same 'magic number' for images without kernel & ramdisk * logs inner exception in nova/utils.py->import\_class * Fix Bug #693963 * merge trunk * Merge * Support IPv6 * Make nova work even when user has LANG or LC\_ALL configured * merged trunk, resolved trivial conflict * fixed merge conflict * Merged again from trunk * fixed a few docstrings, added \_() for gettext * Moves implementation specific Openstack API code from the middleware to the drivers. Also cleans up a few areas and ensures all the API tests are passing again * Merged trunk * Trying to remove twisted dependencies, this gets everything working under nosetests * Merged trunk and resolved conflicts * merged trunk * merged trunk * Simplifies and improves ldap schema * xenapi iscsi support + unittests * Merged trunk * merge lp:nova * merge trunk * merge trunk, fixed unittests, added i18n strings, cleanups etc etc * first merge after i18n * added tests to ensure the easy api works as a backend for Compute API * merge from trunk * Fixes reboot (and rescue) to work even if libvirt doesn't know about the instance and the network doesn't exist * merged trunk * Fixes reboot (and rescue) to work even if libvirt doesn't know about the instance and the network doesn't exist * Adds a flag to use the X-Forwarded-For header to find the ip of the remote server. This is needed when you have multiple api servers with a load balancing proxy in front. 
It is a flag that defaults to False because if you don't have a sanitizing proxy in front, users could masquerade as other ips by passing in the header manually * Merged trunk * merged trunk * Moves the ip allocation requests to the from the api host into calls to the network host made from the compute host * merged trunk and fixed conflicts * merged trunk * Optimize creation of nwfilter rules so they aren't constantly being recreated * fixed more conflicts * merged trunk again * merge trunk and upgrade to cheetah templating * Optimize nwfilter creation and project filter * Merging trunk * fixed conflicts * WSGI middleware for lockout after failed authentications of ec2 access key * Puts the creation of nova iptables chains into the source code and cleans up rule creation. This makes nova play more nicely with other iptables rules that may be created on the host * Merging trunk * merge trunk * Fixes per-project vpns (cloudpipe) and adds manage commands and support for certificate revocation * merge trunk * merged i8n and fixed conflicts * after trunk merge * Log all XenAPI actions to InstanceActions * Merged trunk * merging trunk * merging trunk * All merged with trunk and let's see if a new merge prop (with no pre-req) works. * merging in trunk * Merged trunk * Added InstanceDiagnostics and InstanceActions DB models * Merged trunk * merge trunk * 1) Merged from trunk 2) 'type' parameter in VMHelper.fetch\_image converted in enum 3) Fixed pep8 errors 4) Passed unit tests * Merging trunk * Add raw disk image support * Adds support for Pause and Unpause of xenserver instances * Integrated changes from Soren (raw-disk-images). Updated authors file. All tests passed * eventlet merge updates * first revision after eventlet merge. Currently xenapi-unittests are broken, but everything else seems to be running okay * Integrated eventlet\_merge patch * First pass at converting run\_tests.py to nosetests. The network and objctstore tests don't yet work. 
Also, we need to manually remove the sqlite file between runs * merged in project-vpns to get flag changes * move some flags around * merged trunk * merged trunk, fixed conflicts and tests * This branch removes most of the dependencies on twisted and moves towards the plan described by https://blueprints.launchpad.net/nova/+spec/unified-service-architecture * pep8 fixes * Merged changes from trunk into the branch * merged with trunk. fixed compute.pause test * Make sure we properly close the bzr WorkingTree in our Authors up-to-datedness unit test * clean up tests and add overriden time method to utils * basic conversion of xs-pause to eventlet done * Merged from trunk and fixed merge issues. Also fixed pep8 issues * updates per review * pep8 * fixup after merge with trunk * merge with trey tests * First round of i18n-ifying strings in Nova * merge-a-tat-tat upstream to this branch * \* pylint fixes \* code clean-up \* first cut for xenapi unit tests * merged changes from sandy's branch * formatting and naming cleanup * get service unittests runnning again * merge with trey * Make XenServer VM diagnostics available through nova.virt.xenapi * Merged trunk * merging sandy's branch * merge with trunk to pull in admin-api branch * Flag to define which operations are exposed in the OpenStack API, disabling all others * Fixed Authors conflict and re-merged with trunk * intermediate commit to checkpoint progress * some pylint caught changes to compute * merge conflict * merged upstream changes * Merged trunk * merged updates to trunk * merge trunk * Pushed terminate instance and network manager/topic methods into network.compute.api * Merged trunk * Moved the reboot/rescue methods into nova.compute.api * merged with trunk. All clear! * Added a script to use OpenDJ as an LDAP server instead of OpenLDAP. Also modified nova.sh to add an USE\_OPENDJ option, that will be checked when USE\_LDAP is set * It looks like Soren fixed the author file, can I hit the commit button? 
* rev439ベースにライブマイグレーションの機能をマージ このバージョンはEBSなし、CPUフラグのチェックなし * merge with lp:~armando-migliaccio/nova/xenapi-refactoring * merge trunk * Merged reboot-rescue into network-manager * Merged trunk * Consolidated the start instance logic in the two API classes into a single method. This also cleans up a number of small discrepencies between the two * Merged trunk and resolved conflicts * merge lp:~armando-migliaccio/nova/refactoring * merge trunk * Moving the openldap schema out of nova.sh into it's own files, and adding sun (opends/opendj/sun directory server/fedora ds) schema files * brought latest changes from trunk * merged Justin Santa Barbara's raw-disk-image back into the latest trunk * merged trunk * Add a templating mechanism in the flag parsing * brought the xenapi refactoring in plus trunk changes * A few more changes: \* Fixed up some flags \* Put in an updated nova.sh \* Broke out metadata forwarding so it will work in flatdhcp mode \* Added descriptive docstrings explaining the networking modes in more detail * small conflict resolution * Added a .mailmap that maps addresses in bzr to people's real, preferred e-mail addresses. (I made a few guesses along the way, feel free to adjust according to what is actually the preferred e-mail) * merged trunk, added recent nova.sh * add vpn ping and optimize vpn list * Address pep8 complaints * * merged with trunk * Ryan\_Lane's code to handle /etc/network not existing when we try to inject /etc/network/interfaces into an image * Changed from fine-grained operation control to binary admin on/off setting * Lots of documentation and docstring updates * The docs are just going to be wrong for now. I'll file a bug upstream * Change how wsgified doc wrapping happens to fix test * pep8 * merge with trunk * Added a .mailmap that maps addresses in bzr to people's real, preferred e-mail addresses. 
(I made a few guesses along the way, feel free to adjust according to what is actually the preferred e-mail) * merge in trunk * Fix docstrings for wsigfied methods * merged trunk * Change socket type in nova.utils.get\_my\_ip() to SOCK\_DGRAM. This way, we don't actually have to set up a connection. Also, change the destination host to an IP (chose one of Google's DNS's at random) rather than a hostname, so we avoid doing a DNS lookup * Change socket type in nova.utils.get\_my\_ip() to SOCK\_DGRAM. This way, we don't actually have to set up a connection. Also, change the destination host to an IP (chose one of Google's DNS's at random) rather than a hostname, so we avoid doing a DNS lookup * ISCSI Volume support * merged * merge * merged trunk * basics to get proxied ajaxterm working with virsh * merged trunk, just in case * Moves db writes into compute manager class. Cleans up sqlalchemy model/api to remove redundant calls for updating what is really a dict * Fixes PEP8 violations from the last few merges * More PEP8 fixes that were introduced in the last couple commits * Fixes service unit tests after tornado excision * renamed target\_id to iscsi\_target * merged gundlach's excision * Delete BaseTestCase and with it the last reference to tornado * Removes some cruft from sqlalchemy/models.py like unused imports and the unused str\_id method * Adds rescue and unrescue commands * actually remove the conditional * Remove the last vestigial bits of tornado code still in use * Exceptions in the OpenStack API will be converted to Faults as they should be, rather than barfing a stack trace to the user * Duplicate the two trivial escaping functions remaining from tornado's code and remove the dependency * merge lp:nova * merged trunk and fixed conflicts/changes * part way through porting the codebase off of twisted * Another pep8 cleanup branch for nova/api, should be merged after lp:~eday/nova/pep8-fixes * PEP8 and pylint cleanup. 
There should be no functional changes here, just style changes to get violations down * \* Fills out the Parallax/Glance API calls for update/create/delete and adds unit tests for them. \* Modifies the ImageController and GlanceImageService/LocalImageService calls to use index and detail routes to comply perfectly with the RS/OpenStack API * This branch converts incoming data to the api into the proper type * Fix the --help flag for printing help on twistd-based services * Make Redis completely optional: * trivial style change * prevent leakage of FLAGS changes across tests * This branch modifies the fixes all of the deprecation warnings about empty context. It does this by adding the following fixes/features \* promotes api/context.py to context.py because it is used by the whole system \* adds more information to the context object \* passes the context through rpc \* adds a helper method for promoting to admin context (elevate()) \* modifies most checks to use context.project\_id instead of context.project.id to avoid trips to the database * Merged with trunk, fixed broken stuff * Fixes a few concurrency issues with creating volumes and instances. Most importantly it adds retries to a number of the volume shell commands and it adds a unique constraint on export\_devices and a safe create so that there aren't multiple copies of export devices in the database * merged trunk * merged concurrency * merged trunk * cleaned up most of the issues * merged trunk * Fixes bug 660115 * Xen support * Adds flat networking + dhcpserver mode * This patch removes the ugly network\_index that is used by VlanManager and turns network itself into a pool. 
It adds support for creating the networks through an api command: nova-manage network create # creates all of the networks defined by flags or nova-manage network create 5 # create the first five networks * merged upstream * cleanup leftover addresses * merged trunk * merged trunk * merged trunk * merged trunk * Revert the conversion to 64-bit ints stored in a PickleType column, because PickleType is incompatible with having a unique constraint * Revert 64 bit storage and use 32 bit again. I didn't notice that we verify that randomly created uids don't already exist in the DB, so the chance of collision isn't really an issue until we get to tens of thousands of machines. Even then we should only expect a few retries before finding a free ID * This patch adds support for EC2 security groups using libvirt's nwfilter mechanism, which in turn uses iptables and ebtables on the individual compute nodes. This has a number of benefits: \* Inter-VM network traffic can take the fastest route through the network without our having to worry about getting it through a central firewall. \* Not relying on a central firewall also removes a potential SPOF. \* The filtering load is distributed, offering great scalability * Change internal\_id from a 32 bit int to a 64 bit int * 32 bit internal\_ids become 64 bit. Since there is no 64 bit native type in SqlAlchemy, we use PickleType which uses the Binary SqlAlchemy type under the hood * Adds stubs and tests for GlanceImageService and LocalImageService. Adds basic plumbing for ParallaxClient and TellerClient and hooks that into the GlanceImageService * Cleanup around the rackspace API for the ec2 to internal\_id transition * Replace model.Instance.ec2\_id with an integer internal\_id so that both APIs can represent the ID to external users * merged trunk and fixed tests * merge from gundlach ec2 conversion * Fix broken unit tests * A shiny, new Auth driver backed by SQLAlchemy. Read it and weep. 
I did * Revert r312 * Fixes to address the following issues: * Refactor sqlalchemy api to perform contextual authorization * Merged trunk * merged remove-network-index * Fixed flat network manager with network index gone * merged trunk * First attempt at a uuid generator -- but we've lost a 'topic' input so i don't know what that did * merged trunk, removed extra quotas * Adds support for periodic\_tasks on manager that are regularly called by the service and recovers fixed\_ips that didn't get disassociated properly * merged trunk * Includes changes for creating instances via the Rackspace API. Utilizes much of the existing EC2 functionality to power the Rackspace side of things, at least for now * Support the pagination interface in RS API -- the &offset and &limit parameters are now recognized * Update from trunk to handle one-line merge conflict * Support fault notation in error messages in the RS API * merged with soren's branch * Add user-editable name & notes/description to volumes, instances, and images * merged trunk * \* Create an AuthManager#update\_user method to change keys and admin status. \* Refactor the auth\_unittest to not care about test order \* Expose the update\_user method via nova-manage * Updates the fix-iptables branch with a number of bugfixes * Fix a few errors in api calls related to mistyped database methods for floating\_ips: specifically describe addresses and and associate address * Merged Termie's branch that starts tornado removal and fixed rpc test cases for twisted. 
Nothing is testing the Eventlet version of rpc.call though yet * Adds bpython support to nova-manage shell, because it is super sexy * merged and removed duplicated methods * fixed merge conflicts * Implementation of the Rackspace servers API controller * merged network-lease-fix * merged floating-ips * move default group creation to api * get rid of network indexes and make networks into a pool * merged trunk * merged floating-ip-by-project * merged network-lease-fix * merged trunk * Merged Termie's branch and fixed rpc test cases for tesited. Nothing is testing the Eventlet version of rpc.call though yet * Put EC2 API -> eventlet back into trunk, fixing the bits that I missed when I put it into trunk on 9/21 * Implementation of Rackspace token based authentication for the Openstack API * Merged gundlach's branch * merged trunk * merge from trunk * merged trunk and fixed errors * merged trunk * Delete nova.endpoint module, which used Tornado to serve up the Amazon EC2 API. Replace it with nova.api.ec2 module, which serves up the same API via a WSGI app in Eventlet. Convert relevant unit tests from Twisted to eventlet * merged trunk * merged trunk * Implements quotas with overrides for instances, volumes, and floating ips * Moves keypairs out of ldap and into the common datastore * allows api servers to have a list of regions, allowing multi-cluster support if you have a shared image store and user database * merged trunk * merged trunk * Removes second copy of ProcessExecutionError that creeped in during a bad merge * Adds timing fields to instances and volumes to track launch times and schedule times * Adds timing fields to instances and volumes to track launch times and schedule times * add in support for ajaxterm console access * Better error message on the failure of a spawned process, and it's a ProcessExecutionException irrespective of how the process is run (twisted or not) * Proposing merge to get feedback on orm refactoring. 
I am very interested in feedback to all of these changes * Address a couple of the TODO's: We now have half-decent input validation for AuthorizeSecurityGroupIngress and RevokeDitto * removed second copy of ProcessExecutionError * merged trunk * merged instance time and added better concurrency * merged trunk * merged scheduler * merged quotas * merged trunk * remerged scheduler * merged trunk * merged trunk * merged trunk * merged trunk * merged orm and put instance in scheduling state * merged describe\_speed * added scheduled\_at to instances and volumes * merged orm * merged orm * logging for backend is now info instead of error * Tests turn things into inlineCallbacks * Remove tornado-related code from almost everything * updated to the new orm code * a few formatting fixes and moved exception * Last of cleanup, including removing fake\_storage flage * merged orm\_deux * Lots of fixes to make the nova commands work properly and make datamodel work with mysql properly * more scheduler tests * merged trunk * Moved API tests into a sub-folder of the tests/ and added a stubbed-out test declarations to mirror existing API tickets * merged orm branch * merged trunk, fixed a couple errors * undo change to get\_my\_ip * pylint fixes for /nova/virt/connection.py * pylint fixes for nova/objectstore/handler.py * ip addresses work now * Add Flavors controller supporting * Resolve conflicts and merge trunk * instance runs * split volume into service/manager/driver * get to look like trunk * Fixes issue with the same ip being assigned to multiple instances * merged trunk and fixed tests * Initial support for Rackspace API /image requests. They will eventually be backed by Glance * Rework virt.xenapi's concurrency model. There were many places where we were inadvertently blocking the reactor thread. 
The reworking puts all calls to XenAPI on background threads, so that they won't block the reactor thread * merged trunk and fixed merge errors * Better error message on subprocess spawn fail, and it's a ProcessExecutionException irrespective of how the process is run * Check exit codes when spawning processes by default Also pass --fail to curl so that it sets exit code when download fails * Added unit tests for WSGI helpers and base WSGI API * merged termies abstractions * Move deferredToThread into utils, as suggested by termie * Data abstraction for compute service * Merged with trunk * Merged with trunk * Merged trunk * Since pylint=0.19 is our version, force everyone to use the disable-msg syntax * Changed our minds: keep pylint equal to Ubuntu Lucid version, and use disable-msg throughout * Newest pylint supports 'disable=', not 'disable-msg=' * merged trunk * merged refresh from sleepsonthefloor * See description of change... what's the difference between that message and this message again? * Fixes quite a few style issues across the entire nova codebase bringing it much closer to the guide described in HACKING * merge from trunk * merged trunk and fixed conflicts * Added documentation for the nova.virt connection interface, a note about the need to chmod the objectstore script, and a reference for the XenAPI module * rather comprehensive style fixes * Add new libvirt\_type option "uml" for user-mode-linux.. 
This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml * merge in latedt from vish * Catches and logs exceptions for rpc calls and raises a RemoteError exception on the caller side * Removes requirement of internet connectivity to run api server * merged trunk * merged fix-hostname and fixed conflict * Improves pep8 compliance and pylint score in network code * refactor to have base helper class with shared session and engine * got run\_tests.py to run (with many failed tests) * Make WSGI routing support routing to WSGI apps or to controller+action * Merged with trunk * Fix exception in get\_info * Merged with trunk * Merged with trunk * Implement VIF creation in the xenapi module * merged trunk * 2 changes in doing PEP8 & Pylint cleaning: \* adding pep8 and pylint to the PIP requirements files for Tools \* light cleaning work (mostly formatting) on nova/endpoints/cloud.py * More changes to volume to fix concurrency issues. Also testing updates * merged trunk, fixed an error with releasing ip * pylint fixes for /nova/test.py * Fixes pylint issues in /nova/server.py * importing merges from hudson branch * This branch builds off of Todd and Michael's API branches to rework the Rackspace API endpoint and WSGI layers * Fix up variable names instead of disabling pylint naming rule. Makes variables able to be a single letter in pylintrc * Disables warning about TODO in code comments in pylintrc * More pylint/pep8 cleanup, this time in bin/\* files * pylint fixes for /nova/test.py * Pull trunk merge through lp:~ewanmellor/nova/add-contains * Pull trunk merge through lp:~ewanmellor/nova/xapi-plugin * Merged with trunk again * Merged with trunk * Greater compliance with pep8/pylint style checks * Merged trunk * merged with trunk * Merged Todd and Michael's changes * Make network its own worker! This separates the network logic from the api server, allowing us to have multiple network controllers. 
There a lot of stuff in networking that is ugly and should be modified with the datamodel changes. I've attempted not to mess with those things too much to keep the changeset small(ha!) * merged trunk * merged trunk * Fix deprecation warning in AuthManager. \_\_new\_\_ isn't allowed to take args * Get IP doesn't fail of you not connected to the intetnet * Merged with trunk * Added --fail argument to curl invocations, so that HTTP request fails get surfaced as non-zero exit codes * Merged with trunk * Merged with trunk * Fixed assertion "Someone released me too many times: too many tokens!" * Merged with trunk to resolve merge conflicts * oops retry and add extra exception check * Implemented admin api for rbac * Adds initial support for XenAPI (not yet finished) * Allow driver specification in AuthManager creation * allow driver to be passed in to auth manager instead of depending solely on flag * Merged trunk * Create a model for storing session tokens * bzr merge lp:nova/trunk * Merged trunk * Makes the compute and volume daemon workers use a common base class called Service. Adds a NetworkService in preparation for splitting out networking code. 
General cleanup and standardizarion of naming * merged trunk * Makes the objectstore require authorization, checks it properly, and makes nova-compute provide it when fetching images * Refactor of auth code * Expiry awareness for SessionToken * Basic standup of SessionToken model for shortlived auth tokens * merged trunk * merged trunk * Changes nova-volume to use twisted * Fixes up Bucket to throw proper NotFound and NotEmpty exceptions in constructor and delete() method, and fixes up objectstore\_unittest to properly use assertRaises() to check for proper exceptions and remove the assert\_ calls * Merged with trunk, since a lot of useful things have gone in there recently * renamed xxxnode to xxservice * Check exit codes when spawning processes by default * Merged trunk, fixed extra references to fake\_users * Fixes up Bucket to throw proper NotFound and NotEmpty exceptions in constructor and delete() method, and fixes up objectstore\_unittest to properly use assertRaises() to check for proper exceptions and remove the assert\_ calls * merge with twisted-volume * Locally administered mac addresses have the second least significant bit of the most significant byte set. If this byte is set then udev on ubuntu doesn't set persistent net rules * use a locally administered mac address so it isn't saved by udev * Merged trunk. 
Fixed new references to UserManager * Fixes to dhcp lease code to use a flagfile * merged trunk * Replace tornado objectstore with twisted web * merged in trunk and fixed import merge errors * merge with singleton pool * reorder imports spacing * remove import of vendor since we have PPA now * update copyrights * fix merge errors * datetime import typo * added missing isotime method from utils * Fixed the os.environ patch (bogus) * Fixes as per Vish review (whitespace, import statements) * Got dhcpleasor working, with test ENV for testing, and rpc.cast for real world * Capture signals from dnsmasq and use them to update network state * Removed trailing whitespace from header * Updated licenses * removed all references to keeper * Fixes based on code review 27001 * Admin API + Worker Tracking * Removed trailing whitespace from header * Updated licenses * trackback formatting isn't logging correctly * use logger to print trace of unhandled exceptions * fix fakeldap so it can use redis keeper * Refactored Instance to get rid of \_s bits, and fixed some bugs in state management * Flush redis db in setup and teardown of tests * Update documentation * make get\_my\_ip return 127.0.0.1 for testing * whitespace fixes for nova/utils.py * Merged Vish's work on adding projects to nova * initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/HACKING.rst0000664000175000017500000000024400000000000017662 0ustar00zuulzuul00000000000000oslo.versionedobjects Style Commandments ======================================== Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/LICENSE0000664000175000017500000002363700000000000017104 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ 
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.866407 oslo.versionedobjects-3.4.0/PKG-INFO0000664000175000017500000000477200000000000017173 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: oslo.versionedobjects Version: 3.4.0 Summary: Oslo Versioned Objects library Home-page: https://docs.openstack.org/oslo.versionedobjects/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/oslo.versionedobjects.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on ===================== oslo.versionedobjects ===================== .. image:: https://img.shields.io/pypi/v/oslo.versionedobjects.svg :target: https://pypi.org/project/oslo.versionedobjects/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.versionedobjects.svg :target: https://pypi.org/project/oslo.versionedobjects/ :alt: Downloads The oslo.versionedobjects library provides a generic versioned object model that is RPC-friendly, with inbuilt serialization, field typing, and remotable method calls. It can be used to define a data model within a project independent of external APIs or database schema for the purposes of providing upgrade compatibility across distributed services. 
* Free software: Apache license * Documentation: https://docs.openstack.org/oslo.versionedobjects/latest * Source: http://opendev.org/openstack/oslo.versionedobjects * Bugs: http://bugs.launchpad.net/oslo.versionedobjects * Release notes: https://docs.openstack.org/releasenotes/oslo.versionedobjects/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Python: >=3.8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/README.rst0000664000175000017500000000241700000000000017557 0ustar00zuulzuul00000000000000======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/oslo.versionedobjects.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on ===================== oslo.versionedobjects ===================== .. image:: https://img.shields.io/pypi/v/oslo.versionedobjects.svg :target: https://pypi.org/project/oslo.versionedobjects/ :alt: Latest Version .. 
image:: https://img.shields.io/pypi/dm/oslo.versionedobjects.svg :target: https://pypi.org/project/oslo.versionedobjects/ :alt: Downloads The oslo.versionedobjects library provides a generic versioned object model that is RPC-friendly, with inbuilt serialization, field typing, and remotable method calls. It can be used to define a data model within a project independent of external APIs or database schema for the purposes of providing upgrade compatibility across distributed services. * Free software: Apache license * Documentation: https://docs.openstack.org/oslo.versionedobjects/latest * Source: http://opendev.org/openstack/oslo.versionedobjects * Bugs: http://bugs.launchpad.net/oslo.versionedobjects * Release notes: https://docs.openstack.org/releasenotes/oslo.versionedobjects/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/bindep.txt0000664000175000017500000000045400000000000020071 0ustar00zuulzuul00000000000000# This is a cross-platform list tracking distribution packages needed by tests; # see http://docs.openstack.org/infra/bindep/ for additional information. 
locales [platform:debian] python3-all-dev [platform:ubuntu !platform:ubuntu-precise] python3-dev [platform:dpkg] python3-devel [platform:fedora] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1716452798.8544068 oslo.versionedobjects-3.4.0/doc/0000775000175000017500000000000000000000000016631 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/requirements.txt0000664000175000017500000000025700000000000022121 0ustar00zuulzuul00000000000000# These are needed for docs generation openstackdocstheme>=2.2.1 # Apache-2.0 sphinx>=2.0.0 # BSD reno>=3.1.0 # Apache-2.0 mock>=2.0.0 # BSD fixtures>=3.0.0 # Apache-2.0/BSD ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1716452798.8544068 oslo.versionedobjects-3.4.0/doc/source/0000775000175000017500000000000000000000000020131 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/conf.py0000664000175000017500000000536500000000000021441 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright (C) 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.extlinks', 'openstackdocstheme', 'oslo_config.sphinxext', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/oslo.versionedobjects' openstackdocs_bug_project = 'oslo.versionedobjects' openstackdocs_bug_tag = '' # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'oslo.versionedobjects' copyright = '2014, OpenStack Foundation' source_tree = 'https://opendev.org/openstack/%s' % project # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # Shortened external links. extlinks = { 'example': (source_tree + '/%s/examples/%%s.py' % project.replace(".", "_"), None), } # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['static'] html_theme = 'openstackdocs' # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, '%s Documentation' % project, 'OpenStack Foundation', 'manual'), ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1716452798.8544068 oslo.versionedobjects-3.4.0/doc/source/configuration/0000775000175000017500000000000000000000000023000 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/configuration/index.rst0000664000175000017500000000043400000000000024642 0ustar00zuulzuul00000000000000======================= Configuration Options ======================= oslo.versionedobjects uses oslo.config to define and manage configuration options to allow the deployer to control how an application using oslo.versionedobjects behaves. .. show-options:: oslo.versionedobjects ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1716452798.8544068 oslo.versionedobjects-3.4.0/doc/source/contributor/0000775000175000017500000000000000000000000022503 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/contributor/index.rst0000664000175000017500000000012400000000000024341 0ustar00zuulzuul00000000000000============== Contributing ============== .. 
include:: ../../../CONTRIBUTING.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/index.rst0000664000175000017500000000202200000000000021766 0ustar00zuulzuul00000000000000======================= oslo.versionedobjects ======================= The oslo.versionedobjects library provides a generic versioned object model that is RPC-friendly, with inbuilt serialization, field typing, and remotable method calls. It can be used to define a data model within a project independent of external APIs or database schema for the purposes of providing upgrade compatibility across distributed services. * Free software: Apache license * Documentation: https://docs.openstack.org/oslo.versionedobjects/latest/ * Source: https://opendev.org/openstack/oslo.versionedobjects * Bugs: https://bugs.launchpad.net/oslo.versionedobjects ---- Contents ======== .. toctree:: :maxdepth: 2 install/index user/index configuration/index reference/index contributor/index Release Notes ============= Read also the `oslo.versionedobjects Release Notes `_. Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1716452798.8544068 oslo.versionedobjects-3.4.0/doc/source/install/0000775000175000017500000000000000000000000021577 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/install/index.rst0000664000175000017500000000045200000000000023441 0ustar00zuulzuul00000000000000============== Installation ============== At the command line:: $ pip install oslo.versionedobjects To use ``oslo_versionedobjects.fixture``, some additional dependencies are needed. 
They can be installed using the ``fixtures`` extra:: $ pip install 'oslo.versionedobjects[fixtures]' ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.858407 oslo.versionedobjects-3.4.0/doc/source/reference/0000775000175000017500000000000000000000000022067 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/reference/base.rst0000664000175000017500000000013300000000000023530 0ustar00zuulzuul00000000000000============= base ============= .. automodule:: oslo_versionedobjects.base :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/reference/exception.rst0000664000175000017500000000013400000000000024615 0ustar00zuulzuul00000000000000========= exception ========= .. automodule:: oslo_versionedobjects.exception :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/reference/fields.rst0000664000175000017500000000013700000000000024070 0ustar00zuulzuul00000000000000============= fields ============= .. automodule:: oslo_versionedobjects.fields :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/reference/fixture.rst0000664000175000017500000000251100000000000024306 0ustar00zuulzuul00000000000000========= Fixture ========= .. automodule:: oslo_versionedobjects.fixture :members: :undoc-members: ObjectVersionChecker ~~~~~~~~~~~~~~~~~~~~ Fingerprints ------------ One function of the ObjectVersionChecker is to generate fingerprints of versioned objects. 
These fingerprints are a combination of the object's version and a hash of the RPC-critical attributes of the object: fields and remotable methods. The test_hashes() method is used to retrieve the expected and actual fingerprints of the objects. When using this method to assert the versions of objects in a local project, the expected fingerprints are the fingerprints of the previous state of the objects. These fingerprints are defined locally in the project and passed to test_hashes(). The actual fingerprints are the dynamically-generated fingerprints of the current state of the objects. If the expected and actual fingerprints do not match on an object, this means the RPC contract that was previously defined in the object is no longer the same. Because of this, the object's version must be updated. When the version is updated and the tests are run again, a new fingerprint for the object is generated. This fingerprint should be written over the previous version of the fingerprint. This shows the newly generated fingerprint is now the most recent state of the object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/reference/index.rst0000664000175000017500000000013000000000000023722 0ustar00zuulzuul00000000000000============= API Reference ============= .. toctree:: :maxdepth: 2 :glob: * ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.858407 oslo.versionedobjects-3.4.0/doc/source/user/0000775000175000017500000000000000000000000021107 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/user/examples.rst0000664000175000017500000000172100000000000023460 0ustar00zuulzuul00000000000000========== Examples ========== IOT lightbulb ============= .. note:: Full source located at :example:`iot_bulb`. 
.. literalinclude:: ../../../oslo_versionedobjects/examples/iot_bulb.py :language: python :linenos: :lines: 14- Expected (or similar) output:: The __str__() output of this new object: IOTLightbulb(manufactured_on=2017-03-15T23:25:01Z,serial='abc-123') The 'serial' field of the object: abc-123 Primitive representation of this object: {'versioned_object.version': '1.0', 'versioned_object.changes': ['serial', 'manufactured_on'], 'versioned_object.name': 'IOTLightbulb', 'versioned_object.data': {'serial': u'abc-123', 'manufactured_on': '2017-03-15T23:25:01Z'}, 'versioned_object.namespace': 'versionedobjects.examples'} The __str__() output of this new (reconstructed) object: IOTLightbulb(manufactured_on=2017-03-15T23:25:01Z,serial='abc-123') After serial number change, the set of fields that have been mutated is: set(['serial']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/user/history.rst0000664000175000017500000000004000000000000023334 0ustar00zuulzuul00000000000000.. include:: ../../../ChangeLog ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/user/index.rst0000664000175000017500000000037200000000000022752 0ustar00zuulzuul00000000000000=========================== Using oslo.versionedobjects =========================== .. toctree:: :maxdepth: 2 usage examples .. history contains a lot of sections, toctree with maxdepth 1 is used. .. toctree:: :maxdepth: 1 history ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/doc/source/user/usage.rst0000664000175000017500000001103600000000000022746 0ustar00zuulzuul00000000000000======= Usage ======= Incorporating oslo.versionedobjects into your projects can be accomplished in the following steps: 1. `Add oslo.versionedobjects to requirements`_ 2. 
`Create objects subdirectory and a base.py inside it`_ 3. `Create base object with the project namespace`_ 4. `Create other base objects if needed`_ 5. `Implement objects and place them in objects/\*.py`_ 6. `Implement extra fields in objects/fields.py`_ 7. `Create object registry and register all objects`_ 8. `Create and attach the object serializer`_ 9. `Implement the indirection API`_ Add oslo.versionedobjects to requirements ----------------------------------------- To use oslo.versionedobjects in an OpenStack project remember to add it to the requirements.txt Create objects subdirectory and a base.py inside it --------------------------------------------------- Objects reside in the `/objects` directory and this is the place from which all objects should be imported. Start the implementation by creating `objects/base.py` with these main classes: Create base object with the project namespace --------------------------------------------- :class:`oslo_versionedobjects.base.VersionedObject` The VersionedObject base class for the project. You have to fill up the `OBJ_PROJECT_NAMESPACE` property. `OBJ_SERIAL_NAMESPACE` is used only for backward compatibility and should not be set in new projects. Create other base objects if needed ----------------------------------- class:`oslo_versionedobjects.base.VersionedPersistentObject` A mixin class for persistent objects can be created, defining repeated fields like `created_at`, `updated_at`. Fields are defined in the fields property (which is a dict). If objects were previously passed as dicts (a common situation), a :class:`oslo_versionedobjects.base.VersionedObjectDictCompat` can be used as a mixin class to support dict operations. 
Implement objects and place them in objects/\*.py ------------------------------------------------- Objects classes should be created for all resources/objects passed via RPC as IDs or dicts in order to: * spare the database (or other resource) from extra calls * pass objects instead of dicts, which are tagged with their version * handle all object versions in one place (the `obj_make_compatible` method) To make sure all objects are accessible at all times, you should import them in __init__.py in the objects/ directory. Implement extra fields in objects/fields.py ------------------------------------------- New field types can be implemented by inheriting from :class:`oslo_versionedobjects.field.Field` and overwriting the `from_primitive` and `to_primitive` methods. By subclassing :class:`oslo_versionedobjects.fields.AutoTypedField` you can stack multiple fields together, making sure even nested data structures are being validated. Create object registry and register all objects ----------------------------------------------- :class:`oslo_versionedobjects.base.VersionedObjectRegistry` The place where all objects are registered. All object classes should be registered by the :attr:`oslo_versionedobjects.base.ObjectRegistry.register` class decorator. Create and attach the object serializer --------------------------------------- :class:`oslo_versionedobjects.base.VersionedObjectSerializer` To transfer objects by RPC, subclass the :class:`oslo_versionedobjects.base.VersionedObjectSerializer` setting the OBJ_BASE_CLASS property to the previously defined Object class. Connect the serializer to oslo_messaging: .. 
code:: python serializer = RequestContextSerializer(objects_base.MagnumObjectSerializer()) target = messaging.Target(topic=topic, server=server) self._server = messaging.get_rpc_server(transport, target, handlers, serializer=serializer) Implement the indirection API ----------------------------- :class:`oslo_versionedobjects.base.VersionedObjectIndirectionAPI` oslo.versionedobjects supports `remotable` method calls. These are calls of the object methods and classmethods which can be executed locally or remotely depending on the configuration. Setting the indirection_api as a property of an object relays the calls to decorated methods through the defined RPC API. The attachment of the indirection_api should be handled by configuration at startup time. Second function of the indirection API is backporting. When the object serializer attempts to deserialize an object with a future version, not supported by the current instance, it calls the object_backport method in an attempt to backport the object to a version which can then be handled as normal. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.858407 oslo.versionedobjects-3.4.0/oslo.versionedobjects.egg-info/0000775000175000017500000000000000000000000024101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452798.0 oslo.versionedobjects-3.4.0/oslo.versionedobjects.egg-info/PKG-INFO0000664000175000017500000000477200000000000025210 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: oslo.versionedobjects Version: 3.4.0 Summary: Oslo Versioned Objects library Home-page: https://docs.openstack.org/oslo.versionedobjects/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ======================== Team and repository tags ======================== .. 
image:: https://governance.openstack.org/tc/badges/oslo.versionedobjects.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on ===================== oslo.versionedobjects ===================== .. image:: https://img.shields.io/pypi/v/oslo.versionedobjects.svg :target: https://pypi.org/project/oslo.versionedobjects/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.versionedobjects.svg :target: https://pypi.org/project/oslo.versionedobjects/ :alt: Downloads The oslo.versionedobjects library provides a generic versioned object model that is RPC-friendly, with inbuilt serialization, field typing, and remotable method calls. It can be used to define a data model within a project independent of external APIs or database schema for the purposes of providing upgrade compatibility across distributed services. * Free software: Apache license * Documentation: https://docs.openstack.org/oslo.versionedobjects/latest * Source: http://opendev.org/openstack/oslo.versionedobjects * Bugs: http://bugs.launchpad.net/oslo.versionedobjects * Release notes: https://docs.openstack.org/releasenotes/oslo.versionedobjects/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Python: >=3.8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1716452798.0 oslo.versionedobjects-3.4.0/oslo.versionedobjects.egg-info/SOURCES.txt0000664000175000017500000000501500000000000025766 0ustar00zuulzuul00000000000000.coveragerc .mailmap .pre-commit-config.yaml .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/configuration/index.rst doc/source/contributor/index.rst doc/source/install/index.rst doc/source/reference/base.rst doc/source/reference/exception.rst doc/source/reference/fields.rst doc/source/reference/fixture.rst doc/source/reference/index.rst doc/source/user/examples.rst doc/source/user/history.rst doc/source/user/index.rst doc/source/user/usage.rst oslo.versionedobjects.egg-info/PKG-INFO oslo.versionedobjects.egg-info/SOURCES.txt oslo.versionedobjects.egg-info/dependency_links.txt oslo.versionedobjects.egg-info/entry_points.txt oslo.versionedobjects.egg-info/not-zip-safe oslo.versionedobjects.egg-info/pbr.json oslo.versionedobjects.egg-info/requires.txt oslo.versionedobjects.egg-info/top_level.txt oslo_versionedobjects/__init__.py oslo_versionedobjects/_i18n.py oslo_versionedobjects/_options.py oslo_versionedobjects/_utils.py oslo_versionedobjects/base.py oslo_versionedobjects/exception.py oslo_versionedobjects/fields.py oslo_versionedobjects/fixture.py oslo_versionedobjects/test.py oslo_versionedobjects/examples/__init__.py oslo_versionedobjects/examples/iot_bulb.py oslo_versionedobjects/locale/en_GB/LC_MESSAGES/oslo_versionedobjects.po oslo_versionedobjects/tests/__init__.py oslo_versionedobjects/tests/obj_fixtures.py oslo_versionedobjects/tests/test_exception.py oslo_versionedobjects/tests/test_fields.py oslo_versionedobjects/tests/test_fixture.py oslo_versionedobjects/tests/test_objects.py releasenotes/notes/add-reno-996dd44974d53238.yaml releasenotes/notes/drop-python27-support-b3e377b0dcfa4f5c.yaml 
releasenotes/notes/update_md5_for_fips-e5a8f8f438ac81fb.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452798.0 oslo.versionedobjects-3.4.0/oslo.versionedobjects.egg-info/dependency_links.txt0000664000175000017500000000000100000000000030147 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452798.0 oslo.versionedobjects-3.4.0/oslo.versionedobjects.egg-info/entry_points.txt0000664000175000017500000000012500000000000027375 0ustar00zuulzuul00000000000000[oslo.config.opts] oslo.versionedobjects = oslo_versionedobjects._options:list_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452798.0 oslo.versionedobjects-3.4.0/oslo.versionedobjects.egg-info/not-zip-safe0000664000175000017500000000000100000000000026327 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452798.0 oslo.versionedobjects-3.4.0/oslo.versionedobjects.egg-info/pbr.json0000664000175000017500000000005600000000000025560 0ustar00zuulzuul00000000000000{"git_version": "6af8327", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1716452798.0 oslo.versionedobjects-3.4.0/oslo.versionedobjects.egg-info/requires.txt0000664000175000017500000000032500000000000026501 0ustar00zuulzuul00000000000000WebOb>=1.7.1 iso8601>=0.1.11 netaddr>=0.7.18 oslo.concurrency>=3.26.0 oslo.config>=5.2.0 oslo.context>=2.19.2 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.messaging>=5.29.0 oslo.serialization>=2.18.0 oslo.utils>=4.7.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452798.0 oslo.versionedobjects-3.4.0/oslo.versionedobjects.egg-info/top_level.txt0000664000175000017500000000002600000000000026631 0ustar00zuulzuul00000000000000oslo_versionedobjects ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.858407 oslo.versionedobjects-3.4.0/oslo_versionedobjects/0000775000175000017500000000000000000000000022470 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/__init__.py0000664000175000017500000000000000000000000024567 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/_i18n.py0000664000175000017500000000153600000000000023765 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/index.html """ import oslo_i18n _translators = oslo_i18n.TranslatorFactory(domain='oslo_versionedobjects') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/_options.py0000664000175000017500000000252100000000000024674 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_versionedobjects import exception def list_opts(): """Returns a list of oslo.config options available in the library. The returned list includes all oslo.config options which may be registered at runtime by the library. Each element of the list is a tuple. The first element is the name of the group under which the list of elements in the second element will be registered. A group name of None corresponds to the [DEFAULT] group in config files. The purpose of this is to allow tools like the Oslo sample config file generator to discover the options exposed to users by this library. 
:returns: a list of (group_name, opts) tuples """ return [('oslo_versionedobjects', copy.deepcopy(exception.exc_log_opts))] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/_utils.py0000664000175000017500000000225100000000000024341 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" # ISO 8601 extended time format without microseconds _ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' def isotime(at): """Stringify time in ISO 8601 format.""" st = at.strftime(_ISO8601_TIME_FORMAT) tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' # Need to handle either iso8601 or python UTC format st += ('Z' if tz in ['UTC', 'UTC+00:00'] else tz) return st ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/base.py0000664000175000017500000014406500000000000023766 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common internal object model""" import abc import collections from collections import abc as collections_abc import copy import functools import logging import warnings import oslo_messaging as messaging from oslo_utils import excutils from oslo_utils import versionutils as vutils from oslo_versionedobjects._i18n import _ from oslo_versionedobjects import exception from oslo_versionedobjects import fields as obj_fields LOG = logging.getLogger('object') class _NotSpecifiedSentinel(object): pass def _get_attrname(name): """Return the mangled name of the attribute's underlying storage.""" return '_obj_' + name def _make_class_properties(cls): # NOTE(danms/comstud): Inherit fields from super classes. # mro() returns the current class first and returns 'object' last, so # those can be skipped. Also be careful to not overwrite any fields # that already exist. And make sure each cls has its own copy of # fields and that it is not sharing the dict with a super class. 
cls.fields = dict(cls.fields) for supercls in cls.mro()[1:-1]: if not hasattr(supercls, 'fields'): continue for name, field in supercls.fields.items(): if name not in cls.fields: cls.fields[name] = field for name, field in cls.fields.items(): if not isinstance(field, obj_fields.Field): raise exception.ObjectFieldInvalid( field=name, objname=cls.obj_name()) def getter(self, name=name): attrname = _get_attrname(name) if not hasattr(self, attrname): self.obj_load_attr(name) return getattr(self, attrname) def setter(self, value, name=name, field=field): attrname = _get_attrname(name) field_value = field.coerce(self, name, value) if field.read_only and hasattr(self, attrname): # Note(yjiang5): _from_db_object() may iterate # every field and write, no exception in such situation. if getattr(self, attrname) != field_value: raise exception.ReadOnlyFieldError(field=name) else: return self._changed_fields.add(name) try: return setattr(self, attrname, field_value) except Exception: with excutils.save_and_reraise_exception(): attr = "%s.%s" % (self.obj_name(), name) LOG.exception('Error setting %(attr)s', {'attr': attr}) def deleter(self, name=name): attrname = _get_attrname(name) if not hasattr(self, attrname): raise AttributeError("No such attribute `%s'" % name) delattr(self, attrname) setattr(cls, name, property(getter, setter, deleter)) class VersionedObjectRegistry(object): _registry = None def __new__(cls, *args, **kwargs): if not VersionedObjectRegistry._registry: VersionedObjectRegistry._registry = object.__new__( VersionedObjectRegistry, *args, **kwargs) VersionedObjectRegistry._registry._obj_classes = \ collections.defaultdict(list) self = object.__new__(cls, *args, **kwargs) self._obj_classes = VersionedObjectRegistry._registry._obj_classes return self def registration_hook(self, cls, index): pass def _register_class(self, cls): def _vers_tuple(obj): return vutils.convert_version_to_tuple(obj.VERSION) _make_class_properties(cls) obj_name = cls.obj_name() for i, obj 
in enumerate(self._obj_classes[obj_name]): self.registration_hook(cls, i) if cls.VERSION == obj.VERSION: self._obj_classes[obj_name][i] = cls break if _vers_tuple(cls) > _vers_tuple(obj): # Insert before. self._obj_classes[obj_name].insert(i, cls) break else: # Either this is the first time we've seen the object or it's # an older version than anything we'e seen. self._obj_classes[obj_name].append(cls) self.registration_hook(cls, 0) @classmethod def register(cls, obj_cls): registry = cls() registry._register_class(obj_cls) return obj_cls @classmethod def register_if(cls, condition): def wraps(obj_cls): if condition: obj_cls = cls.register(obj_cls) else: _make_class_properties(obj_cls) return obj_cls return wraps @classmethod def objectify(cls, obj_cls): return cls.register_if(False)(obj_cls) @classmethod def obj_classes(cls): registry = cls() return registry._obj_classes # These are decorators that mark an object's method as remotable. # If the metaclass is configured to forward object methods to an # indirection service, these will result in making an RPC call # instead of directly calling the implementation in the object. Instead, # the object implementation on the remote end will perform the # requested action and the result will be returned here. def remotable_classmethod(fn): """Decorator for remotable classmethods.""" @functools.wraps(fn) def wrapper(cls, context, *args, **kwargs): if cls.indirection_api: version_manifest = obj_tree_get_versions(cls.obj_name()) try: result = cls.indirection_api.object_class_action_versions( context, cls.obj_name(), fn.__name__, version_manifest, args, kwargs) except NotImplementedError: # FIXME(danms): Maybe start to warn here about deprecation? 
result = cls.indirection_api.object_class_action( context, cls.obj_name(), fn.__name__, cls.VERSION, args, kwargs) else: result = fn(cls, context, *args, **kwargs) if isinstance(result, VersionedObject): result._context = context return result # NOTE(danms): Make this discoverable wrapper.remotable = True wrapper.original_fn = fn return classmethod(wrapper) # See comment above for remotable_classmethod() # # Note that this will use either the provided context, or the one # stashed in the object. If neither are present, the object is # "orphaned" and remotable methods cannot be called. def remotable(fn): """Decorator for remotable object methods.""" @functools.wraps(fn) def wrapper(self, *args, **kwargs): ctxt = self._context if ctxt is None: raise exception.OrphanedObjectError(method=fn.__name__, objtype=self.obj_name()) if self.indirection_api: updates, result = self.indirection_api.object_action( ctxt, self, fn.__name__, args, kwargs) for key, value in updates.items(): if key in self.fields: field = self.fields[key] # NOTE(ndipanov): Since VersionedObjectSerializer will have # deserialized any object fields into objects already, # we do not try to deserialize them again here. if isinstance(value, VersionedObject): setattr(self, key, value) else: setattr(self, key, field.from_primitive(self, key, value)) self.obj_reset_changes() self._changed_fields = set(updates.get('obj_what_changed', [])) return result else: return fn(self, *args, **kwargs) wrapper.remotable = True wrapper.original_fn = fn return wrapper class VersionedObject(object): """Base class and object factory. This forms the base of all objects that can be remoted or instantiated via RPC. Simply defining a class that inherits from this base class will make it remotely instantiatable. Objects should implement the necessary "get" classmethod routines as well as "save" object methods as appropriate. 
""" indirection_api = None # Object versioning rules # # Each service has its set of objects, each with a version attached. When # a client attempts to call an object method, the server checks to see if # the version of that object matches (in a compatible way) its object # implementation. If so, cool, and if not, fail. # # This version is allowed to have three parts, X.Y.Z, where the .Z element # is reserved for stable branch backports. The .Z is ignored for the # purposes of triggering a backport, which means anything changed under # a .Z must be additive and non-destructive such that a node that knows # about X.Y can consider X.Y.Z equivalent. VERSION = '1.0' # Object namespace for serialization # NB: Generally this should not be changed, but is needed for backwards # compatibility OBJ_SERIAL_NAMESPACE = 'versioned_object' # Object project namespace for serialization # This is used to disambiguate owners of objects sharing a common RPC # medium OBJ_PROJECT_NAMESPACE = 'versionedobjects' # The fields present in this object as key:field pairs. For example: # # fields = { 'foo': obj_fields.IntegerField(), # 'bar': obj_fields.StringField(), # } fields = {} obj_extra_fields = [] # Table of sub-object versioning information # # This contains a list of version mappings, by the field name of # the subobject. The mappings must be in order of oldest to # newest, and are tuples of (my_version, subobject_version). A # request to backport this object to $my_version will cause the # subobject to be backported to $subobject_version. # # obj_relationships = { # 'subobject1': [('1.2', '1.1'), ('1.4', '1.2')], # 'subobject2': [('1.2', '1.0')], # } # # In the above example: # # - If we are asked to backport our object to version 1.3, # subobject1 will be backported to version 1.1, since it was # bumped to version 1.2 when our version was 1.4. 
# - If we are asked to backport our object to version 1.5, # no changes will be made to subobject1 or subobject2, since # they have not changed since version 1.4. # - If we are asked to backlevel our object to version 1.1, we # will remove both subobject1 and subobject2 from the primitive, # since they were not added until version 1.2. obj_relationships = {} def __init__(self, context=None, **kwargs): self._changed_fields = set() self._context = context for key in kwargs.keys(): setattr(self, key, kwargs[key]) def __repr__(self): repr_str = '%s(%s)' % ( self.obj_name(), ','.join(['%s=%s' % (name, (self.obj_attr_is_set(name) and field.stringify(getattr(self, name)) or '')) for name, field in sorted(self.fields.items())])) return repr_str def __contains__(self, name): try: return self.obj_attr_is_set(name) except AttributeError: return False @classmethod def to_json_schema(cls): obj_name = cls.obj_name() schema = { '$schema': 'http://json-schema.org/draft-04/schema#', 'title': obj_name, } schema.update(obj_fields.Object(obj_name).get_schema()) return schema @classmethod def obj_name(cls): """Return the object's name Return a canonical name for this object which will be used over the wire for remote hydration. 
""" return cls.__name__ @classmethod def _obj_primitive_key(cls, field): return '%s.%s' % (cls.OBJ_SERIAL_NAMESPACE, field) @classmethod def _obj_primitive_field(cls, primitive, field, default=obj_fields.UnspecifiedDefault): key = cls._obj_primitive_key(field) if default == obj_fields.UnspecifiedDefault: return primitive[key] else: return primitive.get(key, default) @classmethod def obj_class_from_name(cls, objname, objver): """Returns a class from the registry based on a name and version.""" if objname not in VersionedObjectRegistry.obj_classes(): LOG.error('Unable to instantiate unregistered object type ' '%(objtype)s'), dict(objtype=objname) raise exception.UnsupportedObjectError(objtype=objname) # NOTE(comstud): If there's not an exact match, return the highest # compatible version. The objects stored in the class are sorted # such that highest version is first, so only set compatible_match # once below. compatible_match = None for objclass in VersionedObjectRegistry.obj_classes()[objname]: if objclass.VERSION == objver: return objclass if (not compatible_match and vutils.is_compatible(objver, objclass.VERSION)): compatible_match = objclass if compatible_match: return compatible_match # As mentioned above, latest version is always first in the list. 
latest_ver = VersionedObjectRegistry.obj_classes()[objname][0].VERSION raise exception.IncompatibleObjectVersion(objname=objname, objver=objver, supported=latest_ver) @classmethod def _obj_from_primitive(cls, context, objver, primitive): self = cls() self._context = context self.VERSION = objver objdata = cls._obj_primitive_field(primitive, 'data') changes = cls._obj_primitive_field(primitive, 'changes', []) for name, field in self.fields.items(): if name in objdata: setattr(self, name, field.from_primitive(self, name, objdata[name])) self._changed_fields = set([x for x in changes if x in self.fields]) return self @classmethod def obj_from_primitive(cls, primitive, context=None): """Object field-by-field hydration.""" objns = cls._obj_primitive_field(primitive, 'namespace') objname = cls._obj_primitive_field(primitive, 'name') objver = cls._obj_primitive_field(primitive, 'version') if objns != cls.OBJ_PROJECT_NAMESPACE: # NOTE(danms): We don't do anything with this now, but it's # there for "the future" raise exception.UnsupportedObjectError( objtype='%s.%s' % (objns, objname)) objclass = cls.obj_class_from_name(objname, objver) return objclass._obj_from_primitive(context, objver, primitive) def __deepcopy__(self, memo): """Efficiently make a deep copy of this object.""" # NOTE(danms): A naive deepcopy would copy more than we need, # and since we have knowledge of the volatile bits of the # object, we can be smarter here. Also, nested entities within # some objects may be uncopyable, so we can avoid those sorts # of issues by copying only our field data. nobj = self.__class__() # NOTE(sskripnick): we should save newly created object into mem # to let deepcopy know which branches are already created. 
# See launchpad bug #1602314 for more details memo[id(self)] = nobj nobj._context = self._context for name in self.fields: if self.obj_attr_is_set(name): nval = copy.deepcopy(getattr(self, name), memo) setattr(nobj, name, nval) nobj._changed_fields = set(self._changed_fields) return nobj def obj_clone(self): """Create a copy.""" return copy.deepcopy(self) def _obj_relationship_for(self, field, target_version): # NOTE(danms): We need to be graceful about not having the temporary # version manifest if called from obj_make_compatible(). if (not hasattr(self, '_obj_version_manifest') or self._obj_version_manifest is None): try: return self.obj_relationships[field] except KeyError: raise exception.ObjectActionError( action='obj_make_compatible', reason='No rule for %s' % field) objname = self.fields[field].objname if objname not in self._obj_version_manifest: return # NOTE(danms): Compute a relationship mapping that looks like # what the caller expects. return [(target_version, self._obj_version_manifest[objname])] def _obj_make_obj_compatible(self, primitive, target_version, field): """Backlevel a sub-object based on our versioning rules. This is responsible for backporting objects contained within this object's primitive according to a set of rules we maintain about version dependencies between objects. This requires that the obj_relationships table in this object is correct and up-to-date. :param:primitive: The primitive version of this object :param:target_version: The version string requested for this object :param:field: The name of the field in this object containing the sub-object to be backported """ relationship_map = self._obj_relationship_for(field, target_version) if not relationship_map: # NOTE(danms): This means the field was not specified in the # version manifest from the client, so it must not want this # field, so skip. 
return try: _get_subobject_version(target_version, relationship_map, lambda ver: _do_subobject_backport( ver, self, field, primitive)) except exception.TargetBeforeSubobjectExistedException: # Subobject did not exist, so delete it from the primitive del primitive[field] def obj_make_compatible(self, primitive, target_version): """Make an object representation compatible with a target version. This is responsible for taking the primitive representation of an object and making it suitable for the given target_version. This may mean converting the format of object attributes, removing attributes that have been added since the target version, etc. In general: - If a new version of an object adds a field, this routine should remove it for older versions. - If a new version changed or restricted the format of a field, this should convert it back to something a client knowing only of the older version will tolerate. - If an object that this object depends on is bumped, then this object should also take a version bump. Then, this routine should backlevel the dependent object (by calling its obj_make_compatible()) if the requested version of this object is older than the version where the new dependent object was added. :param primitive: The result of :meth:`obj_to_primitive` :param target_version: The version string requested by the recipient of the object :raises: :exc:`oslo_versionedobjects.exception.UnsupportedObjectError` if conversion is not possible for some reason """ for key, field in self.fields.items(): if not isinstance(field, (obj_fields.ObjectField, obj_fields.ListOfObjectsField)): continue if not self.obj_attr_is_set(key): continue self._obj_make_obj_compatible(primitive, target_version, key) def obj_make_compatible_from_manifest(self, primitive, target_version, version_manifest): # NOTE(danms): Stash the manifest on the object so we can use it in # the deeper layers. 
We do this because obj_make_compatible() is # defined library API at this point, yet we need to get this manifest # to the other bits that get called so we can propagate it to child # calls. It's not pretty, but a tactical solution. Ideally we will # either evolve or deprecate obj_make_compatible() in a major version # bump. self._obj_version_manifest = version_manifest try: return self.obj_make_compatible(primitive, target_version) finally: delattr(self, '_obj_version_manifest') def obj_to_primitive(self, target_version=None, version_manifest=None): """Simple base-case dehydration. This calls to_primitive() for each item in fields. """ if target_version is None: target_version = self.VERSION if (vutils.convert_version_to_tuple(target_version) > vutils.convert_version_to_tuple(self.VERSION)): raise exception.InvalidTargetVersion(version=target_version) primitive = dict() for name, field in self.fields.items(): if self.obj_attr_is_set(name): primitive[name] = field.to_primitive(self, name, getattr(self, name)) # NOTE(danms): If we know we're being asked for a different version, # then do the compat step. However, even if we think we're not, # we may have sub-objects that need it, so if we have a manifest we # have to traverse this object just in case. Previously, we # required a parent version bump for any child, so the target # check was enough. if target_version != self.VERSION or version_manifest: self.obj_make_compatible_from_manifest(primitive, target_version, version_manifest) obj = {self._obj_primitive_key('name'): self.obj_name(), self._obj_primitive_key('namespace'): ( self.OBJ_PROJECT_NAMESPACE), self._obj_primitive_key('version'): target_version, self._obj_primitive_key('data'): primitive} if self.obj_what_changed(): # NOTE(cfriesen): if we're downgrading to a lower version, then # it's possible that self.obj_what_changed() includes fields that # no longer exist in the lower version. If so, filter them out. 
what_changed = self.obj_what_changed() changes = [field for field in what_changed if field in primitive] if changes: obj[self._obj_primitive_key('changes')] = changes return obj def obj_set_defaults(self, *attrs): if not attrs: attrs = [name for name, field in self.fields.items() if field.default != obj_fields.UnspecifiedDefault] for attr in attrs: default = copy.deepcopy(self.fields[attr].default) if default is obj_fields.UnspecifiedDefault: raise exception.ObjectActionError( action='set_defaults', reason='No default set for field %s' % attr) if not self.obj_attr_is_set(attr): setattr(self, attr, default) def obj_load_attr(self, attrname): """Load an additional attribute from the real object. This should load self.$attrname and cache any data that might be useful for future load operations. """ raise NotImplementedError( _("Cannot load '%s' in the base class") % attrname) def save(self, context): """Save the changed fields back to the store. This is optional for subclasses, but is presented here in the base class for consistency among those that do. """ raise NotImplementedError(_('Cannot save anything in the base class')) def obj_what_changed(self): """Returns a set of fields that have been modified.""" changes = set([field for field in self._changed_fields if field in self.fields]) for field in self.fields: if (self.obj_attr_is_set(field) and isinstance(getattr(self, field), VersionedObject) and getattr(self, field).obj_what_changed()): changes.add(field) return changes def obj_get_changes(self): """Returns a dict of changed fields and their new values.""" changes = {} for key in self.obj_what_changed(): changes[key] = getattr(self, key) return changes def obj_reset_changes(self, fields=None, recursive=False): """Reset the list of fields that have been changed. :param fields: List of fields to reset, or "all" if None. :param recursive: Call obj_reset_changes(recursive=True) on any sub-objects within the list of fields being reset. 
This is NOT "revert to previous values". Specifying fields on recursive resets will only be honored at the top level. Everything below the top will reset all. """ if recursive: for field in self.obj_get_changes(): # Ignore fields not in requested set (if applicable) if fields and field not in fields: continue # Skip any fields that are unset if not self.obj_attr_is_set(field): continue value = getattr(self, field) # Don't reset nulled fields if value is None: continue # Reset straight Object and ListOfObjects fields if isinstance(self.fields[field], obj_fields.ObjectField): value.obj_reset_changes(recursive=True) elif isinstance(self.fields[field], obj_fields.ListOfObjectsField): for thing in value: thing.obj_reset_changes(recursive=True) if fields: self._changed_fields -= set(fields) else: self._changed_fields.clear() def obj_attr_is_set(self, attrname): """Test object to see if attrname is present. Returns True if the named attribute has a value set, or False if not. Raises AttributeError if attrname is not a valid attribute for this object. """ if attrname not in self.obj_fields: raise AttributeError( _("%(objname)s object has no attribute '%(attrname)s'") % {'objname': self.obj_name(), 'attrname': attrname}) return hasattr(self, _get_attrname(attrname)) @property def obj_fields(self): return list(self.fields.keys()) + self.obj_extra_fields @property def obj_context(self): return self._context class ComparableVersionedObject(object): """Mix-in to provide comparison methods When objects are to be compared with each other (in tests for example), this mixin can be used. 
""" def __eq__(self, obj): # FIXME(inc0): this can return incorrect value if we consider partially # loaded objects from db and fields which are dropped out differ if hasattr(obj, 'obj_to_primitive'): return self.obj_to_primitive() == obj.obj_to_primitive() return NotImplemented def __hash__(self): return super(ComparableVersionedObject, self).__hash__() def __ne__(self, obj): if hasattr(obj, 'obj_to_primitive'): return self.obj_to_primitive() != obj.obj_to_primitive() return NotImplemented class TimestampedObject(object): """Mixin class for db backed objects with timestamp fields. Sqlalchemy models that inherit from the oslo_db TimestampMixin will include these fields and the corresponding objects will benefit from this mixin. """ fields = { 'created_at': obj_fields.DateTimeField(nullable=True), 'updated_at': obj_fields.DateTimeField(nullable=True), } class VersionedObjectDictCompat(object): """Mix-in to provide dictionary key access compatibility If an object needs to support attribute access using dictionary items instead of object attributes, inherit from this class. This should only be used as a temporary measure until all callers are converted to use modern attribute access. 
""" def __iter__(self): for name in self.obj_fields: if (self.obj_attr_is_set(name) or name in self.obj_extra_fields): yield name keys = __iter__ def values(self): for name in self: yield getattr(self, name) def items(self): for name in self: yield name, getattr(self, name) def __getitem__(self, name): return getattr(self, name) def __setitem__(self, name, value): setattr(self, name, value) def get(self, key, value=_NotSpecifiedSentinel): if key not in self.obj_fields: raise AttributeError("'%s' object has no attribute '%s'" % ( self.__class__, key)) if value != _NotSpecifiedSentinel and not self.obj_attr_is_set(key): return value else: return getattr(self, key) def update(self, updates): for key, value in updates.items(): setattr(self, key, value) class ObjectListBase(collections_abc.Sequence): """Mixin class for lists of objects. This mixin class can be added as a base class for an object that is implementing a list of objects. It adds a single field of 'objects', which is the list store, and behaves like a list itself. It supports serialization of the list of objects automatically. """ fields = { 'objects': obj_fields.ListOfObjectsField('VersionedObject'), } # This is a dictionary of my_version:child_version mappings so that # we can support backleveling our contents based on the version # requested of the list object. child_versions = {} def __init__(self, *args, **kwargs): super(ObjectListBase, self).__init__(*args, **kwargs) if 'objects' not in kwargs: self.objects = [] self._changed_fields.discard('objects') def __len__(self): """List length.""" return len(self.objects) def __getitem__(self, index): """List index access.""" if isinstance(index, slice): new_obj = self.__class__() new_obj.objects = self.objects[index] # NOTE(danms): We must be mixed in with a VersionedObject! 
new_obj.obj_reset_changes() new_obj._context = self._context return new_obj return self.objects[index] def sort(self, key=None, reverse=False): self.objects.sort(key=key, reverse=reverse) def obj_make_compatible(self, primitive, target_version): # Give priority to using child_versions, if that isn't set, try # obj_relationships if self.child_versions: relationships = self.child_versions.items() else: try: relationships = self._obj_relationship_for('objects', target_version) except exception.ObjectActionError: # No relationship for this found in manifest or # in obj_relationships relationships = {} try: # NOTE(rlrossit): If we have no version information, just # backport to child version 1.0 (maintaining default # behavior) if relationships: _get_subobject_version(target_version, relationships, lambda ver: _do_subobject_backport( ver, self, 'objects', primitive)) else: _do_subobject_backport('1.0', self, 'objects', primitive) except exception.TargetBeforeSubobjectExistedException: # Child did not exist, so delete it from the primitive del primitive['objects'] def obj_what_changed(self): changes = set(self._changed_fields) for child in self.objects: if child.obj_what_changed(): changes.add('objects') return changes def __add__(self, other): # Handling arbitrary fields may not make sense if those fields are not # all concatenatable. Only concatenate if the base 'objects' field is # the only one and the classes match. if (self.__class__ == other.__class__ and list(self.__class__.fields.keys()) == ['objects']): return self.__class__(objects=self.objects + other.objects) else: raise TypeError("List Objects should be of the same type and only " "have an 'objects' field") def __radd__(self, other): if (self.__class__ == other.__class__ and list(self.__class__.fields.keys()) == ['objects']): # This should never be run in practice. If the above condition is # met then __add__ would have been run. 
raise NotImplementedError('__radd__ is not implemented for ' 'objects of the same type') else: raise TypeError("List Objects should be of the same type and only " "have an 'objects' field") class VersionedObjectSerializer(messaging.NoOpSerializer): """A VersionedObject-aware Serializer. This implements the Oslo Serializer interface and provides the ability to serialize and deserialize VersionedObject entities. Any service that needs to accept or return VersionedObjects as arguments or result values should pass this to its RPCClient and RPCServer objects. """ # Base class to use for object hydration OBJ_BASE_CLASS = VersionedObject def _do_backport(self, context, objprim, objclass): obj_versions = obj_tree_get_versions(objclass.obj_name()) indirection_api = self.OBJ_BASE_CLASS.indirection_api try: return indirection_api.object_backport_versions( context, objprim, obj_versions) except NotImplementedError: # FIXME(danms): Maybe start to warn here about deprecation? return indirection_api.object_backport(context, objprim, objclass.VERSION) def _process_object(self, context, objprim): try: return self.OBJ_BASE_CLASS.obj_from_primitive( objprim, context=context) except exception.IncompatibleObjectVersion: with excutils.save_and_reraise_exception(reraise=False) as ctxt: verkey = \ '%s.version' % self.OBJ_BASE_CLASS.OBJ_SERIAL_NAMESPACE objver = objprim[verkey] if objver.count('.') == 2: # NOTE(danms): For our purposes, the .z part of the version # should be safe to accept without requiring a backport objprim[verkey] = \ '.'.join(objver.split('.')[:2]) return self._process_object(context, objprim) namekey = '%s.name' % self.OBJ_BASE_CLASS.OBJ_SERIAL_NAMESPACE objname = objprim[namekey] supported = VersionedObjectRegistry.obj_classes().get(objname, []) if self.OBJ_BASE_CLASS.indirection_api and supported: return self._do_backport(context, objprim, supported[0]) else: ctxt.reraise = True def _process_iterable(self, context, action_fn, values): """Process an iterable, taking 
an action on each value. :param:context: Request context :param:action_fn: Action to take on each item in values :param:values: Iterable container of things to take action on :returns: A new container of the same type (except set) with items from values having had action applied. """ iterable = values.__class__ if issubclass(iterable, dict): return iterable([(k, action_fn(context, v)) for k, v in values.items()]) else: # NOTE(danms, gibi) A set can't have an unhashable value inside, # such as a dict. Convert the set to list, which is fine, since we # can't send them over RPC anyway. We convert it to list as this # way there will be no semantic change between the fake rpc driver # used in functional test and a normal rpc driver. if iterable == set: iterable = list return iterable([action_fn(context, value) for value in values]) def serialize_entity(self, context, entity): if isinstance(entity, (tuple, list, set, dict)): entity = self._process_iterable(context, self.serialize_entity, entity) elif (hasattr(entity, 'obj_to_primitive') and callable(entity.obj_to_primitive)): entity = entity.obj_to_primitive() return entity def deserialize_entity(self, context, entity): namekey = '%s.name' % self.OBJ_BASE_CLASS.OBJ_SERIAL_NAMESPACE if isinstance(entity, dict) and namekey in entity: entity = self._process_object(context, entity) elif isinstance(entity, (tuple, list, set, dict)): entity = self._process_iterable(context, self.deserialize_entity, entity) return entity class VersionedObjectIndirectionAPI(object, metaclass=abc.ABCMeta): def object_action(self, context, objinst, objmethod, args, kwargs): """Perform an action on a VersionedObject instance. When indirection_api is set on a VersionedObject (to a class implementing this interface), method calls on remotable methods will cause this to be executed to actually make the desired call. This often involves performing RPC. 
:param context: The context within which to perform the action :param objinst: The object instance on which to perform the action :param objmethod: The name of the action method to call :param args: The positional arguments to the action method :param kwargs: The keyword arguments to the action method :returns: The result of the action method """ pass def object_class_action(self, context, objname, objmethod, objver, args, kwargs): """.. deprecated:: 0.10.0 Use :func:`object_class_action_versions` instead. Perform an action on a VersionedObject class. When indirection_api is set on a VersionedObject (to a class implementing this interface), classmethod calls on remotable_classmethod methods will cause this to be executed to actually make the desired call. This usually involves performing RPC. :param context: The context within which to perform the action :param objname: The registry name of the object :param objmethod: The name of the action method to call :param objver: The (remote) version of the object on which the action is being taken :param args: The positional arguments to the action method :param kwargs: The keyword arguments to the action method :returns: The result of the action method, which may (or may not) be an instance of the implementing VersionedObject class. """ pass def object_class_action_versions(self, context, objname, objmethod, object_versions, args, kwargs): """Perform an action on a VersionedObject class. When indirection_api is set on a VersionedObject (to a class implementing this interface), classmethod calls on remotable_classmethod methods will cause this to be executed to actually make the desired call. This usually involves performing RPC. This differs from object_class_action() in that it is provided with object_versions, a manifest of client-side object versions for easier nested backports. The manifest is the result of calling obj_tree_get_versions(). 
NOTE: This was not in the initial spec for this interface, so the base class raises NotImplementedError if you don't implement it. For backports, this method will be tried first, and if unimplemented, will fall back to object_class_action(). New implementations should provide this method instead of object_class_action() :param context: The context within which to perform the action :param objname: The registry name of the object :param objmethod: The name of the action method to call :param object_versions: A dict of {objname: version} mappings :param args: The positional arguments to the action method :param kwargs: The keyword arguments to the action method :returns: The result of the action method, which may (or may not) be an instance of the implementing VersionedObject class. """ warnings.warn('object_class_action() is deprecated in favor of ' 'object_class_action_versions() and will be removed ' 'in a later release', DeprecationWarning) raise NotImplementedError('Multi-version class action not supported') def object_backport(self, context, objinst, target_version): """.. deprecated:: 0.10.0 Use :func:`object_backport_versions` instead. Perform a backport of an object instance to a specified version. When indirection_api is set on a VersionedObject (to a class implementing this interface), the default behavior of the base VersionedObjectSerializer, upon receiving an object with a version newer than what is in the lcoal registry, is to call this method to request a backport of the object. In an environment where there is an RPC-able service on the bus which can gracefully downgrade newer objects for older services, this method services as a translation mechanism for older code when receiving objects from newer code. NOTE: This older/original method is soon to be deprecated. When a backport is required, the newer object_backport_versions() will be tried, and if it raises NotImplementedError, then we will fall back to this (less optimal) method. 
:param context: The context within which to perform the backport :param objinst: An instance of a VersionedObject to be backported :param target_version: The maximum version of the objinst's class that is understood by the requesting host. :returns: The downgraded instance of objinst """ pass def object_backport_versions(self, context, objinst, object_versions): """Perform a backport of an object instance. This method is basically just like object_backport() but instead of providing a specific target version for the toplevel object and relying on the service-side mapping to handle sub-objects, this sends a mapping of all the dependent objects and their client-supported versions. The server will backport objects within the tree starting at objinst to the versions specified in object_versions, removing objects that have no entry. Use obj_tree_get_versions() to generate this mapping. NOTE: This was not in the initial spec for this interface, so the base class raises NotImplementedError if you don't implement it. For backports, this method will be tried first, and if unimplemented, will fall back to object_backport(). :param context: The context within which to perform the backport :param objinst: An instance of a VersionedObject to be backported :param object_versions: A dict of {objname: version} mappings """ warnings.warn('object_backport() is deprecated in favor of ' 'object_backport_versions() and will be removed ' 'in a later release', DeprecationWarning) raise NotImplementedError('Multi-version backport not supported') def obj_make_list(context, list_obj, item_cls, db_list, **extra_args): """Construct an object list from a list of primitives. This calls item_cls._from_db_object() on each item of db_list, and adds the resulting object to list_obj. 
:param:context: Request context :param:list_obj: An ObjectListBase object :param:item_cls: The VersionedObject class of the objects within the list :param:db_list: The list of primitives to convert to objects :param:extra_args: Extra arguments to pass to _from_db_object() :returns: list_obj """ list_obj.objects = [] for db_item in db_list: item = item_cls._from_db_object(context, item_cls(), db_item, **extra_args) list_obj.objects.append(item) list_obj._context = context list_obj.obj_reset_changes() return list_obj def obj_tree_get_versions(objname, tree=None): """Construct a mapping of dependent object versions. This method builds a list of dependent object versions given a top- level object with other objects as fields. It walks the tree recursively to determine all the objects (by symbolic name) that could be contained within the top-level object, and the maximum versions of each. The result is a dict like:: {'MyObject': '1.23', ... } :param objname: The top-level object at which to start :param tree: Used internally, pass None here. :returns: A dictionary of object names and versions """ if tree is None: tree = {} if objname in tree: return tree objclass = VersionedObjectRegistry.obj_classes()[objname][0] tree[objname] = objclass.VERSION for field_name in objclass.fields: field = objclass.fields[field_name] if isinstance(field, obj_fields.ObjectField): child_cls = field._type._obj_name elif isinstance(field, obj_fields.ListOfObjectsField): child_cls = field._type._element_type._type._obj_name else: continue try: obj_tree_get_versions(child_cls, tree=tree) except IndexError: raise exception.UnregisteredSubobject( child_objname=child_cls, parent_objname=objname) return tree def _get_subobject_version(tgt_version, relationships, backport_func): """Get the version to which we need to convert a subobject. This uses the relationships between a parent and a subobject, along with the target parent version, to decide the version we need to convert a subobject to. 
If the subobject did not exist in the parent at the target version, TargetBeforeChildExistedException is raised. If there is a need to backport, backport_func is called and the subobject version to backport to is passed in. :param tgt_version: The version we are converting the parent to :param relationships: A list of (parent, subobject) version tuples :param backport_func: A backport function that takes in the subobject version :returns: The version we need to convert the subobject to """ tgt = vutils.convert_version_to_tuple(tgt_version) for index, versions in enumerate(relationships): parent, child = versions parent = vutils.convert_version_to_tuple(parent) if tgt < parent: if index == 0: # We're backporting to a version of the parent that did # not contain this subobject raise exception.TargetBeforeSubobjectExistedException( target_version=tgt_version) else: # We're in a gap between index-1 and index, so set the desired # version to the previous index's version child = relationships[index - 1][1] backport_func(child) return elif tgt == parent: # We found the version we want, so backport to it backport_func(child) return def _do_subobject_backport(to_version, parent, field, primitive): obj = getattr(parent, field) manifest = (hasattr(parent, '_obj_version_manifest') and parent._obj_version_manifest or None) if isinstance(obj, VersionedObject): obj.obj_make_compatible_from_manifest( obj._obj_primitive_field(primitive[field], 'data'), to_version, version_manifest=manifest) ver_key = obj._obj_primitive_key('version') primitive[field][ver_key] = to_version elif isinstance(obj, list): for i, element in enumerate(obj): element.obj_make_compatible_from_manifest( element._obj_primitive_field(primitive[field][i], 'data'), to_version, version_manifest=manifest) ver_key = element._obj_primitive_key('version') primitive[field][i][ver_key] = to_version ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.862407 
oslo.versionedobjects-3.4.0/oslo_versionedobjects/examples/0000775000175000017500000000000000000000000024306 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/examples/__init__.py0000664000175000017500000000000000000000000026405 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/examples/iot_bulb.py0000664000175000017500000000442200000000000026461 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime from oslo_versionedobjects import base from oslo_versionedobjects import fields as obj_fields # INTRO: This example shows how a object (a plain-old-python-object) with # some associated fields can be used, and some of its built-in methods can # be used to convert that object into a primitive and back again (as well # as determine simple changes on it. # Ensure that we always register our object with an object registry, # so that it can be deserialized from its primitive form. @base.VersionedObjectRegistry.register class IOTLightbulb(base.VersionedObject): """Simple light bulb class with some data about it.""" VERSION = '1.0' # Initial version #: Namespace these examples will use. 
OBJ_PROJECT_NAMESPACE = 'versionedobjects.examples' #: Required fields this object **must** declare. fields = { 'serial': obj_fields.StringField(), 'manufactured_on': obj_fields.DateTimeField(), } # Now do some basic operations on a light bulb. bulb = IOTLightbulb(serial='abc-123', manufactured_on=datetime.now()) print("The __str__() output of this new object: %s" % bulb) print("The 'serial' field of the object: %s" % bulb.serial) bulb_prim = bulb.obj_to_primitive() print("Primitive representation of this object: %s" % bulb_prim) # Now convert the primitive back to an object (isn't it easy!) bulb = IOTLightbulb.obj_from_primitive(bulb_prim) bulb.obj_reset_changes() print("The __str__() output of this new (reconstructed)" " object: %s" % bulb) # Mutating a field and showing what changed. bulb.serial = 'abc-124' print("After serial number change, the set of fields that" " have been mutated is: %s" % bulb.obj_what_changed()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/exception.py0000664000175000017500000001460000000000000025041 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """VersionedObjects base exception handling. Includes decorator for re-raising VersionedObjects-type exceptions. 
SHOULD include dedicated exception logging. """ import functools import inspect import logging from oslo_config import cfg from oslo_utils import excutils import webob.exc from oslo_versionedobjects._i18n import _ LOG = logging.getLogger(__name__) exc_log_opts = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help='Make exception message format errors fatal'), ] CONF = cfg.CONF CONF.register_opts(exc_log_opts, group='oslo_versionedobjects') class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=0, title="", explanation=""): self.code = code self.title = title self.explanation = explanation super(ConvertedException, self).__init__() def _cleanse_dict(original): """Strip all admin_password, new_pass, rescue_pass keys from a dict.""" return {k: v for k, v in original.items() if "_pass" not in k} def wrap_exception(notifier=None, get_notifier=None): """Catch all exceptions in wrapped method This decorator wraps a method to catch any exceptions that may get thrown. It also optionally sends the exception to the notification system. """ def inner(f): def wrapped(self, context, *args, **kw): # Don't store self or context in the payload, it now seems to # contain confidential information. try: return f(self, context, *args, **kw) except Exception as e: with excutils.save_and_reraise_exception(): if notifier or get_notifier: payload = dict(exception=e) call_dict = inspect.getcallargs(f, self, context, *args, **kw) cleansed = _cleanse_dict(call_dict) payload.update({'args': cleansed}) # If f has multiple decorators, they must use # functools.wraps to ensure the name is # propagated. event_type = f.__name__ (notifier or get_notifier()).error(context, event_type, payload) return functools.wraps(f)(wrapped) return inner class VersionedObjectsException(Exception): """Base VersionedObjects Exception To correctly use this class, inherit from it and define a 'msg_fmt' property. 
That msg_fmt will get printf'd with the keyword arguments provided to the constructor. """ msg_fmt = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.msg_fmt % kwargs except Exception: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception('Exception in string format operation') for name, value in kwargs.items(): LOG.error("%s: %s" % (name, value)) # noqa if CONF.oslo_versionedobjects.fatal_exception_format_errors: raise else: # at least get the core message out if something happened message = self.msg_fmt super(VersionedObjectsException, self).__init__(message) def format_message(self): # NOTE(mrodden): use the first argument to the python Exception object # which should be our full VersionedObjectsException message, # (see __init__) return self.args[0] class ObjectActionError(VersionedObjectsException): msg_fmt = _('Object action %(action)s failed because: %(reason)s') class ObjectFieldInvalid(VersionedObjectsException): msg_fmt = _('Field %(field)s of %(objname)s is not an instance of Field') class OrphanedObjectError(VersionedObjectsException): msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object') class IncompatibleObjectVersion(VersionedObjectsException): msg_fmt = _('Version %(objver)s of %(objname)s is not supported, ' 'supported version is %(supported)s') class ReadOnlyFieldError(VersionedObjectsException): msg_fmt = _('Cannot modify readonly field %(field)s') class UnsupportedObjectError(VersionedObjectsException): msg_fmt = _('Unsupported object type %(objtype)s') class EnumRequiresValidValuesError(VersionedObjectsException): msg_fmt = _('Enum fields require a list of valid_values') class EnumValidValuesInvalidError(VersionedObjectsException): msg_fmt = _('Enum valid values are not 
valid') class EnumFieldInvalid(VersionedObjectsException): msg_fmt = _('%(typename)s in %(fieldname)s is not an instance of Enum') class EnumFieldUnset(VersionedObjectsException): msg_fmt = _('%(fieldname)s missing field type') class InvalidTargetVersion(VersionedObjectsException): msg_fmt = _('Invalid target version %(version)s') class TargetBeforeSubobjectExistedException(VersionedObjectsException): msg_fmt = _("No subobject existed at version %(target_version)s") class UnregisteredSubobject(VersionedObjectsException): msg_fmt = _("%(child_objname)s is referenced by %(parent_objname)s but " "is not registered") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/fields.py0000664000175000017500000012671100000000000024320 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from collections import abc as collections_abc import datetime from distutils import versionpredicate import re import uuid import warnings import copy import iso8601 import netaddr from oslo_utils import strutils from oslo_utils import timeutils from oslo_versionedobjects._i18n import _ from oslo_versionedobjects import _utils from oslo_versionedobjects import exception class KeyTypeError(TypeError): def __init__(self, expected, value): super(KeyTypeError, self).__init__( _('Key %(key)s must be of type %(expected)s not %(actual)s' ) % {'key': repr(value), 'expected': expected.__name__, 'actual': value.__class__.__name__, }) class ElementTypeError(TypeError): def __init__(self, expected, key, value): super(ElementTypeError, self).__init__( _('Element %(key)s:%(val)s must be of type %(expected)s' ' not %(actual)s' ) % {'key': key, 'val': repr(value), 'expected': expected, 'actual': value.__class__.__name__, }) class AbstractFieldType(object, metaclass=abc.ABCMeta): @abc.abstractmethod def coerce(self, obj, attr, value): """This is called to coerce (if possible) a value on assignment. This method should convert the value given into the designated type, or throw an exception if this is not possible. :param:obj: The VersionedObject on which an attribute is being set :param:attr: The name of the attribute being set :param:value: The value being set :returns: A properly-typed value """ pass @abc.abstractmethod def from_primitive(self, obj, attr, value): """This is called to deserialize a value. This method should deserialize a value from the form given by to_primitive() to the designated type. :param:obj: The VersionedObject on which the value is to be set :param:attr: The name of the attribute which will hold the value :param:value: The serialized form of the value :returns: The natural form of the value """ pass @abc.abstractmethod def to_primitive(self, obj, attr, value): """This is called to serialize a value. 
This method should serialize a value to the form expected by from_primitive(). :param:obj: The VersionedObject on which the value is set :param:attr: The name of the attribute holding the value :param:value: The natural form of the value :returns: The serialized form of the value """ pass @abc.abstractmethod def describe(self): """Returns a string describing the type of the field.""" pass @abc.abstractmethod def stringify(self, value): """Returns a short stringified version of a value.""" pass class FieldType(AbstractFieldType): @staticmethod def coerce(obj, attr, value): return value @staticmethod def from_primitive(obj, attr, value): return value @staticmethod def to_primitive(obj, attr, value): return value def describe(self): return self.__class__.__name__ def stringify(self, value): return str(value) def get_schema(self): raise NotImplementedError() class UnspecifiedDefault(object): pass class Field(object): def __init__(self, field_type, nullable=False, default=UnspecifiedDefault, read_only=False): self._type = field_type self._nullable = nullable self._default = default self._read_only = read_only def __repr__(self): if isinstance(self._default, set): # TODO(stephenfin): Drop this when we switch from # 'inspect.getargspec' to 'inspect.getfullargspec', since our # hashes will have to change anyway # make a py27 and py35 compatible representation. 
See bug 1771804 default = 'set([%s])' % ','.join( sorted([str(v) for v in self._default]) ) else: default = str(self._default) return '%s(default=%s,nullable=%s)' % (self._type.__class__.__name__, default, self._nullable) @property def nullable(self): return self._nullable @property def default(self): return self._default @property def read_only(self): return self._read_only def _null(self, obj, attr): if self.nullable: return None elif self._default != UnspecifiedDefault: # NOTE(danms): We coerce the default value each time the field # is set to None as our contract states that we'll let the type # examine the object and attribute name at that time. return self._type.coerce(obj, attr, copy.deepcopy(self._default)) else: raise ValueError(_("Field `%s' cannot be None") % attr) def coerce(self, obj, attr, value): """Coerce a value to a suitable type. This is called any time you set a value on an object, like: foo.myint = 1 and is responsible for making sure that the value (1 here) is of the proper type, or can be sanely converted. This also handles the potentially nullable or defaultable nature of the field and calls the coerce() method on a FieldType to actually do the coercion. :param:obj: The object being acted upon :param:attr: The name of the attribute/field being set :param:value: The value being set :returns: The properly-typed value """ if value is None: return self._null(obj, attr) else: return self._type.coerce(obj, attr, value) def from_primitive(self, obj, attr, value): """Deserialize a value from primitive form. This is responsible for deserializing a value from primitive into regular form. It calls the from_primitive() method on a FieldType to do the actual deserialization. 
:param:obj: The object being acted upon :param:attr: The name of the attribute/field being deserialized :param:value: The value to be deserialized :returns: The deserialized value """ if value is None: return None else: return self._type.from_primitive(obj, attr, value) def to_primitive(self, obj, attr, value): """Serialize a value to primitive form. This is responsible for serializing a value to primitive form. It calls to_primitive() on a FieldType to do the actual serialization. :param:obj: The object being acted upon :param:attr: The name of the attribute/field being serialized :param:value: The value to be serialized :returns: The serialized value """ if value is None: return None else: return self._type.to_primitive(obj, attr, value) def describe(self): """Return a short string describing the type of this field.""" name = self._type.describe() prefix = self.nullable and 'Nullable' or '' return prefix + name def stringify(self, value): if value is None: return 'None' else: return self._type.stringify(value) def get_schema(self): schema = self._type.get_schema() schema.update({'readonly': self.read_only}) if self.nullable: schema['type'].append('null') default = self.default if default != UnspecifiedDefault: schema.update({'default': default}) return schema class String(FieldType): @staticmethod def coerce(obj, attr, value): # FIXME(danms): We should really try to avoid the need to do this accepted_types = (int, float, str, datetime.datetime) if isinstance(value, accepted_types): return str(value) raise ValueError(_('A string is required in field %(attr)s, ' 'not a %(type)s') % {'attr': attr, 'type': type(value).__name__}) @staticmethod def stringify(value): return '\'%s\'' % value def get_schema(self): return {'type': ['string']} class SensitiveString(String): """A string field type that may contain sensitive (password) information. Passwords in the string value are masked when stringified. 
""" def stringify(self, value): return super(SensitiveString, self).stringify( strutils.mask_password(value)) class VersionPredicate(String): @staticmethod def coerce(obj, attr, value): try: versionpredicate.VersionPredicate('check (%s)' % value) except ValueError: raise ValueError(_('Version %(val)s is not a valid predicate in ' 'field %(attr)s') % {'val': value, 'attr': attr}) return value class Enum(String): def __init__(self, valid_values, **kwargs): if not valid_values: raise exception.EnumRequiresValidValuesError() try: # Test validity of the values for value in valid_values: super(Enum, self).coerce(None, 'init', value) except (TypeError, ValueError): raise exception.EnumValidValuesInvalidError() self._valid_values = valid_values super(Enum, self).__init__(**kwargs) @property def valid_values(self): return copy.copy(self._valid_values) def coerce(self, obj, attr, value): if value not in self._valid_values: msg = _("Field value %s is invalid") % value raise ValueError(msg) return super(Enum, self).coerce(obj, attr, value) def stringify(self, value): if value not in self._valid_values: msg = _("Field value %s is invalid") % value raise ValueError(msg) return super(Enum, self).stringify(value) def get_schema(self): schema = super(Enum, self).get_schema() schema['enum'] = self._valid_values return schema class StringPattern(FieldType): def get_schema(self): if hasattr(self, "PATTERN"): return {'type': ['string'], 'pattern': self.PATTERN} else: msg = _("%s has no pattern") % self.__class__.__name__ raise AttributeError(msg) class UUID(StringPattern): PATTERN = (r'^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]' r'{4}-?[a-fA-F0-9]{12}$') @staticmethod def coerce(obj, attr, value): # FIXME(danms): We should actually verify the UUIDness here with warnings.catch_warnings(): # Change the warning action only if no other filter exists # for this warning to allow the client to define other action # like 'error' for this warning. 
warnings.filterwarnings(action="once", append=True) try: uuid.UUID("%s" % value) except Exception: # This is to ensure no breaking behaviour for current # users warnings.warn("%s is an invalid UUID. Using UUIDFields " "with invalid UUIDs is no longer " "supported, and will be removed in a future " "release. Please update your " "code to input valid UUIDs or accept " "ValueErrors for invalid UUIDs. See " "https://docs.openstack.org/oslo.versionedobjects/latest/reference/fields.html#oslo_versionedobjects.fields.UUIDField " # noqa "for further details" % repr(value).encode('utf8'), FutureWarning) return "%s" % value class MACAddress(StringPattern): PATTERN = r'^[0-9a-f]{2}(:[0-9a-f]{2}){5}$' _REGEX = re.compile(PATTERN) @staticmethod def coerce(obj, attr, value): if isinstance(value, str): lowered = value.lower().replace('-', ':') if MACAddress._REGEX.match(lowered): return lowered raise ValueError(_("Malformed MAC %s") % (value,)) class PCIAddress(StringPattern): PATTERN = r'^[0-9a-f]{4}:[0-9a-f]{2}:[0-1][0-9a-f].[0-7]$' _REGEX = re.compile(PATTERN) @staticmethod def coerce(obj, attr, value): if isinstance(value, str): newvalue = value.lower() if PCIAddress._REGEX.match(newvalue): return newvalue raise ValueError(_("Malformed PCI address %s") % (value,)) class Integer(FieldType): @staticmethod def coerce(obj, attr, value): return int(value) def get_schema(self): return {'type': ['integer']} class NonNegativeInteger(FieldType): @staticmethod def coerce(obj, attr, value): v = int(value) if v < 0: raise ValueError(_('Value must be >= 0 for field %s') % attr) return v def get_schema(self): return {'type': ['integer'], 'minimum': 0} class Float(FieldType): def coerce(self, obj, attr, value): return float(value) def get_schema(self): return {'type': ['number']} class NonNegativeFloat(FieldType): @staticmethod def coerce(obj, attr, value): v = float(value) if v < 0: raise ValueError(_('Value must be >= 0 for field %s') % attr) return v def get_schema(self): return {'type': 
['number'], 'minimum': 0} class Boolean(FieldType): @staticmethod def coerce(obj, attr, value): return bool(value) def get_schema(self): return {'type': ['boolean']} class FlexibleBoolean(Boolean): @staticmethod def coerce(obj, attr, value): return strutils.bool_from_string(value) class DateTime(FieldType): def __init__(self, tzinfo_aware=True, *args, **kwargs): self.tzinfo_aware = tzinfo_aware super(DateTime, self).__init__(*args, **kwargs) def coerce(self, obj, attr, value): if isinstance(value, str): # NOTE(danms): Being tolerant of isotime strings here will help us # during our objects transition value = timeutils.parse_isotime(value) elif not isinstance(value, datetime.datetime): raise ValueError(_('A datetime.datetime is required ' 'in field %(attr)s, not a %(type)s') % {'attr': attr, 'type': type(value).__name__}) if value.utcoffset() is None and self.tzinfo_aware: # NOTE(danms): Legacy objects from sqlalchemy are stored in UTC, # but are returned without a timezone attached. # As a transitional aid, assume a tz-naive object is in UTC. 
value = value.replace(tzinfo=iso8601.UTC) elif not self.tzinfo_aware: value = value.replace(tzinfo=None) return value def from_primitive(self, obj, attr, value): return self.coerce(obj, attr, timeutils.parse_isotime(value)) def get_schema(self): return {'type': ['string'], 'format': 'date-time'} @staticmethod def to_primitive(obj, attr, value): return _utils.isotime(value) @staticmethod def stringify(value): return _utils.isotime(value) class IPAddress(StringPattern): @staticmethod def coerce(obj, attr, value): try: return netaddr.IPAddress(value) except netaddr.AddrFormatError as e: raise ValueError(str(e)) def from_primitive(self, obj, attr, value): return self.coerce(obj, attr, value) @staticmethod def to_primitive(obj, attr, value): return str(value) class IPV4Address(IPAddress): @staticmethod def coerce(obj, attr, value): result = IPAddress.coerce(obj, attr, value) if result.version != 4: raise ValueError(_('Network "%(val)s" is not valid ' 'in field %(attr)s') % {'val': value, 'attr': attr}) return result def get_schema(self): return {'type': ['string'], 'format': 'ipv4'} class IPV6Address(IPAddress): @staticmethod def coerce(obj, attr, value): result = IPAddress.coerce(obj, attr, value) if result.version != 6: raise ValueError(_('Network "%(val)s" is not valid ' 'in field %(attr)s') % {'val': value, 'attr': attr}) return result def get_schema(self): return {'type': ['string'], 'format': 'ipv6'} class IPV4AndV6Address(IPAddress): @staticmethod def coerce(obj, attr, value): result = IPAddress.coerce(obj, attr, value) if result.version != 4 and result.version != 6: raise ValueError(_('Network "%(val)s" is not valid ' 'in field %(attr)s') % {'val': value, 'attr': attr}) return result def get_schema(self): return {'oneOf': [IPV4Address().get_schema(), IPV6Address().get_schema()]} class IPNetwork(IPAddress): @staticmethod def coerce(obj, attr, value): try: return netaddr.IPNetwork(value) except netaddr.AddrFormatError as e: raise ValueError(str(e)) class 
IPV4Network(IPNetwork): PATTERN = (r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-' r'9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][' r'0-9]|3[0-2]))$') @staticmethod def coerce(obj, attr, value): try: return netaddr.IPNetwork(value, version=4) except netaddr.AddrFormatError as e: raise ValueError(str(e)) class IPV6Network(IPNetwork): def __init__(self, *args, **kwargs): super(IPV6Network, self).__init__(*args, **kwargs) self.PATTERN = self._create_pattern() @staticmethod def coerce(obj, attr, value): try: return netaddr.IPNetwork(value, version=6) except netaddr.AddrFormatError as e: raise ValueError(str(e)) def _create_pattern(self): ipv6seg = '[0-9a-fA-F]{1,4}' ipv4seg = '(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])' return ( # Pattern based on answer to # http://stackoverflow.com/questions/53497/regular-expression-that-matches-valid-ipv6-addresses '^' # 1:2:3:4:5:6:7:8 '(' + ipv6seg + ':){7,7}' + ipv6seg + '|' # 1:: 1:2:3:4:5:6:7:: '(' + ipv6seg + ':){1,7}:|' # 1::8 1:2:3:4:5:6::8 1:2:3:4:5:6::8 '(' + ipv6seg + ':){1,6}:' + ipv6seg + '|' # 1::7:8 1:2:3:4:5::7:8 1:2:3:4:5::8 '(' + ipv6seg + ':){1,5}(:' + ipv6seg + '){1,2}|' # 1::6:7:8 1:2:3:4::6:7:8 1:2:3:4::8 '(' + ipv6seg + ':){1,4}(:' + ipv6seg + '){1,3}|' # 1::5:6:7:8 1:2:3::5:6:7:8 1:2:3::8 '(' + ipv6seg + ':){1,3}(:' + ipv6seg + '){1,4}|' # 1::4:5:6:7:8 1:2::4:5:6:7:8 1:2::8 '(' + ipv6seg + ':){1,2}(:' + ipv6seg + '){1,5}|' + # 1::3:4:5:6:7:8 1::3:4:5:6:7:8 1::8 ipv6seg + ':((:' + ipv6seg + '){1,6})|' # ::2:3:4:5:6:7:8 ::2:3:4:5:6:7:8 ::8 :: ':((:' + ipv6seg + '){1,7}|:)|' # fe80::7:8%eth0 fe80::7:8%1 'fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|' # ::255.255.255.255 ::ffff:255.255.255.255 ::ffff:0:255.255.255.255 '::(ffff(:0{1,4}){0,1}:){0,1}' '(' + ipv4seg + r'\.){3,3}' + ipv4seg + '|' # 2001:db8:3:4::192.0.2.33 64:ff9b::192.0.2.33 '(' + ipv6seg + ':){1,4}:' '(' + ipv4seg + r'\.){3,3}' + ipv4seg + # /128 r'(\/(d|dd|1[0-1]d|12[0-8]))$' ) class CompoundFieldType(FieldType): def 
__init__(self, element_type, **field_args): self._element_type = Field(element_type, **field_args) class List(CompoundFieldType): def coerce(self, obj, attr, value): if (not isinstance(value, collections_abc.Iterable) or isinstance(value, (str, collections_abc.Mapping))): raise ValueError(_('A list is required in field %(attr)s, ' 'not a %(type)s') % {'attr': attr, 'type': type(value).__name__}) coerced_list = CoercedList() coerced_list.enable_coercing(self._element_type, obj, attr) coerced_list.extend(value) return coerced_list def to_primitive(self, obj, attr, value): return [self._element_type.to_primitive(obj, attr, x) for x in value] def from_primitive(self, obj, attr, value): return [self._element_type.from_primitive(obj, attr, x) for x in value] def stringify(self, value): return '[%s]' % ( ','.join([self._element_type.stringify(x) for x in value])) def get_schema(self): return {'type': ['array'], 'items': self._element_type.get_schema()} class Dict(CompoundFieldType): def coerce(self, obj, attr, value): if not isinstance(value, dict): raise ValueError(_('A dict is required in field %(attr)s, ' 'not a %(type)s') % {'attr': attr, 'type': type(value).__name__}) coerced_dict = CoercedDict() coerced_dict.enable_coercing(self._element_type, obj, attr) coerced_dict.update(value) return coerced_dict def to_primitive(self, obj, attr, value): primitive = {} for key, element in value.items(): primitive[key] = self._element_type.to_primitive( obj, '%s["%s"]' % (attr, key), element) return primitive def from_primitive(self, obj, attr, value): concrete = {} for key, element in value.items(): concrete[key] = self._element_type.from_primitive( obj, '%s["%s"]' % (attr, key), element) return concrete def stringify(self, value): return '{%s}' % ( ','.join(['%s=%s' % (key, self._element_type.stringify(val)) for key, val in sorted(value.items())])) def get_schema(self): return {'type': ['object'], 'additionalProperties': self._element_type.get_schema()} class 
DictProxyField(object): """Descriptor allowing us to assign pinning data as a dict of key_types This allows us to have an object field that will be a dict of key_type keys, allowing that will convert back to string-keyed dict. This will take care of the conversion while the dict field will make sure that we store the raw json-serializable data on the object. key_type should return a type that unambiguously responds to str so that calling key_type on it yields the same thing. """ def __init__(self, dict_field_name, key_type=int): self._fld_name = dict_field_name self._key_type = key_type def __get__(self, obj, obj_type): if obj is None: return self if getattr(obj, self._fld_name) is None: return return {self._key_type(k): v for k, v in getattr(obj, self._fld_name).items()} def __set__(self, obj, val): if val is None: setattr(obj, self._fld_name, val) else: setattr(obj, self._fld_name, {str(k): v for k, v in val.items()}) class Set(CompoundFieldType): def coerce(self, obj, attr, value): if not isinstance(value, set): raise ValueError(_('A set is required in field %(attr)s, ' 'not a %(type)s') % {'attr': attr, 'type': type(value).__name__}) coerced_set = CoercedSet() coerced_set.enable_coercing(self._element_type, obj, attr) coerced_set.update(value) return coerced_set def to_primitive(self, obj, attr, value): return tuple( self._element_type.to_primitive(obj, attr, x) for x in value) def from_primitive(self, obj, attr, value): return set([self._element_type.from_primitive(obj, attr, x) for x in value]) def stringify(self, value): return 'set([%s])' % ( ','.join([self._element_type.stringify(x) for x in value])) def get_schema(self): return {'type': ['array'], 'uniqueItems': True, 'items': self._element_type.get_schema()} class Object(FieldType): def __init__(self, obj_name, subclasses=False, **kwargs): self._obj_name = obj_name self._subclasses = subclasses super(Object, self).__init__(**kwargs) @staticmethod def _get_all_obj_names(obj): obj_names = [] for parent in 
obj.__class__.mro(): # Skip mix-ins which are not versioned object subclasses if not hasattr(parent, "obj_name"): continue obj_names.append(parent.obj_name()) return obj_names def coerce(self, obj, attr, value): try: obj_name = value.obj_name() except AttributeError: obj_name = "" if self._subclasses: obj_names = self._get_all_obj_names(value) else: obj_names = [obj_name] if self._obj_name not in obj_names: if not obj_name: # If we're not dealing with an object, it's probably a # primitive so get it's type for the message below. obj_name = type(value).__name__ obj_mod = '' if hasattr(obj, '__module__'): obj_mod = ''.join([obj.__module__, '.']) val_mod = '' if hasattr(value, '__module__'): val_mod = ''.join([value.__module__, '.']) raise ValueError(_('An object of type %(type)s is required ' 'in field %(attr)s, not a %(valtype)s') % {'type': ''.join([obj_mod, self._obj_name]), 'attr': attr, 'valtype': ''.join([val_mod, obj_name])}) return value @staticmethod def to_primitive(obj, attr, value): return value.obj_to_primitive() @staticmethod def from_primitive(obj, attr, value): # FIXME(danms): Avoid circular import from base.py from oslo_versionedobjects import base as obj_base # NOTE (ndipanov): If they already got hydrated by the serializer, just # pass them back unchanged if isinstance(value, obj_base.VersionedObject): return value return obj.obj_from_primitive(value, obj._context) def describe(self): return "Object<%s>" % self._obj_name def stringify(self, value): if 'uuid' in value.fields: ident = '(%s)' % (value.obj_attr_is_set('uuid') and value.uuid or 'UNKNOWN') elif 'id' in value.fields: ident = '(%s)' % (value.obj_attr_is_set('id') and value.id or 'UNKNOWN') else: ident = '' return '%s%s' % (value.obj_name(), ident) def get_schema(self): from oslo_versionedobjects import base as obj_base obj_classes = obj_base.VersionedObjectRegistry.obj_classes() if self._obj_name in obj_classes: cls = obj_classes[self._obj_name][0] namespace_key = 
cls._obj_primitive_key('namespace') name_key = cls._obj_primitive_key('name') version_key = cls._obj_primitive_key('version') data_key = cls._obj_primitive_key('data') changes_key = cls._obj_primitive_key('changes') field_schemas = {key: field.get_schema() for key, field in cls.fields.items()} required_fields = [key for key, field in sorted(cls.fields.items()) if not field.nullable] schema = { 'type': ['object'], 'properties': { namespace_key: { 'type': 'string' }, name_key: { 'type': 'string' }, version_key: { 'type': 'string' }, changes_key: { 'type': 'array', 'items': { 'type': 'string' } }, data_key: { 'type': 'object', 'description': 'fields of %s' % self._obj_name, 'properties': field_schemas, }, }, 'required': [namespace_key, name_key, version_key, data_key] } if required_fields: schema['properties'][data_key]['required'] = required_fields return schema else: raise exception.UnsupportedObjectError(objtype=self._obj_name) class AutoTypedField(Field): AUTO_TYPE = None def __init__(self, **kwargs): super(AutoTypedField, self).__init__(self.AUTO_TYPE, **kwargs) class StringField(AutoTypedField): AUTO_TYPE = String() class SensitiveStringField(AutoTypedField): """Field type that masks passwords when the field is stringified.""" AUTO_TYPE = SensitiveString() class VersionPredicateField(AutoTypedField): AUTO_TYPE = VersionPredicate() class BaseEnumField(AutoTypedField): '''Base class for all enum field types This class should not be directly instantiated. Instead subclass it and set AUTO_TYPE to be a SomeEnum() where SomeEnum is a subclass of Enum. 
''' def __init__(self, **kwargs): if self.AUTO_TYPE is None: raise exception.EnumFieldUnset( fieldname=self.__class__.__name__) if not isinstance(self.AUTO_TYPE, Enum): raise exception.EnumFieldInvalid( typename=self.AUTO_TYPE.__class__.__name__, fieldname=self.__class__.__name__) super(BaseEnumField, self).__init__(**kwargs) def __repr__(self): valid_values = self._type.valid_values args = { 'nullable': self._nullable, 'default': self._default, } args.update({'valid_values': valid_values}) return '%s(%s)' % (self._type.__class__.__name__, ','.join(['%s=%s' % (k, v) for k, v in sorted(args.items())])) @property def valid_values(self): """Return the list of valid values for the field.""" return self._type.valid_values class EnumField(BaseEnumField): '''Anonymous enum field type This class allows for anonymous enum types to be declared, simply by passing in a list of valid values to its constructor. It is generally preferable though, to create an explicit named enum type by sub-classing the BaseEnumField type directly. ''' def __init__(self, valid_values, **kwargs): self.AUTO_TYPE = Enum(valid_values=valid_values) super(EnumField, self).__init__(**kwargs) class StateMachine(EnumField): """A mixin that can be applied to an EnumField to enforce a state machine e.g: Setting the code below on a field will ensure an object cannot transition from ERROR to ACTIVE :example: .. 
code-block:: python class FakeStateMachineField(fields.EnumField, fields.StateMachine): ACTIVE = 'ACTIVE' PENDING = 'PENDING' ERROR = 'ERROR' DELETED = 'DELETED' ALLOWED_TRANSITIONS = { ACTIVE: { PENDING, ERROR, DELETED, }, PENDING: { ACTIVE, ERROR }, ERROR: { PENDING, }, DELETED: {} # This is a terminal state } _TYPES = (ACTIVE, PENDING, ERROR, DELETED) def __init__(self, **kwargs): super(FakeStateMachineField, self).__init__( self._TYPES, **kwargs) """ # This is dict of states, that have dicts of states an object is # allowed to transition to ALLOWED_TRANSITIONS = {} def _my_name(self, obj): for name, field in obj.fields.items(): if field == self: return name return 'unknown' def coerce(self, obj, attr, value): super(StateMachine, self).coerce(obj, attr, value) my_name = self._my_name(obj) msg = _("%(object)s.%(name)s is not allowed to transition out of " "%(value)s state") if attr in obj: current_value = getattr(obj, attr) else: return value if current_value in self.ALLOWED_TRANSITIONS: if value in self.ALLOWED_TRANSITIONS[current_value]: return value else: msg = _( "%(object)s.%(name)s is not allowed to transition out of " "'%(current_value)s' state to '%(value)s' state, choose " "from %(options)r") msg = msg % { 'object': obj.obj_name(), 'name': my_name, 'current_value': current_value, 'value': value, 'options': [x for x in self.ALLOWED_TRANSITIONS[current_value]] } raise ValueError(msg) class UUIDField(AutoTypedField): """UUID Field Type .. warning:: This class does not actually validate UUIDs. This will happen in a future major version of oslo.versionedobjects To validate that you have valid UUIDs you need to do the following in your own objects/fields.py :Example: .. code-block:: python import oslo_versionedobjects.fields as ovo_fields class UUID(ovo_fields.UUID): def coerce(self, obj, attr, value): uuid.UUID(value) return str(value) class UUIDField(ovo_fields.AutoTypedField): AUTO_TYPE = UUID() and then in your objects use ``.object.fields.UUIDField``. 
This will become default behaviour in the future. """ AUTO_TYPE = UUID() class MACAddressField(AutoTypedField): AUTO_TYPE = MACAddress() class PCIAddressField(AutoTypedField): AUTO_TYPE = PCIAddress() class IntegerField(AutoTypedField): AUTO_TYPE = Integer() class NonNegativeIntegerField(AutoTypedField): AUTO_TYPE = NonNegativeInteger() class FloatField(AutoTypedField): AUTO_TYPE = Float() class NonNegativeFloatField(AutoTypedField): AUTO_TYPE = NonNegativeFloat() # This is a strict interpretation of boolean # values using Python's semantics for truth/falsehood class BooleanField(AutoTypedField): AUTO_TYPE = Boolean() # This is a flexible interpretation of boolean # values using common user friendly semantics for # truth/falsehood. ie strings like 'yes', 'no', # 'on', 'off', 't', 'f' get mapped to values you # would expect. class FlexibleBooleanField(AutoTypedField): AUTO_TYPE = FlexibleBoolean() class DateTimeField(AutoTypedField): def __init__(self, tzinfo_aware=True, **kwargs): self.AUTO_TYPE = DateTime(tzinfo_aware=tzinfo_aware) super(DateTimeField, self).__init__(**kwargs) class DictOfStringsField(AutoTypedField): AUTO_TYPE = Dict(String()) class DictOfNullableStringsField(AutoTypedField): AUTO_TYPE = Dict(String(), nullable=True) class DictOfIntegersField(AutoTypedField): AUTO_TYPE = Dict(Integer()) class ListOfStringsField(AutoTypedField): AUTO_TYPE = List(String()) class DictOfListOfStringsField(AutoTypedField): AUTO_TYPE = Dict(List(String())) class ListOfEnumField(AutoTypedField): def __init__(self, valid_values, **kwargs): self.AUTO_TYPE = List(Enum(valid_values)) super(ListOfEnumField, self).__init__(**kwargs) def __repr__(self): valid_values = self._type._element_type._type.valid_values args = { 'nullable': self._nullable, 'default': self._default, } args.update({'valid_values': valid_values}) return '%s(%s)' % (self._type.__class__.__name__, ','.join(['%s=%s' % (k, v) for k, v in sorted(args.items())])) class SetOfIntegersField(AutoTypedField): 
AUTO_TYPE = Set(Integer()) class ListOfSetsOfIntegersField(AutoTypedField): AUTO_TYPE = List(Set(Integer())) class ListOfIntegersField(AutoTypedField): AUTO_TYPE = List(Integer()) class ListOfDictOfNullableStringsField(AutoTypedField): AUTO_TYPE = List(Dict(String(), nullable=True)) class ObjectField(AutoTypedField): def __init__(self, objtype, subclasses=False, **kwargs): self.AUTO_TYPE = Object(objtype, subclasses) self.objname = objtype super(ObjectField, self).__init__(**kwargs) class ListOfObjectsField(AutoTypedField): def __init__(self, objtype, subclasses=False, **kwargs): self.AUTO_TYPE = List(Object(objtype, subclasses)) self.objname = objtype super(ListOfObjectsField, self).__init__(**kwargs) class ListOfUUIDField(AutoTypedField): AUTO_TYPE = List(UUID()) class IPAddressField(AutoTypedField): AUTO_TYPE = IPAddress() class IPV4AddressField(AutoTypedField): AUTO_TYPE = IPV4Address() class IPV6AddressField(AutoTypedField): AUTO_TYPE = IPV6Address() class IPV4AndV6AddressField(AutoTypedField): AUTO_TYPE = IPV4AndV6Address() class IPNetworkField(AutoTypedField): AUTO_TYPE = IPNetwork() class IPV4NetworkField(AutoTypedField): AUTO_TYPE = IPV4Network() class IPV6NetworkField(AutoTypedField): AUTO_TYPE = IPV6Network() class CoercedCollectionMixin(object): def __init__(self, *args, **kwargs): self._element_type = None self._obj = None self._field = None super(CoercedCollectionMixin, self).__init__(*args, **kwargs) def enable_coercing(self, element_type, obj, field): self._element_type = element_type self._obj = obj self._field = field class CoercedList(CoercedCollectionMixin, list): """List which coerces its elements List implementation which overrides all element-adding methods and coercing the element(s) being added to the required element type """ def _coerce_item(self, index, item): if hasattr(self, "_element_type") and self._element_type is not None: att_name = "%s[%i]" % (self._field, index) return self._element_type.coerce(self._obj, att_name, item) else: 
return item def __setitem__(self, i, y): if type(i) is slice: # compatibility with py3 and [::] slices start = i.start or 0 step = i.step or 1 coerced_items = [self._coerce_item(start + index * step, item) for index, item in enumerate(y)] super(CoercedList, self).__setitem__(i, coerced_items) else: super(CoercedList, self).__setitem__(i, self._coerce_item(i, y)) def append(self, x): super(CoercedList, self).append(self._coerce_item(len(self) + 1, x)) def extend(self, t): coerced_items = [self._coerce_item(len(self) + index, item) for index, item in enumerate(t)] super(CoercedList, self).extend(coerced_items) def insert(self, i, x): super(CoercedList, self).insert(i, self._coerce_item(i, x)) def __iadd__(self, y): coerced_items = [self._coerce_item(len(self) + index, item) for index, item in enumerate(y)] return super(CoercedList, self).__iadd__(coerced_items) def __setslice__(self, i, j, y): coerced_items = [self._coerce_item(i + index, item) for index, item in enumerate(y)] return super(CoercedList, self).__setslice__(i, j, coerced_items) class CoercedDict(CoercedCollectionMixin, dict): """Dict which coerces its values Dict implementation which overrides all element-adding methods and coercing the element(s) being added to the required element type """ def _coerce_dict(self, d): res = {} for key, element in d.items(): res[key] = self._coerce_item(key, element) return res def _coerce_item(self, key, item): if not isinstance(key, str): raise KeyTypeError(str, key) if hasattr(self, "_element_type") and self._element_type is not None: att_name = "%s[%s]" % (self._field, key) return self._element_type.coerce(self._obj, att_name, item) else: return item def __setitem__(self, key, value): super(CoercedDict, self).__setitem__(key, self._coerce_item(key, value)) def update(self, other=None, **kwargs): if other is not None: super(CoercedDict, self).update(self._coerce_dict(other), **self._coerce_dict(kwargs)) else: super(CoercedDict, 
self).update(**self._coerce_dict(kwargs)) def setdefault(self, key, default=None): return super(CoercedDict, self).setdefault(key, self._coerce_item(key, default)) class CoercedSet(CoercedCollectionMixin, set): """Set which coerces its values Dict implementation which overrides all element-adding methods and coercing the element(s) being added to the required element type """ def _coerce_element(self, element): if hasattr(self, "_element_type") and self._element_type is not None: return self._element_type.coerce(self._obj, "%s[%s]" % (self._field, element), element) else: return element def _coerce_iterable(self, values): coerced = set() for element in values: coerced.add(self._coerce_element(element)) return coerced def add(self, value): return super(CoercedSet, self).add(self._coerce_element(value)) def update(self, values): return super(CoercedSet, self).update(self._coerce_iterable(values)) def symmetric_difference_update(self, values): return super(CoercedSet, self).symmetric_difference_update( self._coerce_iterable(values)) def __ior__(self, y): return super(CoercedSet, self).__ior__(self._coerce_iterable(y)) def __ixor__(self, y): return super(CoercedSet, self).__ixor__(self._coerce_iterable(y)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/fixture.py0000664000175000017500000004770100000000000024541 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Fixtures for writing tests for code using oslo.versionedobjects .. note:: This module has several extra dependencies not needed at runtime for production code, and therefore not installed by default. To ensure those dependencies are present for your tests, add ``oslo.versionedobjects[fixtures]`` to your list of test dependencies. """ from collections import namedtuple from collections import OrderedDict import copy import datetime import inspect import logging from unittest import mock import fixtures from oslo_utils.secretutils import md5 from oslo_utils import versionutils as vutils from oslo_versionedobjects import base from oslo_versionedobjects import fields LOG = logging.getLogger(__name__) def compare_obj(test, obj, db_obj, subs=None, allow_missing=None, comparators=None): """Compare a VersionedObject and a dict-like database object. This automatically converts TZ-aware datetimes and iterates over the fields of the object. 
:param test: The TestCase doing the comparison :param obj: The VersionedObject to examine :param db_obj: The dict-like database object to use as reference :param subs: A dict of objkey=dbkey field substitutions :param allow_missing: A list of fields that may not be in db_obj :param comparators: Map of comparator functions to use for certain fields """ subs = subs or {} allow_missing = allow_missing or [] comparators = comparators or {} for key in obj.fields: db_key = subs.get(key, key) # If this is an allow_missing key and it's missing in either obj or # db_obj, just skip it if key in allow_missing: if key not in obj or db_key not in db_obj: continue # If the value isn't set on the object, and also isn't set on the # db_obj, we'll skip the value check, unset in both is equal if not obj.obj_attr_is_set(key) and db_key not in db_obj: continue # If it's set on the object and not on the db_obj, they aren't equal elif obj.obj_attr_is_set(key) and db_key not in db_obj: raise AssertionError(("%s (db_key: %s) is set on the object, but " "not on the db_obj, so the objects are not " "equal") % (key, db_key)) # If it's set on the db_obj and not the object, they aren't equal elif not obj.obj_attr_is_set(key) and db_key in db_obj: raise AssertionError(("%s (db_key: %s) is set on the db_obj, but " "not on the object, so the objects are not " "equal") % (key, db_key)) # All of the checks above have safeguarded us, so we know we will # get an obj_val and db_val without issue obj_val = getattr(obj, key) db_val = db_obj[db_key] if isinstance(obj_val, datetime.datetime): obj_val = obj_val.replace(tzinfo=None) if isinstance(db_val, datetime.datetime): db_val = obj_val.replace(tzinfo=None) if key in comparators: comparator = comparators[key] comparator(db_val, obj_val) else: test.assertEqual(db_val, obj_val) class FakeIndirectionAPI(base.VersionedObjectIndirectionAPI): def __init__(self, serializer=None): super(FakeIndirectionAPI, self).__init__() self._ser = serializer or 
base.VersionedObjectSerializer() def _get_changes(self, orig_obj, new_obj): updates = dict() for name, field in new_obj.fields.items(): if not new_obj.obj_attr_is_set(name): continue if (not orig_obj.obj_attr_is_set(name) or getattr(orig_obj, name) != getattr(new_obj, name)): updates[name] = field.to_primitive(new_obj, name, getattr(new_obj, name)) return updates def _canonicalize_args(self, context, args, kwargs): args = tuple( [self._ser.deserialize_entity( context, self._ser.serialize_entity(context, arg)) for arg in args]) kwargs = dict( [(argname, self._ser.deserialize_entity( context, self._ser.serialize_entity(context, arg))) for argname, arg in kwargs.items()]) return args, kwargs def object_action(self, context, objinst, objmethod, args, kwargs): objinst = self._ser.deserialize_entity( context, self._ser.serialize_entity( context, objinst)) objmethod = str(objmethod) args, kwargs = self._canonicalize_args(context, args, kwargs) original = objinst.obj_clone() with mock.patch('oslo_versionedobjects.base.VersionedObject.' 'indirection_api', new=None): result = getattr(objinst, objmethod)(*args, **kwargs) updates = self._get_changes(original, objinst) updates['obj_what_changed'] = objinst.obj_what_changed() return updates, result def object_class_action(self, context, objname, objmethod, objver, args, kwargs): objname = str(objname) objmethod = str(objmethod) objver = str(objver) args, kwargs = self._canonicalize_args(context, args, kwargs) cls = base.VersionedObject.obj_class_from_name(objname, objver) with mock.patch('oslo_versionedobjects.base.VersionedObject.' 
'indirection_api', new=None): result = getattr(cls, objmethod)(context, *args, **kwargs) return (base.VersionedObject.obj_from_primitive( result.obj_to_primitive(target_version=objver), context=context) if isinstance(result, base.VersionedObject) else result) def object_class_action_versions(self, context, objname, objmethod, object_versions, args, kwargs): objname = str(objname) objmethod = str(objmethod) object_versions = {str(o): str(v) for o, v in object_versions.items()} args, kwargs = self._canonicalize_args(context, args, kwargs) objver = object_versions[objname] cls = base.VersionedObject.obj_class_from_name(objname, objver) with mock.patch('oslo_versionedobjects.base.VersionedObject.' 'indirection_api', new=None): result = getattr(cls, objmethod)(context, *args, **kwargs) return (base.VersionedObject.obj_from_primitive( result.obj_to_primitive(target_version=objver), context=context) if isinstance(result, base.VersionedObject) else result) def object_backport(self, context, objinst, target_version): raise Exception('not supported') class IndirectionFixture(fixtures.Fixture): def __init__(self, indirection_api=None): self.indirection_api = indirection_api or FakeIndirectionAPI() def setUp(self): super(IndirectionFixture, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'oslo_versionedobjects.base.VersionedObject.indirection_api', self.indirection_api)) class ObjectHashMismatch(Exception): def __init__(self, expected, actual): self.expected = expected self.actual = actual def __str__(self): return 'Hashes have changed for %s' % ( ','.join(set(self.expected.keys() + self.actual.keys()))) CompatArgSpec = namedtuple( 'ArgSpec', ('args', 'varargs', 'keywords', 'defaults')) def get_method_spec(method): """Get a stable and compatible method spec. Newer features in Python3 (kw-only arguments and annotations) are not supported or representable with inspect.getargspec() but many object hashes are already recorded using that method. 
This attempts to return something compatible with getargspec() when possible (i.e. when those features are not used), and otherwise just returns the newer getfullargspec() representation. """ fullspec = inspect.getfullargspec(method) if any([fullspec.kwonlyargs, fullspec.kwonlydefaults, fullspec.annotations]): # Method uses newer-than-getargspec() features, so return the # newer full spec return fullspec else: return CompatArgSpec(fullspec.args, fullspec.varargs, fullspec.varkw, fullspec.defaults) class ObjectVersionChecker(object): def __init__(self, obj_classes=base.VersionedObjectRegistry.obj_classes()): self.obj_classes = obj_classes def _find_remotable_method(self, cls, thing, parent_was_remotable=False): """Follow a chain of remotable things down to the original function.""" if isinstance(thing, classmethod): return self._find_remotable_method(cls, thing.__get__(None, cls)) elif (inspect.ismethod(thing) or inspect.isfunction(thing)) and hasattr(thing, 'remotable'): return self._find_remotable_method(cls, thing.original_fn, parent_was_remotable=True) elif parent_was_remotable: # We must be the first non-remotable thing underneath a stack of # remotable things (i.e. the actual implementation method) return thing else: # This means the top-level thing never hit a remotable layer return None def _get_fingerprint(self, obj_name, extra_data_func=None): obj_class = self.obj_classes[obj_name][0] obj_fields = list(obj_class.fields.items()) obj_fields.sort() methods = [] for name in dir(obj_class): thing = getattr(obj_class, name) if inspect.ismethod(thing) or inspect.isfunction(thing) \ or isinstance(thing, classmethod): method = self._find_remotable_method(obj_class, thing) if method: methods.append((name, get_method_spec(method))) methods.sort() # NOTE(danms): Things that need a version bump are any fields # and their types, or the signatures of any remotable methods. 
# Of course, these are just the mechanical changes we can detect, # but many other things may require a version bump (method behavior # and return value changes, for example). if hasattr(obj_class, 'child_versions'): relevant_data = (obj_fields, methods, OrderedDict( sorted(obj_class.child_versions.items()))) else: relevant_data = (obj_fields, methods) if extra_data_func: relevant_data += extra_data_func(obj_class) fingerprint = '%s-%s' % (obj_class.VERSION, md5( bytes(repr(relevant_data).encode()), usedforsecurity=False).hexdigest()) return fingerprint def get_hashes(self, extra_data_func=None): """Return a dict of computed object hashes. :param extra_data_func: a function that is given the object class which gathers more relevant data about the class that is needed in versioning. Returns a tuple containing the extra data bits. """ fingerprints = {} for obj_name in sorted(self.obj_classes): fingerprints[obj_name] = self._get_fingerprint( obj_name, extra_data_func=extra_data_func) return fingerprints def test_hashes(self, expected_hashes, extra_data_func=None): fingerprints = self.get_hashes(extra_data_func=extra_data_func) stored = set(expected_hashes.items()) computed = set(fingerprints.items()) changed = stored.symmetric_difference(computed) expected = {} actual = {} for name, hash in changed: expected[name] = expected_hashes.get(name) actual[name] = fingerprints.get(name) return expected, actual def _get_dependencies(self, tree, obj_class): obj_name = obj_class.obj_name() if obj_name in tree: return for name, field in obj_class.fields.items(): if isinstance(field._type, fields.Object): sub_obj_name = field._type._obj_name sub_obj_class = self.obj_classes[sub_obj_name][0] self._get_dependencies(tree, sub_obj_class) tree.setdefault(obj_name, {}) tree[obj_name][sub_obj_name] = sub_obj_class.VERSION def get_dependency_tree(self): tree = {} for obj_name in self.obj_classes.keys(): self._get_dependencies(tree, self.obj_classes[obj_name][0]) return tree def 
test_relationships(self, expected_tree): actual_tree = self.get_dependency_tree() stored = set([(x, str(y)) for x, y in expected_tree.items()]) computed = set([(x, str(y)) for x, y in actual_tree.items()]) changed = stored.symmetric_difference(computed) expected = {} actual = {} for name, deps in changed: expected[name] = expected_tree.get(name) actual[name] = actual_tree.get(name) return expected, actual def _test_object_compatibility(self, obj_class, manifest=None, init_args=None, init_kwargs=None): init_args = init_args or [] init_kwargs = init_kwargs or {} version = vutils.convert_version_to_tuple(obj_class.VERSION) kwargs = {'version_manifest': manifest} if manifest else {} for n in range(version[1] + 1): test_version = '%d.%d' % (version[0], n) # Run the test with OS_DEBUG=True to see this. LOG.debug('testing obj: %s version: %s' % (obj_class.obj_name(), test_version)) kwargs['target_version'] = test_version obj_class(*init_args, **init_kwargs).obj_to_primitive(**kwargs) def test_compatibility_routines(self, use_manifest=False, init_args=None, init_kwargs=None): """Test obj_make_compatible() on all object classes. :param use_manifest: a boolean that determines if the version manifest should be passed to obj_make_compatible :param init_args: a dictionary of the format {obj_class: [arg1, arg2]} that will be used to pass arguments to init on the given obj_class. If no args are needed, the obj_class does not need to be added to the dict :param init_kwargs: a dictionary of the format {obj_class: {'kwarg1': val1}} that will be used to pass kwargs to init on the given obj_class. If no kwargs are needed, the obj_class does not need to be added to the dict """ # Iterate all object classes and verify that we can run # obj_make_compatible with every older version than current. # This doesn't actually test the data conversions, but it at least # makes sure the method doesn't blow up on something basic like # expecting the wrong version format. 
init_args = init_args or {} init_kwargs = init_kwargs or {} for obj_name in self.obj_classes: obj_classes = self.obj_classes[obj_name] if use_manifest: manifest = base.obj_tree_get_versions(obj_name) else: manifest = None for obj_class in obj_classes: args_for_init = init_args.get(obj_class, []) kwargs_for_init = init_kwargs.get(obj_class, {}) self._test_object_compatibility(obj_class, manifest=manifest, init_args=args_for_init, init_kwargs=kwargs_for_init) def _test_relationships_in_order(self, obj_class): for field, versions in obj_class.obj_relationships.items(): last_my_version = (0, 0) last_child_version = (0, 0) for my_version, child_version in versions: _my_version = vutils.convert_version_to_tuple(my_version) _ch_version = vutils.convert_version_to_tuple(child_version) if not (last_my_version < _my_version and last_child_version <= _ch_version): raise AssertionError(('Object %s relationship %s->%s for ' 'field %s is out of order') % ( obj_class.obj_name(), my_version, child_version, field)) last_my_version = _my_version last_child_version = _ch_version def test_relationships_in_order(self): # Iterate all object classes and verify that we can run # obj_make_compatible with every older version than current. # This doesn't actually test the data conversions, but it at least # makes sure the method doesn't blow up on something basic like # expecting the wrong version format. for obj_name in self.obj_classes: obj_classes = self.obj_classes[obj_name] for obj_class in obj_classes: self._test_relationships_in_order(obj_class) class VersionedObjectRegistryFixture(fixtures.Fixture): """Use a VersionedObjectRegistry as a temp registry pattern fixture. The pattern solution is to backup the object registry, register a class locally, and then restore the original registry. This could be used for test objects that do not need to be registered permanently but will have calls which lookup registration. 
""" def setUp(self): super(VersionedObjectRegistryFixture, self).setUp() self._base_test_obj_backup = copy.deepcopy( base.VersionedObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) @staticmethod def register(cls_name): base.VersionedObjectRegistry.register(cls_name) def _restore_obj_registry(self): base.VersionedObjectRegistry._registry._obj_classes = \ self._base_test_obj_backup class StableObjectJsonFixture(fixtures.Fixture): """Fixture that makes sure we get stable JSON object representations. Since objects contain things like set(), which can't be converted to JSON, we have some situations where the representation isn't fully deterministic. This doesn't matter at all at runtime, but does to unit tests that try to assert things at a low level. This fixture mocks the obj_to_primitive() call and makes sure to sort the list of changed fields (which came from a set) before returning it to the caller. """ def __init__(self): self._original_otp = base.VersionedObject.obj_to_primitive def setUp(self): super(StableObjectJsonFixture, self).setUp() def _doit(obj, *args, **kwargs): result = self._original_otp(obj, *args, **kwargs) changes_key = obj._obj_primitive_key('changes') if changes_key in result: result[changes_key].sort() return result self.useFixture(fixtures.MonkeyPatch( 'oslo_versionedobjects.base.VersionedObject.obj_to_primitive', _doit)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.846407 oslo.versionedobjects-3.4.0/oslo_versionedobjects/locale/0000775000175000017500000000000000000000000023727 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.846407 oslo.versionedobjects-3.4.0/oslo_versionedobjects/locale/en_GB/0000775000175000017500000000000000000000000024701 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 
mtime=1716452798.862407 oslo.versionedobjects-3.4.0/oslo_versionedobjects/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000026466 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/locale/en_GB/LC_MESSAGES/oslo_versionedobjects.po0000664000175000017500000001313600000000000033436 0ustar00zuulzuul00000000000000# Translations template for oslo.versionedobjects. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the # oslo.versionedobjects project. # # Translators: # Andi Chandler , 2015 # Andi Chandler , 2016. #zanata # Andreas Jaeger , 2016. #zanata # Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2020. #zanata msgid "" msgstr "" "Project-Id-Version: oslo.versionedobjects VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2020-05-21 17:27+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-05-04 09:27+0000\n" "Last-Translator: Andi Chandler \n" "Language: en_GB\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: English (United Kingdom)\n" #, python-format msgid "" "%(child_objname)s is referenced by %(parent_objname)s but is not registered" msgstr "" "%(child_objname)s is referenced by %(parent_objname)s but is not registered" #, python-format msgid "%(fieldname)s missing field type" msgstr "%(fieldname)s missing field type" #, python-format msgid "%(object)s.%(name)s is not allowed to transition out of %(value)s state" msgstr "" "%(object)s.%(name)s is not allowed to transition out of %(value)s state" #, python-format msgid "" "%(object)s.%(name)s is not allowed to transition out of '%(current_value)s' " "state to '%(value)s' state, choose from 
%(options)r" msgstr "" "%(object)s.%(name)s is not allowed to transition out of '%(current_value)s' " "state to '%(value)s' state, choose from %(options)r" #, python-format msgid "%(objname)s object has no attribute '%(attrname)s'" msgstr "%(objname)s object has no attribute '%(attrname)s'" #, python-format msgid "%(typename)s in %(fieldname)s is not an instance of Enum" msgstr "%(typename)s in %(fieldname)s is not an instance of Enum" #, python-format msgid "%s has no pattern" msgstr "%s has no pattern" #, python-format msgid "A datetime.datetime is required in field %(attr)s, not a %(type)s" msgstr "A datetime.datetime is required in field %(attr)s, not a %(type)s" #, python-format msgid "A dict is required in field %(attr)s, not a %(type)s" msgstr "A dict is required in field %(attr)s, not a %(type)s" #, python-format msgid "A list is required in field %(attr)s, not a %(type)s" msgstr "A list is required in field %(attr)s, not a %(type)s" #, python-format msgid "A set is required in field %(attr)s, not a %(type)s" msgstr "A set is required in field %(attr)s, not a %(type)s" #, python-format msgid "A string is required in field %(attr)s, not a %(type)s" msgstr "A string is required in field %(attr)s, not a %(type)s" #, python-format msgid "" "An object of type %(type)s is required in field %(attr)s, not a %(valtype)s" msgstr "" "An object of type %(type)s is required in field %(attr)s, not a %(valtype)s" msgid "An unknown exception occurred." msgstr "An unknown exception occurred." 
#, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "Cannot call %(method)s on orphaned %(objtype)s object" #, python-format msgid "Cannot load '%s' in the base class" msgstr "Cannot load '%s' in the base class" #, python-format msgid "Cannot modify readonly field %(field)s" msgstr "Cannot modify readonly field %(field)s" msgid "Cannot save anything in the base class" msgstr "Cannot save anything in the base class" #, python-format msgid "Element %(key)s:%(val)s must be of type %(expected)s not %(actual)s" msgstr "Element %(key)s:%(val)s must be of type %(expected)s not %(actual)s" msgid "Enum fields require a list of valid_values" msgstr "Enum fields require a list of valid_values" msgid "Enum valid values are not valid" msgstr "Enum valid values are not valid" #, python-format msgid "Field %(field)s of %(objname)s is not an instance of Field" msgstr "Field %(field)s of %(objname)s is not an instance of Field" #, python-format msgid "Field `%s' cannot be None" msgstr "Field `%s' cannot be None" #, python-format msgid "Field value %s is invalid" msgstr "Field value %s is invalid" #, python-format msgid "Invalid target version %(version)s" msgstr "Invalid target version %(version)s" #, python-format msgid "Key %(key)s must be of type %(expected)s not %(actual)s" msgstr "Key %(key)s must be of type %(expected)s not %(actual)s" #, python-format msgid "Malformed MAC %s" msgstr "Malformed MAC %s" #, python-format msgid "Malformed PCI address %s" msgstr "Malformed PCI address %s" #, python-format msgid "Network \"%(val)s\" is not valid in field %(attr)s" msgstr "Network \"%(val)s\" is not valid in field %(attr)s" #, python-format msgid "No subobject existed at version %(target_version)s" msgstr "No subobject existed at version %(target_version)s" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "Object action %(action)s failed because: %(reason)s" #, python-format msgid "Unsupported object type 
%(objtype)s" msgstr "Unsupported object type %(objtype)s" #, python-format msgid "Value must be >= 0 for field %s" msgstr "Value must be >= 0 for field %s" #, python-format msgid "" "Version %(objver)s of %(objname)s is not supported, supported version is " "%(supported)s" msgstr "" "Version %(objver)s of %(objname)s is not supported, supported version is " "%(supported)s" #, python-format msgid "Version %(val)s is not a valid predicate in field %(attr)s" msgstr "Version %(val)s is not a valid predicate in field %(attr)s" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/test.py0000664000175000017500000001631400000000000024026 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for our unit tests. Some black magic for inline callbacks. 
""" import eventlet # noqa eventlet.monkey_patch(os=False) # noqa import functools # noqa: E402 import inspect # noqa: E402 import os # noqa: E402 from unittest import mock # noqa: E402 import fixtures # noqa: E402 from oslo_concurrency import lockutils # noqa: E402 from oslo_config import cfg # noqa: E402 from oslo_config import fixture as config_fixture # noqa: E402 from oslo_log.fixture import logging_error # noqa: E402 import testtools # noqa: E402 from oslo_versionedobjects.tests import obj_fixtures # noqa: E402 CONF = cfg.CONF class TestingException(Exception): pass class skipIf(object): def __init__(self, condition, reason): self.condition = condition self.reason = reason def __call__(self, func_or_cls): condition = self.condition reason = self.reason if inspect.isfunction(func_or_cls): @functools.wraps(func_or_cls) def wrapped(*args, **kwargs): if condition: raise testtools.TestCase.skipException(reason) return func_or_cls(*args, **kwargs) return wrapped elif inspect.isclass(func_or_cls): orig_func = getattr(func_or_cls, 'setUp') @functools.wraps(orig_func) def new_func(self, *args, **kwargs): if condition: raise testtools.TestCase.skipException(reason) orig_func(self, *args, **kwargs) func_or_cls.setUp = new_func return func_or_cls else: raise TypeError('skipUnless can be used only with functions or ' 'classes') def _patch_mock_to_raise_for_invalid_assert_calls(): def raise_for_invalid_assert_calls(wrapped): def wrapper(_self, name): valid_asserts = [ 'assert_called_with', 'assert_called_once_with', 'assert_has_calls', 'assert_any_calls'] if name.startswith('assert') and name not in valid_asserts: raise AttributeError('%s is not a valid mock assert method' % name) return wrapped(_self, name) return wrapper mock.Mock.__getattr__ = raise_for_invalid_assert_calls( mock.Mock.__getattr__) # NOTE(gibi): needs to be called only once at import time # to patch the mock lib _patch_mock_to_raise_for_invalid_assert_calls() class TestCase(testtools.TestCase): """Test 
case base class for all unit tests.""" REQUIRES_LOCKING = False TIMEOUT_SCALING_FACTOR = 1 def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() self.useFixture(obj_fixtures.Timeout( os.environ.get('OS_TEST_TIMEOUT', 0), self.TIMEOUT_SCALING_FACTOR)) self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) self.useFixture(obj_fixtures.TranslationFixture()) self.useFixture(logging_error.get_logging_handle_error_fixture()) self.useFixture(obj_fixtures.OutputStreamCapture()) self.useFixture(obj_fixtures.StandardLogging()) # NOTE(sdague): because of the way we were using the lock # wrapper we eneded up with a lot of tests that started # relying on global external locking being set up for them. We # consider all of these to be *bugs*. Tests should not require # global external locking, or if they do, they should # explicitly set it up themselves. # # The following REQUIRES_LOCKING class parameter is provided # as a bridge to get us there. No new tests should be added # that require it, and existing classes and tests should be # fixed to not need it. if self.REQUIRES_LOCKING: lock_path = self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture( config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=lock_path, group='oslo_concurrency') # NOTE(blk-u): WarningsFixture must be after the Database fixture # because sqlalchemy-migrate messes with the warnings filters. 
self.useFixture(obj_fixtures.WarningsFixture()) self.addCleanup(self._clear_attrs) self.useFixture(fixtures.EnvironmentVariable('http_proxy')) def _clear_attrs(self): # Delete attributes that don't start with _ so they don't pin # memory around unnecessarily for the duration of the test # suite for key in [k for k in self.__dict__.keys() if k[0] != '_']: del self.__dict__[key] def assertPublicAPISignatures(self, baseinst, inst): def get_public_apis(inst): methods = {} for (name, value) in inspect.getmembers(inst, inspect.ismethod): if name.startswith("_"): continue methods[name] = value return methods baseclass = baseinst.__class__.__name__ basemethods = get_public_apis(baseinst) implmethods = get_public_apis(inst) extranames = [] for name in sorted(implmethods.keys()): if name not in basemethods: extranames.append(name) self.assertEqual([], extranames, "public APIs not listed in base class %s" % baseclass) for name in sorted(implmethods.keys()): baseargs = inspect.getfullargspec(basemethods[name]) implargs = inspect.getfullargspec(implmethods[name]) self.assertEqual(baseargs, implargs, "%s args don't match base class %s" % (name, baseclass)) class APICoverage(object): cover_api = None def test_api_methods(self): self.assertTrue(self.cover_api is not None) api_methods = [x for x in dir(self.cover_api) if not x.startswith('_')] test_methods = [x[5:] for x in dir(self) if x.startswith('test_')] self.assertThat( test_methods, testtools.matchers.ContainsAll(api_methods)) class BaseHookTestCase(TestCase): def assert_has_hook(self, expected_name, func): self.assertTrue(hasattr(func, '__hook_name__')) self.assertEqual(expected_name, func.__hook_name__) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.862407 oslo.versionedobjects-3.4.0/oslo_versionedobjects/tests/0000775000175000017500000000000000000000000023632 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/tests/__init__.py0000664000175000017500000000000000000000000025731 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/tests/obj_fixtures.py0000664000175000017500000001451100000000000026711 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fixtures for VersionedObject tests.""" import gettext import logging import os import warnings import fixtures from oslo_config import cfg _TRUE_VALUES = ('True', 'true', '1', 'yes') CONF = cfg.CONF DB_SCHEMA = "" class TranslationFixture(fixtures.Fixture): """Use gettext NullTranslation objects in tests.""" def setUp(self): super(TranslationFixture, self).setUp() nulltrans = gettext.NullTranslations() gettext_fixture = fixtures.MonkeyPatch('gettext.translation', lambda *x, **y: nulltrans) self.gettext_patcher = self.useFixture(gettext_fixture) class NullHandler(logging.Handler): """custom default NullHandler to attempt to format the record. Used in conjunction with log_fixture.get_logging_handle_error_fixture to detect formatting errors in debug level logs without saving the logs. 
""" def handle(self, record): self.format(record) def emit(self, record): pass def createLock(self): self.lock = None class StandardLogging(fixtures.Fixture): """Setup Logging redirection for tests. There are a number of things we want to handle with logging in tests: * Redirect the logging to somewhere that we can test or dump it later. * Ensure that as many DEBUG messages as possible are actually executed, to ensure they are actually syntactically valid (they often have not been). * Ensure that we create useful output for tests that doesn't overwhelm the testing system (which means we can't capture the 100 MB of debug logging on every run). To do this we create a logger fixture at the root level, which defaults to INFO and create a Null Logger at DEBUG which lets us execute log messages at DEBUG but not keep the output. To support local debugging OS_DEBUG=True can be set in the environment, which will print out the full debug logging. There are also a set of overrides for particularly verbose modules to be even less than INFO. """ def setUp(self): super(StandardLogging, self).setUp() # set root logger to debug root = logging.getLogger() root.setLevel(logging.DEBUG) # supports collecting debug level for local runs if os.environ.get('OS_DEBUG') in _TRUE_VALUES: level = logging.DEBUG else: level = logging.INFO # Collect logs fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s' self.logger = self.useFixture( fixtures.FakeLogger(format=fs, level=None)) # TODO(sdague): why can't we send level through the fake # logger? Tests prove that it breaks, but it's worth getting # to the bottom of. root.handlers[0].setLevel(level) if level > logging.DEBUG: # Just attempt to format debug level logs, but don't save them handler = NullHandler() self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False)) handler.setLevel(logging.DEBUG) class OutputStreamCapture(fixtures.Fixture): """Capture output streams during tests. 
This fixture captures errant printing to stderr / stdout during the tests and lets us see those streams at the end of the test runs instead. Useful to see what was happening during failed tests. """ def setUp(self): super(OutputStreamCapture, self).setUp() if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: self.out = self.useFixture(fixtures.StringStream('stdout')) self.useFixture( fixtures.MonkeyPatch('sys.stdout', self.out.stream)) if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: self.err = self.useFixture(fixtures.StringStream('stderr')) self.useFixture( fixtures.MonkeyPatch('sys.stderr', self.err.stream)) @property def stderr(self): return self.err._details["stderr"].as_text() @property def stdout(self): return self.out._details["stdout"].as_text() class Timeout(fixtures.Fixture): """Setup per test timeouts. In order to avoid test deadlocks we support setting up a test timeout parameter read from the environment. In almost all cases where the timeout is reached this means a deadlock. A class level TIMEOUT_SCALING_FACTOR also exists, which allows extremely long tests to specify they need more time. """ def __init__(self, timeout, scaling=1): super(Timeout, self).__init__() try: self.test_timeout = int(timeout) except ValueError: # If timeout value is invalid do not set a timeout. self.test_timeout = 0 if scaling >= 1: self.test_timeout *= scaling else: raise ValueError('scaling value must be >= 1') def setUp(self): super(Timeout, self).setUp() if self.test_timeout > 0: self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True)) class WarningsFixture(fixtures.Fixture): """Filters out warnings during test runs.""" def setUp(self): super(WarningsFixture, self).setUp() # NOTE(sdague): Make deprecation warnings only happen once. Otherwise # this gets kind of crazy given the way that upstream python libs use # this. 
warnings.simplefilter("once", DeprecationWarning) warnings.filterwarnings('ignore', message='With-statements now directly support' ' multiple context managers') self.addCleanup(warnings.resetwarnings) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/tests/test_exception.py0000664000175000017500000000540600000000000027246 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from oslo_versionedobjects import exception from oslo_versionedobjects import test notifier = mock.Mock() class TestWrapper(object): @exception.wrap_exception(notifier=notifier) def raise_exc(self, context, exc, admin_password): raise exc class ExceptionTestCase(test.TestCase): def test_wrap_exception_wrapped(self): test = TestWrapper() # Ensure that the original function is available in # the __wrapped__ attribute self.assertTrue(hasattr(test.raise_exc, '__wrapped__')) def test_wrap_exception(self): context = "context" exc = ValueError() test = TestWrapper() notifier.reset_mock() # wrap_exception() must reraise the exception self.assertRaises(ValueError, # nosec test.raise_exc, context, exc, admin_password="xxx") # wrap_exception() strips admin_password from args payload = {'args': {'self': test, 'context': context, 'exc': exc}, 'exception': exc} notifier.error.assert_called_once_with(context, 'raise_exc', payload) def test_vo_exception(self): exc = exception.VersionedObjectsException() self.assertEqual('An unknown exception occurred.', str(exc)) self.assertEqual({'code': 500}, exc.kwargs) def test_object_action_error(self): exc = exception.ObjectActionError(action='ACTION', reason='REASON', code=123) self.assertEqual('Object action ACTION failed because: REASON', str(exc)) self.assertEqual({'code': 123, 'action': 'ACTION', 'reason': 'REASON'}, exc.kwargs) def test_constructor_format_error(self): # Test error handling on formatting exception message in the # VersionedObjectsException constructor with mock.patch.object(exception, 'LOG') as log: exc = exception.ObjectActionError() log.error.assert_called_with('code: 500') # Formatting failed: the message is the original format string self.assertEqual(exception.ObjectActionError.msg_fmt, str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 
oslo.versionedobjects-3.4.0/oslo_versionedobjects/tests/test_fields.py0000664000175000017500000014163700000000000026525 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock import warnings import iso8601 import netaddr import testtools from oslo_versionedobjects import _utils from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import exception from oslo_versionedobjects import fields from oslo_versionedobjects import test class FakeFieldType(fields.FieldType): def coerce(self, obj, attr, value): return '*%s*' % value def to_primitive(self, obj, attr, value): return '!%s!' 
% value def from_primitive(self, obj, attr, value): return value[1:-1] def get_schema(self): return {'type': ['foo']} class FakeEnum(fields.Enum): FROG = "frog" PLATYPUS = "platypus" ALLIGATOR = "alligator" ALL = (FROG, PLATYPUS, ALLIGATOR) def __init__(self, **kwargs): super(FakeEnum, self).__init__(valid_values=FakeEnum.ALL, **kwargs) class FakeEnumAlt(fields.Enum): FROG = "frog" PLATYPUS = "platypus" AARDVARK = "aardvark" ALL = set([FROG, PLATYPUS, AARDVARK]) def __init__(self, **kwargs): super(FakeEnumAlt, self).__init__(valid_values=FakeEnumAlt.ALL, **kwargs) class FakeEnumField(fields.BaseEnumField): AUTO_TYPE = FakeEnum() class FakeStateMachineField(fields.StateMachine): ACTIVE = 'ACTIVE' PENDING = 'PENDING' ERROR = 'ERROR' ALLOWED_TRANSITIONS = { ACTIVE: { PENDING, ERROR }, PENDING: { ACTIVE, ERROR }, ERROR: { PENDING, }, } _TYPES = (ACTIVE, PENDING, ERROR) def __init__(self, **kwargs): super(FakeStateMachineField, self).__init__(self._TYPES, **kwargs) class FakeEnumAltField(fields.BaseEnumField): AUTO_TYPE = FakeEnumAlt() class TestFieldType(test.TestCase): def test_get_schema(self): self.assertRaises(NotImplementedError, fields.FieldType().get_schema) class TestField(test.TestCase): def setUp(self): super(TestField, self).setUp() self.field = fields.Field(FakeFieldType()) self.coerce_good_values = [('foo', '*foo*')] self.coerce_bad_values = [] self.to_primitive_values = [('foo', '!foo!')] self.from_primitive_values = [('!foo!', 'foo')] def test_coerce_good_values(self): for in_val, out_val in self.coerce_good_values: self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val)) def test_coerce_bad_values(self): for in_val in self.coerce_bad_values: self.assertRaises((TypeError, ValueError), self.field.coerce, 'obj', 'attr', in_val) def test_to_primitive(self): for in_val, prim_val in self.to_primitive_values: self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr', in_val)) def test_from_primitive(self): class ObjectLikeThing(object): 
_context = 'context' for prim_val, out_val in self.from_primitive_values: self.assertEqual(out_val, self.field.from_primitive( ObjectLikeThing, 'attr', prim_val)) def test_stringify(self): self.assertEqual('123', self.field.stringify(123)) class TestSchema(test.TestCase): def setUp(self): super(TestSchema, self).setUp() self.field = fields.Field(FakeFieldType(), nullable=True, default='', read_only=False) def test_get_schema(self): self.assertEqual({'type': ['foo', 'null'], 'default': '', 'readonly': False}, self.field.get_schema()) class TestString(TestField): def setUp(self): super(TestString, self).setUp() self.field = fields.StringField() self.coerce_good_values = [ ('foo', 'foo'), (1, '1'), (1.0, '1.0'), (True, 'True')] self.coerce_bad_values = [None] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'123'", self.field.stringify(123)) def test_fieldtype_get_schema(self): self.assertEqual({'type': ['string']}, self.field._type.get_schema()) def test_get_schema(self): self.assertEqual({'type': ['string'], 'readonly': False}, self.field.get_schema()) class TestSensitiveString(TestString): def setUp(self): super(TestSensitiveString, self).setUp() self.field = fields.SensitiveStringField() def test_stringify(self): payload = """{'admin_password':'mypassword'}""" expected = """'{'admin_password':'***'}'""" self.assertEqual(expected, self.field.stringify(payload)) class TestVersionPredicate(TestString): def setUp(self): super(TestVersionPredicate, self).setUp() self.field = fields.VersionPredicateField() self.coerce_good_values = [('>=1.0', '>=1.0'), ('==1.1', '==1.1'), ('<1.1.0', '<1.1.0')] self.coerce_bad_values = ['1', 'foo', '>1', 1.0, '1.0', '=1.0'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] class TestMACAddress(TestField): def setUp(self): super(TestMACAddress, self).setUp() self.field 
= fields.MACAddressField() self.coerce_good_values = [ ('c6:df:11:a5:c8:5d', 'c6:df:11:a5:c8:5d'), ('C6:DF:11:A5:C8:5D', 'c6:df:11:a5:c8:5d'), ('c6:df:11:a5:c8:5d', 'c6:df:11:a5:c8:5d'), ('C6:DF:11:A5:C8:5D', 'c6:df:11:a5:c8:5d'), ] self.coerce_bad_values = [ 'C6:DF:11:A5:C8', # Too short 'C6:DF:11:A5:C8:5D:D7', # Too long 'C6:DF:11:A5:C8:KD', # Bad octal 1123123, # Number {}, # dict ] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_get_schema(self): schema = self.field.get_schema() self.assertEqual(['string'], schema['type']) self.assertEqual(False, schema['readonly']) pattern = schema['pattern'] for _, valid_val in self.coerce_good_values: self.assertRegex(valid_val, pattern) invalid_vals = [x for x in self.coerce_bad_values if isinstance(x, str)] for invalid_val in invalid_vals: self.assertNotRegex(invalid_val, pattern) class TestPCIAddress(TestField): def setUp(self): super(TestPCIAddress, self).setUp() self.field = fields.PCIAddressField() self.coerce_good_values = [ ('0000:02:00.0', '0000:02:00.0'), ('FFFF:FF:1F.7', 'ffff:ff:1f.7'), ('fFfF:fF:1F.7', 'ffff:ff:1f.7'), ] self.coerce_bad_values = [ '000:02:00.0', # Too short '00000:02:00.0', # Too long 'FFFF:FF:2F.7', # Bad slot 'FFFF:GF:1F.7', # Bad octal 1123123, # Number {}, # dict ] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_get_schema(self): schema = self.field.get_schema() self.assertEqual(['string'], schema['type']) self.assertEqual(False, schema['readonly']) pattern = schema['pattern'] for _, valid_val in self.coerce_good_values: self.assertRegex(valid_val, pattern) invalid_vals = [x for x in self.coerce_bad_values if isinstance(x, str)] for invalid_val in invalid_vals: self.assertNotRegex(invalid_val, pattern) class TestUUID(TestField): def setUp(self): super(TestUUID, self).setUp() self.field = fields.UUIDField() self.coerce_good_values = [ 
('da66a411-af0e-4829-9b67-475017ddd152', 'da66a411-af0e-4829-9b67-475017ddd152'), ('da66a411af0e48299b67475017ddd152', 'da66a411af0e48299b67475017ddd152'), ('DA66A411-AF0E-4829-9B67-475017DDD152', 'DA66A411-AF0E-4829-9B67-475017DDD152'), ('DA66A411AF0E48299b67475017DDD152', 'DA66A411AF0E48299b67475017DDD152'), # These values are included to ensure there is not change in # behaviour - only when we can remove the old UUID behaviour can # we add these to the "self.coerce_bad_values" list ('da66a411-af0e-4829-9b67', 'da66a411-af0e-4829-9b67'), ('da66a411-af0e-4829-9b67-475017ddd152548999', 'da66a411-af0e-4829-9b67-475017ddd152548999'), ('da66a411-af0e-4829-9b67-475017ddz152', 'da66a411-af0e-4829-9b67-475017ddz152'), ('fake_uuid', 'fake_uuid'), ('fake_uāid', 'fake_uāid'), (b'fake_u\xe1id'.decode('latin_1'), b'fake_u\xe1id'.decode('latin_1')), ('1', '1'), (1, '1') ] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] @mock.patch('warnings.warn') def test_coerce_good_values(self, mock_warn): super().test_coerce_good_values() mock_warn.assert_has_calls( [mock.call(mock.ANY, FutureWarning)] * 8, ) def test_validation_warning_can_be_escalated_to_exception(self): warnings.filterwarnings(action='error') self.assertRaises(FutureWarning, self.field.coerce, 'obj', 'attr', 'not a uuid') def test_get_schema(self): field = fields.UUIDField() schema = field.get_schema() self.assertEqual(['string'], schema['type']) self.assertEqual(False, schema['readonly']) pattern = schema['pattern'] for _, valid_val in self.coerce_good_values[:4]: self.assertRegex(valid_val, pattern) invalid_vals = [x for x in self.coerce_bad_values if isinstance(x, str)] for invalid_val in invalid_vals: self.assertNotRegex(invalid_val, pattern) class TestBaseEnum(TestField): def setUp(self): super(TestBaseEnum, self).setUp() self.field = FakeEnumField() self.coerce_good_values = [('frog', 'frog'), ('platypus', 'platypus'), ('alligator', 'alligator')] 
self.coerce_bad_values = ['aardvark', 'wookie'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'platypus'", self.field.stringify('platypus')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'aardvark') def test_fingerprint(self): # Notes(yjiang5): make sure changing valid_value will be detected # in test_objects.test_versions field1 = FakeEnumField() field2 = FakeEnumAltField() self.assertNotEqual(str(field1), str(field2)) def test_valid_values(self): self.assertEqual(self.field.valid_values, FakeEnum.ALL) def test_valid_values_keeps_type(self): self.assertIsInstance(self.field.valid_values, tuple) self.assertIsInstance(FakeEnumAltField().valid_values, set) class TestEnum(TestField): def setUp(self): super(TestEnum, self).setUp() self.field = fields.EnumField( valid_values=['foo', 'bar', 1, True]) self.coerce_good_values = [('foo', 'foo'), (1, '1'), (True, 'True')] self.coerce_bad_values = ['boo', 2, False] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'foo'", self.field.stringify('foo')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, '123') def test_fieldtype_get_schema(self): self.assertEqual({'type': ['string'], 'enum': ["foo", "bar", 1, True]}, self.field._type.get_schema()) def test_get_schema(self): self.assertEqual({'type': ['string'], 'enum': ["foo", "bar", 1, True], 'readonly': False}, self.field.get_schema()) def test_fingerprint(self): # Notes(yjiang5): make sure changing valid_value will be detected # in test_objects.test_versions field1 = fields.EnumField(valid_values=['foo', 'bar']) field2 = fields.EnumField(valid_values=['foo', 'bar1']) self.assertNotEqual(str(field1), str(field2)) def test_missing_valid_values(self): 
self.assertRaises(exception.EnumRequiresValidValuesError, fields.EnumField, None) def test_empty_valid_values(self): self.assertRaises(exception.EnumRequiresValidValuesError, fields.EnumField, []) def test_non_iterable_valid_values(self): self.assertRaises(exception.EnumValidValuesInvalidError, fields.EnumField, True) def test_enum_subclass_check(self): def _test(): class BrokenEnumField(fields.BaseEnumField): AUTO_TYPE = int BrokenEnumField() self.assertRaises(exception.EnumFieldInvalid, _test) class TestStateMachine(TestField): def test_good_transitions(self): @obj_base.VersionedObjectRegistry.register class AnObject(obj_base.VersionedObject): fields = { 'status': FakeStateMachineField(), } obj = AnObject() obj.status = FakeStateMachineField.ACTIVE obj.status = FakeStateMachineField.PENDING obj.status = FakeStateMachineField.ERROR obj.status = FakeStateMachineField.PENDING obj.status = FakeStateMachineField.ACTIVE def test_bad_transitions(self): @obj_base.VersionedObjectRegistry.register class AnObject(obj_base.VersionedObject): fields = { 'status': FakeStateMachineField(), } obj = AnObject(status='ERROR') try: obj.status = FakeStateMachineField.ACTIVE except ValueError as e: ex = e else: ex = None self.assertIsNotNone(ex, 'Invalid transition failed to raise error') self.assertEqual('AnObject.status is not allowed to transition out ' 'of \'ERROR\' state to \'ACTIVE\' state, choose from ' '[\'PENDING\']', str(ex)) def test_bad_initial_value(self): @obj_base.VersionedObjectRegistry.register class AnObject(obj_base.VersionedObject): fields = { 'status': FakeStateMachineField(), } obj = AnObject() with testtools.ExpectedException(ValueError): obj.status = "FOO" def test_bad_updated_value(self): @obj_base.VersionedObjectRegistry.register class AnObject(obj_base.VersionedObject): fields = { 'status': FakeStateMachineField(), } obj = AnObject() with testtools.ExpectedException(ValueError): obj.status = FakeStateMachineField.ACTIVE obj.status = "FOO" class 
TestInteger(TestField): def setUp(self): super(TestField, self).setUp() self.field = fields.IntegerField() self.coerce_good_values = [(1, 1), ('1', 1)] self.coerce_bad_values = ['foo', None] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_fieldtype_get_schema(self): self.assertEqual({'type': ['integer']}, self.field._type.get_schema()) def test_get_schema(self): self.assertEqual({'type': ['integer'], 'readonly': False}, self.field.get_schema()) class TestNonNegativeInteger(TestField): def setUp(self): super(TestNonNegativeInteger, self).setUp() self.field = fields.NonNegativeIntegerField() self.coerce_good_values = [(1, 1), ('1', 1)] self.coerce_bad_values = ['-2', '4.2', 'foo', None] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_get_schema(self): self.assertEqual({'type': ['integer'], 'readonly': False, 'minimum': 0}, self.field.get_schema()) class TestFloat(TestField): def setUp(self): super(TestFloat, self).setUp() self.field = fields.FloatField() self.coerce_good_values = [(1.1, 1.1), ('1.1', 1.1)] self.coerce_bad_values = ['foo', None] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_fieldtype_get_schema(self): self.assertEqual({'type': ['number']}, self.field._type.get_schema()) def test_get_schema(self): self.assertEqual({'type': ['number'], 'readonly': False}, self.field.get_schema()) class TestNonNegativeFloat(TestField): def setUp(self): super(TestNonNegativeFloat, self).setUp() self.field = fields.NonNegativeFloatField() self.coerce_good_values = [(1.1, 1.1), ('1.1', 1.1)] self.coerce_bad_values = ['-4.2', 'foo', None] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_get_schema(self): self.assertEqual({'type': ['number'], 'readonly': False, 'minimum': 0}, 
self.field.get_schema()) class TestBoolean(TestField): def setUp(self): super(TestField, self).setUp() self.field = fields.BooleanField() self.coerce_good_values = [(True, True), (False, False), (1, True), ('foo', True), (0, False), ('', False)] self.coerce_bad_values = [] self.to_primitive_values = self.coerce_good_values[0:2] self.from_primitive_values = self.coerce_good_values[0:2] def test_fieldtype_get_schema(self): self.assertEqual({'type': ['boolean']}, self.field._type.get_schema()) def test_get_schema(self): self.assertEqual({'type': ['boolean'], 'readonly': False}, self.field.get_schema()) class TestFlexibleBoolean(TestField): def setUp(self): super(TestFlexibleBoolean, self).setUp() self.field = fields.FlexibleBooleanField() self.coerce_good_values = [(True, True), (False, False), ("true", True), ("false", False), ("t", True), ("f", False), ("yes", True), ("no", False), ("y", True), ("n", False), ("on", True), ("off", False), (1, True), (0, False), ('frog', False), ('', False)] self.coerce_bad_values = [] self.to_primitive_values = self.coerce_good_values[0:2] self.from_primitive_values = self.coerce_good_values[0:2] class TestDateTime(TestField): def setUp(self): super(TestDateTime, self).setUp() self.dt = datetime.datetime(1955, 11, 5, tzinfo=iso8601.iso8601.UTC) self.field = fields.DateTimeField() self.coerce_good_values = [(self.dt, self.dt), (_utils.isotime(self.dt), self.dt)] self.coerce_bad_values = [1, 'foo'] self.to_primitive_values = [(self.dt, _utils.isotime(self.dt))] self.from_primitive_values = [(_utils.isotime(self.dt), self.dt)] def test_stringify(self): self.assertEqual( '1955-11-05T18:00:00Z', self.field.stringify( datetime.datetime(1955, 11, 5, 18, 0, 0, tzinfo=iso8601.iso8601.UTC))) def test_get_schema(self): self.assertEqual({'type': ['string'], 'format': 'date-time', 'readonly': False}, self.field.get_schema()) class TestDateTimeNoTzinfo(TestField): def setUp(self): super(TestDateTimeNoTzinfo, self).setUp() self.dt = 
datetime.datetime(1955, 11, 5) self.field = fields.DateTimeField(tzinfo_aware=False) self.coerce_good_values = [(self.dt, self.dt), (_utils.isotime(self.dt), self.dt)] self.coerce_bad_values = [1, 'foo'] self.to_primitive_values = [(self.dt, _utils.isotime(self.dt))] self.from_primitive_values = [ ( _utils.isotime(self.dt), self.dt, ) ] def test_stringify(self): self.assertEqual( '1955-11-05T18:00:00Z', self.field.stringify( datetime.datetime(1955, 11, 5, 18, 0, 0))) class TestDict(TestField): def setUp(self): super(TestDict, self).setUp() self.field = fields.Field(fields.Dict(FakeFieldType())) self.coerce_good_values = [({'foo': 'bar'}, {'foo': '*bar*'}), ({'foo': 1}, {'foo': '*1*'})] self.coerce_bad_values = [{1: 'bar'}, 'foo'] self.to_primitive_values = [({'foo': 'bar'}, {'foo': '!bar!'})] self.from_primitive_values = [({'foo': '!bar!'}, {'foo': 'bar'})] def test_stringify(self): self.assertEqual("{key=val}", self.field.stringify({'key': 'val'})) def test_get_schema(self): self.assertEqual({'type': ['object'], 'additionalProperties': {'readonly': False, 'type': ['foo']}, 'readonly': False}, self.field.get_schema()) class TestDictOfStrings(TestField): def setUp(self): super(TestDictOfStrings, self).setUp() self.field = fields.DictOfStringsField() self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}), ({'foo': 1}, {'foo': '1'})] self.coerce_bad_values = [{1: 'bar'}, {'foo': None}, 'foo'] self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})] self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})] def test_stringify(self): self.assertEqual("{key='val'}", self.field.stringify({'key': 'val'})) class TestDictOfIntegers(TestField): def setUp(self): super(TestDictOfIntegers, self).setUp() self.field = fields.DictOfIntegersField() self.coerce_good_values = [({'foo': '42'}, {'foo': 42}), ({'foo': 4.2}, {'foo': 4})] self.coerce_bad_values = [{1: 'bar'}, {'foo': 'boo'}, 'foo', {'foo': None}] self.to_primitive_values = [({'foo': 42}, {'foo': 42})] 
self.from_primitive_values = [({'foo': 42}, {'foo': 42})] def test_stringify(self): self.assertEqual("{key=42}", self.field.stringify({'key': 42})) class TestDictOfStringsNone(TestField): def setUp(self): super(TestDictOfStringsNone, self).setUp() self.field = fields.DictOfNullableStringsField() self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}), ({'foo': 1}, {'foo': '1'}), ({'foo': None}, {'foo': None})] self.coerce_bad_values = [{1: 'bar'}, 'foo'] self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})] self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})] def test_stringify(self): self.assertEqual("{k2=None,key='val'}", self.field.stringify({'k2': None, 'key': 'val'})) class TestListOfDictOfNullableStringsField(TestField): def setUp(self): super(TestListOfDictOfNullableStringsField, self).setUp() self.field = fields.ListOfDictOfNullableStringsField() self.coerce_good_values = [([{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}], [{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}]), ([{'f': 1}, {'f1': 'b1'}], [{'f': '1'}, {'f1': 'b1'}]), ([{'foo': None}], [{'foo': None}])] self.coerce_bad_values = [[{1: 'a'}], ['ham', 1], ['eggs']] self.to_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, {'f2': None}], [{'f': 'b'}, {'f1': 'b1'}, {'f2': None}])] self.from_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, {'f2': None}], [{'f': 'b'}, {'f1': 'b1'}, {'f2': None}])] def test_stringify(self): self.assertEqual("[{f=None,f1='b1'},{f2='b2'}]", self.field.stringify( [{'f': None, 'f1': 'b1'}, {'f2': 'b2'}])) class TestList(TestField): def setUp(self): super(TestList, self).setUp() self.field = fields.Field(fields.List(FakeFieldType())) self.coerce_good_values = [(['foo', 'bar'], ['*foo*', '*bar*'])] self.coerce_bad_values = ['foo'] self.to_primitive_values = [(['foo'], ['!foo!'])] self.from_primitive_values = [(['!foo!'], ['foo'])] def test_stringify(self): self.assertEqual('[123]', self.field.stringify([123])) def test_fieldtype_get_schema(self): self.assertEqual({'type': 
['array'], 'items': {'type': ['foo'], 'readonly': False}}, self.field._type.get_schema()) def test_get_schema(self): self.assertEqual({'type': ['array'], 'items': {'type': ['foo'], 'readonly': False}, 'readonly': False}, self.field.get_schema()) class TestListOfStrings(TestField): def setUp(self): super(TestListOfStrings, self).setUp() self.field = fields.ListOfStringsField() self.coerce_good_values = [(['foo', 'bar'], ['foo', 'bar'])] self.coerce_bad_values = ['foo'] self.to_primitive_values = [(['foo'], ['foo'])] self.from_primitive_values = [(['foo'], ['foo'])] def test_stringify(self): self.assertEqual("['abc']", self.field.stringify(['abc'])) class TestDictOfListOfStrings(TestField): def setUp(self): super(TestDictOfListOfStrings, self).setUp() self.field = fields.DictOfListOfStringsField() self.coerce_good_values = [({'foo': ['1', '2']}, {'foo': ['1', '2']}), ({'foo': [1]}, {'foo': ['1']})] self.coerce_bad_values = [{'foo': [None, None]}, 'foo'] self.to_primitive_values = [({'foo': ['1', '2']}, {'foo': ['1', '2']})] self.from_primitive_values = [({'foo': ['1', '2']}, {'foo': ['1', '2']})] def test_stringify(self): self.assertEqual("{foo=['1','2']}", self.field.stringify({'foo': ['1', '2']})) class TestListOfEnum(TestField): def setUp(self): super(TestListOfEnum, self).setUp() self.field = fields.ListOfEnumField(valid_values=['foo', 'bar']) self.coerce_good_values = [(['foo', 'bar'], ['foo', 'bar'])] self.coerce_bad_values = ['foo', ['foo', 'bar1']] self.to_primitive_values = [(['foo'], ['foo'])] self.from_primitive_values = [(['foo'], ['foo'])] def test_stringify(self): self.assertEqual("['foo']", self.field.stringify(['foo'])) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, '[abc]') def test_fingerprint(self): # Notes(yjiang5): make sure changing valid_value will be detected # in test_objects.test_versions field1 = fields.ListOfEnumField(valid_values=['foo', 'bar']) field2 = fields.ListOfEnumField(valid_values=['foo', 
'bar1']) self.assertNotEqual(str(field1), str(field2)) class TestSet(TestField): def setUp(self): super(TestSet, self).setUp() self.field = fields.Field(fields.Set(FakeFieldType())) self.coerce_good_values = [(set(['foo', 'bar']), set(['*foo*', '*bar*']))] self.coerce_bad_values = [['foo'], {'foo': 'bar'}] self.to_primitive_values = [(set(['foo']), tuple(['!foo!']))] self.from_primitive_values = [(tuple(['!foo!']), set(['foo']))] def test_stringify(self): self.assertEqual('set([123])', self.field.stringify(set([123]))) def test_get_schema(self): self.assertEqual({'type': ['array'], 'uniqueItems': True, 'items': {'type': ['foo'], 'readonly': False}, 'readonly': False}, self.field.get_schema()) class TestSetOfIntegers(TestField): def setUp(self): super(TestSetOfIntegers, self).setUp() self.field = fields.SetOfIntegersField() self.coerce_good_values = [(set(['1', 2]), set([1, 2]))] self.coerce_bad_values = [set(['foo'])] self.to_primitive_values = [(set([1]), tuple([1]))] self.from_primitive_values = [(tuple([1]), set([1]))] def test_stringify(self): self.assertEqual('set([1,2])', self.field.stringify(set([1, 2]))) def test_repr(self): self.assertEqual("Set(default=,nullable=False)", repr(self.field)) self.assertEqual("Set(default=set([]),nullable=False)", repr(fields.SetOfIntegersField(default=set()))) self.assertEqual("Set(default=set([1,a]),nullable=False)", repr(fields.SetOfIntegersField(default={1, 'a'}))) class TestListOfSetsOfIntegers(TestField): def setUp(self): super(TestListOfSetsOfIntegers, self).setUp() self.field = fields.ListOfSetsOfIntegersField() self.coerce_good_values = [([set(['1', 2]), set([3, '4'])], [set([1, 2]), set([3, 4])])] self.coerce_bad_values = [[set(['foo'])]] self.to_primitive_values = [([set([1])], [tuple([1])])] self.from_primitive_values = [([tuple([1])], [set([1])])] def test_stringify(self): self.assertEqual('[set([1,2])]', self.field.stringify([set([1, 2])])) class TestListOfIntegers(TestField): def setUp(self): 
super(TestListOfIntegers, self).setUp() self.field = fields.ListOfIntegersField() self.coerce_good_values = [(['1', 2], [1, 2]), ([1, 2], [1, 2])] self.coerce_bad_values = [['foo']] self.to_primitive_values = [([1], [1])] self.from_primitive_values = [([1], [1])] def test_stringify(self): self.assertEqual('[[1, 2]]', self.field.stringify([[1, 2]])) class TestListOfUUIDField(TestField): def setUp(self): super(TestListOfUUIDField, self).setUp() self.field = fields.ListOfUUIDField() self.uuid1 = '6b2097ea-d0e3-44dd-b131-95472b3ea8fd' self.uuid2 = '478c193d-2533-4e71-ab2b-c7683f67d7f9' self.coerce_good_values = [([self.uuid1, self.uuid2], [self.uuid1, self.uuid2])] # coerce_bad_values is intentionally ignored since the UUID field # allows non-UUID values for now. See TestUUIDField for examples. self.to_primitive_values = [([self.uuid1], [self.uuid1])] self.from_primitive_values = [([self.uuid1], [self.uuid1])] def test_stringify(self): self.assertEqual('[%s,%s]' % (self.uuid1, self.uuid2), self.field.stringify([self.uuid1, self.uuid2])) class TestLocalMethods(test.TestCase): @mock.patch.object(obj_base.LOG, 'exception') def test__make_class_properties_setter_value_error(self, mock_log): @obj_base.VersionedObjectRegistry.register class AnObject(obj_base.VersionedObject): fields = { 'intfield': fields.IntegerField(), } self.assertRaises(ValueError, AnObject, intfield='badvalue') self.assertFalse(mock_log.called) @mock.patch.object(obj_base.LOG, 'exception') def test__make_class_properties_setter_setattr_fails(self, mock_log): @obj_base.VersionedObjectRegistry.register class AnObject(obj_base.VersionedObject): fields = { 'intfield': fields.IntegerField(), } # We want the setattr() call in _make_class_properties.setter() to # raise an exception with mock.patch.object(obj_base, '_get_attrname') as mock_attr: mock_attr.return_value = '__class__' self.assertRaises(TypeError, AnObject, intfield=2) mock_attr.assert_called_once_with('intfield') 
mock_log.assert_called_once_with(mock.ANY, {'attr': 'AnObject.intfield'}) class TestObject(TestField): def setUp(self): super(TestObject, self).setUp() @obj_base.VersionedObjectRegistry.register class TestableObject(obj_base.VersionedObject): fields = { 'uuid': fields.StringField(), } def __eq__(self, value): # NOTE(danms): Be rather lax about this equality thing to # satisfy the assertEqual() in test_from_primitive(). We # just want to make sure the right type of object is re-created return value.__class__.__name__ == TestableObject.__name__ class OtherTestableObject(obj_base.VersionedObject): pass test_inst = TestableObject() self._test_cls = TestableObject self.field = fields.Field(fields.Object('TestableObject')) self.coerce_good_values = [(test_inst, test_inst)] self.coerce_bad_values = [OtherTestableObject(), 1, 'foo'] self.to_primitive_values = [(test_inst, test_inst.obj_to_primitive())] self.from_primitive_values = [(test_inst.obj_to_primitive(), test_inst), (test_inst, test_inst)] def test_stringify(self): obj = self._test_cls(uuid='fake-uuid') self.assertEqual('TestableObject(fake-uuid)', self.field.stringify(obj)) def test_from_primitive(self): @obj_base.VersionedObjectRegistry.register class TestFakeObject(obj_base.VersionedObject): OBJ_PROJECT_NAMESPACE = 'fake-project' @obj_base.VersionedObjectRegistry.register class TestBar(TestFakeObject, obj_base.ComparableVersionedObject): fields = { 'name': fields.StringField(), } @obj_base.VersionedObjectRegistry.register class TestFoo(TestFakeObject, obj_base.ComparableVersionedObject): fields = { 'name': fields.StringField(), 'bar': fields.ObjectField('TestBar', nullable=True) } bar = TestBar(name='bar') foo = TestFoo(name='foo', bar=bar) from_primitive_values = [(foo.obj_to_primitive(), foo), (foo, foo)] for prim_val, out_val in from_primitive_values: self.assertEqual(out_val, self.field.from_primitive( foo, 'attr', prim_val)) def test_inheritance(self): # We need a whole lot of classes in a hierarchy to # 
test subclass recognition for the Object field class TestAnimal(obj_base.VersionedObject): pass class TestMammal(TestAnimal): pass class TestReptile(TestAnimal): pass # We'll use this to create a diamond in the # class hierarchy class TestPet(TestAnimal): pass # Non-versioned object mixin class TestScary(object): pass class TestCrocodile(TestReptile, TestPet, TestScary): pass class TestPig(TestMammal): pass class TestDog(TestMammal, TestPet): pass # Some fictional animals wolfy = TestDog() # Terminator-2 ticktock = TestCrocodile() # Peter Pan babe = TestPig() # Babe # The various classes animals = fields.Object('TestAnimal', subclasses=True) mammals = fields.Object('TestMammal', subclasses=True) reptiles = fields.Object('TestReptile', subclasses=True) pets = fields.Object('TestPet', subclasses=True) pigs = fields.Object('TestPig', subclasses=True) dogs = fields.Object('TestDog', subclasses=True) crocs = fields.Object('TestCrocodile', subclasses=True) self.assertEqual(["TestDog", "TestMammal", "TestPet", "TestAnimal", "VersionedObject"], fields.Object._get_all_obj_names(wolfy)) self.assertEqual(["TestCrocodile", "TestReptile", "TestPet", "TestAnimal", "VersionedObject"], fields.Object._get_all_obj_names(ticktock)) self.assertEqual(["TestPig", "TestMammal", "TestAnimal", "VersionedObject"], fields.Object._get_all_obj_names(babe)) # When stringifying we should see the subclass object name # not the base class object name self.assertEqual("TestDog", animals.stringify(wolfy)) self.assertEqual("TestCrocodile", animals.stringify(ticktock)) self.assertEqual("TestPig", animals.stringify(babe)) # Everything is an animal self.assertEqual(wolfy, animals.coerce(None, "animal", wolfy)) self.assertEqual(ticktock, animals.coerce(None, "animal", ticktock)) self.assertEqual(babe, animals.coerce(None, "animal", babe)) # crocodiles are not mammals self.assertEqual(wolfy, mammals.coerce(None, "animal", wolfy)) self.assertRaises(ValueError, mammals.coerce, None, "animal", ticktock) 
self.assertEqual(babe, mammals.coerce(None, "animal", babe)) # dogs and pigs are not reptiles self.assertRaises(ValueError, reptiles.coerce, None, "animal", wolfy) self.assertEqual(ticktock, reptiles.coerce(None, "animal", ticktock)) self.assertRaises(ValueError, reptiles.coerce, None, "animal", babe) # pigs are not pets, but crocodiles (!) & dogs are self.assertEqual(wolfy, pets.coerce(None, "animal", wolfy)) self.assertEqual(ticktock, pets.coerce(None, "animal", ticktock)) self.assertRaises(ValueError, pets.coerce, None, "animal", babe) # Only dogs are dogs self.assertEqual(wolfy, dogs.coerce(None, "animal", wolfy)) self.assertRaises(ValueError, dogs.coerce, None, "animal", ticktock) self.assertRaises(ValueError, dogs.coerce, None, "animal", babe) # Only crocs are crocs self.assertRaises(ValueError, crocs.coerce, None, "animal", wolfy) self.assertEqual(ticktock, crocs.coerce(None, "animal", ticktock)) self.assertRaises(ValueError, crocs.coerce, None, "animal", babe) # Only pigs are pigs self.assertRaises(ValueError, pigs.coerce, None, "animal", ticktock) self.assertRaises(ValueError, pigs.coerce, None, "animal", wolfy) self.assertEqual(babe, pigs.coerce(None, "animal", babe)) def test_coerce_bad_value_primitive_type(self): # Tests that the ValueError has the primitive type in it's message. 
ex = self.assertRaises(ValueError, self.field.coerce, 'obj', 'attr', [{}]) self.assertEqual('An object of type TestableObject is required ' 'in field attr, not a list', str(ex)) def test_get_schema(self): self.assertEqual( { 'properties': { 'versioned_object.changes': {'items': {'type': 'string'}, 'type': 'array'}, 'versioned_object.data': { 'description': 'fields of TestableObject', 'properties': {'uuid': {'readonly': False, 'type': ['string']}}, 'required': ['uuid'], 'type': 'object'}, 'versioned_object.name': {'type': 'string'}, 'versioned_object.namespace': {'type': 'string'}, 'versioned_object.version': {'type': 'string'} }, 'readonly': False, 'required': ['versioned_object.namespace', 'versioned_object.name', 'versioned_object.version', 'versioned_object.data'], 'type': ['object'] }, self.field.get_schema()) class TestIPAddress(TestField): def setUp(self): super(TestIPAddress, self).setUp() self.field = fields.IPAddressField() self.coerce_good_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')), ('::1', netaddr.IPAddress('::1')), (netaddr.IPAddress('::1'), netaddr.IPAddress('::1'))] self.coerce_bad_values = ['1-2', 'foo'] self.to_primitive_values = [(netaddr.IPAddress('1.2.3.4'), '1.2.3.4'), (netaddr.IPAddress('::1'), '::1')] self.from_primitive_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')), ('::1', netaddr.IPAddress('::1'))] class TestIPAddressV4(TestField): def setUp(self): super(TestIPAddressV4, self).setUp() self.field = fields.IPV4AddressField() self.coerce_good_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')), (netaddr.IPAddress('1.2.3.4'), netaddr.IPAddress('1.2.3.4'))] self.coerce_bad_values = ['1-2', 'foo', '::1'] self.to_primitive_values = [(netaddr.IPAddress('1.2.3.4'), '1.2.3.4')] self.from_primitive_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4'))] def test_get_schema(self): self.assertEqual({'type': ['string'], 'readonly': False, 'format': 'ipv4'}, self.field.get_schema()) class TestIPAddressV6(TestField): def setUp(self): 
super(TestIPAddressV6, self).setUp() self.field = fields.IPV6AddressField() self.coerce_good_values = [('::1', netaddr.IPAddress('::1')), (netaddr.IPAddress('::1'), netaddr.IPAddress('::1'))] self.coerce_bad_values = ['1.2', 'foo', '1.2.3.4'] self.to_primitive_values = [(netaddr.IPAddress('::1'), '::1')] self.from_primitive_values = [('::1', netaddr.IPAddress('::1'))] def test_get_schema(self): self.assertEqual({'type': ['string'], 'readonly': False, 'format': 'ipv6'}, self.field.get_schema()) class TestIPV4AndV6Address(TestField): def setUp(self): super(TestIPV4AndV6Address, self).setUp() self.field = fields.IPV4AndV6Address() self.coerce_good_values = [('::1', netaddr.IPAddress('::1')), (netaddr.IPAddress('::1'), netaddr.IPAddress('::1')), ('1.2.3.4', netaddr.IPAddress('1.2.3.4')), (netaddr.IPAddress('1.2.3.4'), netaddr.IPAddress('1.2.3.4'))] self.coerce_bad_values = ['1-2', 'foo'] self.to_primitive_values = [(netaddr.IPAddress('::1'), '::1'), (netaddr.IPAddress('1.2.3.4'), '1.2.3.4')] self.from_primitive_values = [('::1', netaddr.IPAddress('::1')), ('1.2.3.4', netaddr.IPAddress('1.2.3.4'))] def test_get_schema(self): self.assertEqual({'oneOf': [{'format': 'ipv4', 'type': ['string']}, {'format': 'ipv6', 'type': ['string']}]}, self.field.get_schema()) class TestIPNetwork(TestField): def setUp(self): super(TestIPNetwork, self).setUp() self.field = fields.IPNetworkField() self.coerce_good_values = [('::1/0', netaddr.IPNetwork('::1/0')), ('1.2.3.4/24', netaddr.IPNetwork('1.2.3.4/24')), (netaddr.IPNetwork('::1/32'), netaddr.IPNetwork('::1/32'))] self.coerce_bad_values = ['foo'] self.to_primitive_values = [(netaddr.IPNetwork('::1/0'), '::1/0')] self.from_primitive_values = [('::1/0', netaddr.IPNetwork('::1/0'))] class TestIPV4Network(TestField): def setUp(self): super(TestIPV4Network, self).setUp() self.field = fields.IPV4NetworkField() self.coerce_good_values = [('1.2.3.4/24', netaddr.IPNetwork('1.2.3.4/24'))] self.coerce_bad_values = ['foo', '::1/32'] 
self.to_primitive_values = [(netaddr.IPNetwork('1.2.3.4/24'), '1.2.3.4/24')] self.from_primitive_values = [('1.2.3.4/24', netaddr.IPNetwork('1.2.3.4/24'))] def test_get_schema(self): schema = self.field.get_schema() self.assertEqual(['string'], schema['type']) self.assertEqual(False, schema['readonly']) pattern = schema['pattern'] for _, valid_val in self.coerce_good_values: self.assertRegex(str(valid_val), pattern) invalid_vals = [x for x in self.coerce_bad_values] for invalid_val in invalid_vals: self.assertNotRegex(str(invalid_val), pattern) class TestIPV6Network(TestField): def setUp(self): super(TestIPV6Network, self).setUp() self.field = fields.IPV6NetworkField() self.coerce_good_values = [('::1/0', netaddr.IPNetwork('::1/0')), (netaddr.IPNetwork('::1/32'), netaddr.IPNetwork('::1/32'))] self.coerce_bad_values = ['foo', '1.2.3.4/24'] self.to_primitive_values = [(netaddr.IPNetwork('::1/0'), '::1/0')] self.from_primitive_values = [('::1/0', netaddr.IPNetwork('::1/0'))] def test_get_schema(self): schema = self.field.get_schema() self.assertEqual(['string'], schema['type']) self.assertEqual(False, schema['readonly']) pattern = schema['pattern'] for _, valid_val in self.coerce_good_values: self.assertRegex(str(valid_val), pattern) invalid_vals = [x for x in self.coerce_bad_values] for invalid_val in invalid_vals: self.assertNotRegex(str(invalid_val), pattern) class FakeCounter(object): def __init__(self): self.n = 0 def __iter__(self): return self def __next__(self): if self.n <= 4: self.n += 1 return self.n else: raise StopIteration class TestListTypes(test.TestCase): def test_regular_list(self): fields.List(fields.Integer).coerce(None, None, [1, 2]) def test_non_iterable(self): self.assertRaises(ValueError, fields.List(fields.Integer).coerce, None, None, 2) def test_string_iterable(self): self.assertRaises(ValueError, fields.List(fields.Integer).coerce, None, None, 'hello') def test_mapping_iterable(self): self.assertRaises(ValueError, 
fields.List(fields.Integer).coerce, None, None, {'a': 1, 'b': 2}) def test_iter_class(self): fields.List(fields.Integer).coerce(None, None, FakeCounter()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/tests/test_fixture.py0000664000175000017500000010053600000000000026736 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import datetime import hashlib import inspect from unittest import mock import iso8601 from oslo_versionedobjects import base from oslo_versionedobjects import exception from oslo_versionedobjects import fields from oslo_versionedobjects import fixture from oslo_versionedobjects import test class MyObject(base.VersionedObject): fields = {'diglett': fields.IntegerField()} @base.remotable def remotable_method(self): pass @classmethod @base.remotable def remotable_classmethod(cls): pass def non_remotable_method(self): pass @classmethod def non_remotable_classmethod(cls): pass class MyObject2(base.VersionedObject): pass class MyExtraObject(base.VersionedObject): pass class TestObjectComparators(test.TestCase): @base.VersionedObjectRegistry.register_if(False) class MyComparedObject(base.VersionedObject): fields = {'foo': fields.IntegerField(), 'bar': fields.IntegerField()} @base.VersionedObjectRegistry.register_if(False) class MyComparedObjectWithTZ(base.VersionedObject): 
fields = {'tzfield': fields.DateTimeField()} def test_compare_obj(self): mock_test = mock.Mock() mock_test.assertEqual = mock.Mock() my_obj = self.MyComparedObject(foo=1, bar=2) my_db_obj = {'foo': 1, 'bar': 2} fixture.compare_obj(mock_test, my_obj, my_db_obj) expected_calls = [(1, 1), (2, 2)] actual_calls = [c[0] for c in mock_test.assertEqual.call_args_list] for call in expected_calls: self.assertIn(call, actual_calls) def test_compare_obj_with_unset(self): # If the object has nothing set, and also the db object has the same # thing not set, it's OK. mock_test = mock.Mock() mock_test.assertEqual = mock.Mock() my_obj = self.MyComparedObject() my_db_obj = {} fixture.compare_obj(mock_test, my_obj, my_db_obj) self.assertFalse(mock_test.assertEqual.called, "assertEqual should " "not have been called, there is nothing to compare.") def test_compare_obj_with_unset_in_obj(self): # If the db dict has something set, but the object doesn't, that's != mock_test = mock.Mock() mock_test.assertEqual = mock.Mock() my_obj = self.MyComparedObject(foo=1) my_db_obj = {'foo': 1, 'bar': 2} self.assertRaises(AssertionError, fixture.compare_obj, mock_test, my_obj, my_db_obj) def test_compare_obj_with_unset_in_db_dict(self): # If the object has something set, but the db dict doesn't, that's != mock_test = mock.Mock() mock_test.assertEqual = mock.Mock() my_obj = self.MyComparedObject(foo=1, bar=2) my_db_obj = {'foo': 1} self.assertRaises(AssertionError, fixture.compare_obj, mock_test, my_obj, my_db_obj) def test_compare_obj_with_unset_in_obj_ignored(self): # If the db dict has something set, but the object doesn't, but we # ignore that key, we are equal my_obj = self.MyComparedObject(foo=1) my_db_obj = {'foo': 1, 'bar': 2} ignore = ['bar'] fixture.compare_obj(self, my_obj, my_db_obj, allow_missing=ignore) def test_compare_obj_with_unset_in_db_dict_ignored(self): # If the object has something set, but the db dict doesn't, but we # ignore that key, we are equal my_obj = 
self.MyComparedObject(foo=1, bar=2) my_db_obj = {'foo': 1} ignore = ['bar'] fixture.compare_obj(self, my_obj, my_db_obj, allow_missing=ignore) def test_compare_obj_with_allow_missing_unequal(self): # If the tested key is in allow_missing, but both the obj and db_obj # have the value set, we should still check it for equality mock_test = mock.Mock() mock_test.assertEqual = mock.Mock() my_obj = self.MyComparedObject(foo=1, bar=2) my_db_obj = {'foo': 1, 'bar': 1} ignore = ['bar'] fixture.compare_obj(mock_test, my_obj, my_db_obj, allow_missing=ignore) expected_calls = [(1, 1), (1, 2)] actual_calls = [c[0] for c in mock_test.assertEqual.call_args_list] for call in expected_calls: self.assertIn(call, actual_calls) def test_compare_obj_with_subs(self): mock_test = mock.Mock() mock_test.assertEqual = mock.Mock() my_obj = self.MyComparedObject(foo=1, bar=2) my_db_obj = {'doo': 1, 'bar': 2} subs = {'foo': 'doo'} fixture.compare_obj(mock_test, my_obj, my_db_obj, subs=subs) expected_calls = [(1, 1), (2, 2)] actual_calls = [c[0] for c in mock_test.assertEqual.call_args_list] for call in expected_calls: self.assertIn(call, actual_calls) def test_compare_obj_with_allow_missing(self): mock_test = mock.Mock() mock_test.assertEqual = mock.Mock() my_obj = self.MyComparedObject(foo=1) my_db_obj = {'foo': 1, 'bar': 2} ignores = ['bar'] fixture.compare_obj(mock_test, my_obj, my_db_obj, allow_missing=ignores) mock_test.assertEqual.assert_called_once_with(1, 1) def test_compare_obj_with_comparators(self): mock_test = mock.Mock() mock_test.assertEqual = mock.Mock() comparator = mock.Mock() comp_dict = {'foo': comparator} my_obj = self.MyComparedObject(foo=1, bar=2) my_db_obj = {'foo': 1, 'bar': 2} fixture.compare_obj(mock_test, my_obj, my_db_obj, comparators=comp_dict) comparator.assert_called_once_with(1, 1) mock_test.assertEqual.assert_called_once_with(2, 2) def test_compare_obj_with_dt(self): mock_test = mock.Mock() mock_test.assertEqual = mock.Mock() dt = datetime.datetime(1955, 11, 5, 
tzinfo=iso8601.iso8601.UTC) replaced_dt = dt.replace(tzinfo=None) my_obj = self.MyComparedObjectWithTZ(tzfield=dt) my_db_obj = {'tzfield': replaced_dt} fixture.compare_obj(mock_test, my_obj, my_db_obj) mock_test.assertEqual.assert_called_once_with(replaced_dt, replaced_dt) class FakeResource(base.VersionedObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'identifier': fields.Field(fields.Integer(), default=123) } class TestObjectVersionChecker(test.TestCase): def setUp(self): super(TestObjectVersionChecker, self).setUp() objects = [MyObject, MyObject2, ] self.obj_classes = {obj.__name__: [obj] for obj in objects} self.ovc = fixture.ObjectVersionChecker(obj_classes=self.obj_classes) def test_get_hashes(self): # Make sure get_hashes retrieves the fingerprint of all objects fp = 'ashketchum' with mock.patch.object(self.ovc, '_get_fingerprint') as mock_gf: mock_gf.return_value = fp actual = self.ovc.get_hashes() expected = self._generate_hashes(self.obj_classes, fp) self.assertEqual(expected, actual, "ObjectVersionChecker is not " "getting the fingerprints of all registered " "objects.") def test_get_hashes_with_extra_data(self): # Make sure get_hashes uses the extra_data_func fp = 'garyoak' mock_func = mock.MagicMock() with mock.patch.object(self.ovc, '_get_fingerprint') as mock_gf: mock_gf.return_value = fp actual = self.ovc.get_hashes(extra_data_func=mock_func) expected = self._generate_hashes(self.obj_classes, fp) expected_calls = [((name,), {'extra_data_func': mock_func}) for name in self.obj_classes.keys()] self.assertEqual(expected, actual, "ObjectVersionChecker is not " "getting the fingerprints of all registered " "objects.") self.assertEqual(len(expected_calls), len(mock_gf.call_args_list), "get_hashes() did not call get the fingerprints of " "all objects in the registry.") for call in expected_calls: self.assertIn(call, mock_gf.call_args_list, "get_hashes() did not call _get_fingerprint()" "correctly.") def 
test_test_hashes_none_changed(self): # Make sure test_hashes() generates an empty dictionary when # there are no objects that have changed fp = 'pikachu' hashes = self._generate_hashes(self.obj_classes, fp) with mock.patch.object(self.ovc, 'get_hashes') as mock_gh: mock_gh.return_value = hashes # I'm so sorry, but they have to be named this way actual_expected, actual_actual = self.ovc.test_hashes(hashes) expected_expected = expected_actual = {} self.assertEqual(expected_expected, actual_expected, "There are no " "objects changed, so the 'expected' return value " "should contain no objects.") self.assertEqual(expected_actual, actual_actual, "There are no " "objects changed, so the 'actual' return value " "should contain no objects.") def test_test_hashes_class_not_added(self): # Make sure the expected and actual values differ when a class # was added to the registry, but not the static dictionary fp = 'gyrados' new_classes = copy.copy(self.obj_classes) self._add_class(new_classes, MyExtraObject) expected_hashes = self._generate_hashes(self.obj_classes, fp) actual_hashes = self._generate_hashes(new_classes, fp) with mock.patch.object(self.ovc, 'get_hashes') as mock_gh: mock_gh.return_value = actual_hashes actual_exp, actual_act = self.ovc.test_hashes(expected_hashes) expected_expected = {MyExtraObject.__name__: None} expected_actual = {MyExtraObject.__name__: fp} self.assertEqual(expected_expected, actual_exp, "Expected hashes " "should not contain the fingerprint of the class " "that has not been added to the expected hash " "dictionary.") self.assertEqual(expected_actual, actual_act, "The actual hash " "should contain the class that was added to the " "registry.") def test_test_hashes_new_fp_incorrect(self): # Make sure the expected and actual values differ when a fingerprint # was changed, but the static dictionary was not updated fp1 = 'beedrill' fp2 = 'snorlax' expected_hashes = self._generate_hashes(self.obj_classes, fp1) actual_hashes = 
copy.copy(expected_hashes) actual_hashes[MyObject.__name__] = fp2 with mock.patch.object(self.ovc, 'get_hashes') as mock_gh: mock_gh.return_value = actual_hashes actual_exp, actual_act = self.ovc.test_hashes(expected_hashes) expected_expected = {MyObject.__name__: fp1} expected_actual = {MyObject.__name__: fp2} self.assertEqual(expected_expected, actual_exp, "Expected hashes " "should contain the updated object with the old " "hash.") self.assertEqual(expected_actual, actual_act, "Actual hashes " "should contain the updated object with the new " "hash.") def test_test_hashes_passes_extra_func(self): # Make sure that test_hashes passes the extra_func to get_hashes mock_extra_func = mock.Mock() with mock.patch.object(self.ovc, 'get_hashes') as mock_get_hashes: self.ovc.test_hashes({}, extra_data_func=mock_extra_func) mock_get_hashes.assert_called_once_with( extra_data_func=mock_extra_func) def test_get_dependency_tree(self): # Make sure get_dependency_tree() gets the dependencies of all # objects in the registry with mock.patch.object(self.ovc, '_get_dependencies') as mock_gd: self.ovc.get_dependency_tree() expected_calls = [(({}, MyObject),), (({}, MyObject2),)] self.assertEqual(2, len(mock_gd.call_args_list), "get_dependency_tree() tried to get the dependencies" " too many times.") for call in expected_calls: self.assertIn(call, mock_gd.call_args_list, "get_dependency_tree() did not get the dependencies " "of the objects correctly.") def test_test_relationships_none_changed(self): # Make sure test_relationships() generates an empty dictionary when # no relationships have been changed dep_tree = {} # tree will be {'MyObject': {'MyObject2': '1.0'}} self._add_dependency(MyObject, MyObject2, dep_tree) with mock.patch.object(self.ovc, 'get_dependency_tree') as mock_gdt: mock_gdt.return_value = dep_tree actual_exp, actual_act = self.ovc.test_relationships(dep_tree) expected_expected = expected_actual = {} self.assertEqual(expected_expected, actual_exp, "There are no " 
"objects changed, so the 'expected' return value " "should contain no objects.") self.assertEqual(expected_actual, actual_act, "There are no " "objects changed, so the 'actual' return value " "should contain no objects.") def test_test_relationships_rel_added(self): # Make sure expected and actual relationships differ if a # relationship is added to a class exp_tree = {} actual_tree = {} self._add_dependency(MyObject, MyObject2, exp_tree) self._add_dependency(MyObject, MyObject2, actual_tree) self._add_dependency(MyObject, MyExtraObject, actual_tree) with mock.patch.object(self.ovc, 'get_dependency_tree') as mock_gdt: mock_gdt.return_value = actual_tree actual_exp, actual_act = self.ovc.test_relationships(exp_tree) expected_expected = {'MyObject': {'MyObject2': '1.0'}} expected_actual = {'MyObject': {'MyObject2': '1.0', 'MyExtraObject': '1.0'}} self.assertEqual(expected_expected, actual_exp, "The expected " "relationship tree is not being built from changes " "correctly.") self.assertEqual(expected_actual, actual_act, "The actual " "relationship tree is not being built from changes " "correctly.") def test_test_relationships_class_added(self): # Make sure expected and actual relationships differ if a new # class is added to the relationship tree exp_tree = {} actual_tree = {} self._add_dependency(MyObject, MyObject2, exp_tree) self._add_dependency(MyObject, MyObject2, actual_tree) self._add_dependency(MyObject2, MyExtraObject, actual_tree) with mock.patch.object(self.ovc, 'get_dependency_tree') as mock_gdt: mock_gdt.return_value = actual_tree actual_exp, actual_act = self.ovc.test_relationships(exp_tree) expected_expected = {'MyObject2': None} expected_actual = {'MyObject2': {'MyExtraObject': '1.0'}} self.assertEqual(expected_expected, actual_exp, "The expected " "relationship tree is not being built from changes " "correctly.") self.assertEqual(expected_actual, actual_act, "The actual " "relationship tree is not being built from changes " "correctly.") def 
test_test_compatibility_routines(self): # Make sure test_compatibility_routines() checks the object # compatibility of all objects in the registry del self.ovc.obj_classes[MyObject2.__name__] with mock.patch.object(self.ovc, '_test_object_compatibility') as toc: self.ovc.test_compatibility_routines() toc.assert_called_once_with(MyObject, manifest=None, init_args=[], init_kwargs={}) def test_test_compatibility_routines_with_manifest(self): # Make sure test_compatibility_routines() uses the version manifest del self.ovc.obj_classes[MyObject2.__name__] man = {'who': 'cares'} with mock.patch.object(self.ovc, '_test_object_compatibility') as toc: with mock.patch('oslo_versionedobjects.base' '.obj_tree_get_versions') as otgv: otgv.return_value = man self.ovc.test_compatibility_routines(use_manifest=True) otgv.assert_called_once_with(MyObject.__name__) toc.assert_called_once_with(MyObject, manifest=man, init_args=[], init_kwargs={}) def test_test_compatibility_routines_with_args_kwargs(self): # Make sure test_compatibility_routines() uses init args/kwargs del self.ovc.obj_classes[MyObject2.__name__] init_args = {MyObject: [1]} init_kwargs = {MyObject: {'foo': 'bar'}} with mock.patch.object(self.ovc, '_test_object_compatibility') as toc: self.ovc.test_compatibility_routines(init_args=init_args, init_kwargs=init_kwargs) toc.assert_called_once_with(MyObject, manifest=None, init_args=[1], init_kwargs={'foo': 'bar'}) def test_test_relationships_in_order(self): # Make sure test_relationships_in_order() tests the relationships # of all objects in the registry with mock.patch.object(self.ovc, '_test_relationships_in_order') as mock_tr: self.ovc.test_relationships_in_order() expected_calls = [((MyObject,),), ((MyObject2,),)] self.assertEqual(2, len(mock_tr.call_args_list), "test_relationships_in_order() tested too many " "relationships.") for call in expected_calls: self.assertIn(call, mock_tr.call_args_list, "test_relationships_in_order() did not test the " "relationships of the 
individual objects " "correctly.") def test_test_relationships_in_order_positive(self): # Make sure a correct relationship ordering doesn't blow up rels = {'bellsprout': [('1.0', '1.0'), ('1.1', '1.2'), ('1.3', '1.3')]} MyObject.obj_relationships = rels self.ovc._test_relationships_in_order(MyObject) def test_test_relationships_in_order_negative(self): # Make sure an out-of-order relationship does blow up rels = {'rattata': [('1.0', '1.0'), ('1.1', '1.2'), ('1.3', '1.1')]} MyObject.obj_relationships = rels self.assertRaises(AssertionError, self.ovc._test_relationships_in_order, MyObject) def test_find_remotable_method(self): # Make sure we can find a remotable method on an object method = self.ovc._find_remotable_method(MyObject, MyObject.remotable_method) self.assertEqual(MyObject.remotable_method.original_fn, method, "_find_remotable_method() did not find the remotable" " method of MyObject.") def test_find_remotable_method_classmethod(self): # Make sure we can find a remotable classmethod on an object rcm = MyObject.remotable_classmethod method = self.ovc._find_remotable_method(MyObject, rcm) expected = rcm.__get__(None, MyObject).original_fn self.assertEqual(expected, method, "_find_remotable_method() did not " "find the remotable classmethod.") def test_find_remotable_method_non_remotable_method(self): # Make sure nothing is found when we have only a non-remotable method nrm = MyObject.non_remotable_method method = self.ovc._find_remotable_method(MyObject, nrm) self.assertIsNone(method, "_find_remotable_method() found a method " "that isn't remotable.") def test_find_remotable_method_non_remotable_classmethod(self): # Make sure we don't find a non-remotable classmethod nrcm = MyObject.non_remotable_classmethod method = self.ovc._find_remotable_method(MyObject, nrcm) self.assertIsNone(method, "_find_remotable_method() found a method " "that isn't remotable.") def test_get_fingerprint(self): # Make sure _get_fingerprint() generates a consistent fingerprint 
MyObject.VERSION = '1.1' argspec = 'vulpix' with mock.patch.object(fixture, 'get_method_spec') as mock_gas: mock_gas.return_value = argspec fp = self.ovc._get_fingerprint(MyObject.__name__) exp_fields = sorted(list(MyObject.fields.items())) exp_methods = sorted([('remotable_method', argspec), ('remotable_classmethod', argspec)]) expected_relevant_data = (exp_fields, exp_methods) # NOTE(hberaud) the following hashlib usage will emit a bandit # warning. It can be solved by passing `usedforsecurity=False` to # the md5 function, however, this parameter was introduced with py39 # so passing it will break py38 unittest. I'd suggest to ignore this # bandit rule while py38 is in our supported runtimes. expected_hash = hashlib.md5(bytes(repr( expected_relevant_data).encode())).hexdigest() # nosec expected_fp = '%s-%s' % (MyObject.VERSION, expected_hash) self.assertEqual(expected_fp, fp, "_get_fingerprint() did not " "generate a correct fingerprint.") def test_get_fingerprint_with_child_versions(self): # Make sure _get_fingerprint() generates a consistent fingerprint # when child_versions are present child_versions = {'1.0': '1.0', '1.1': '1.1'} MyObject.VERSION = '1.1' MyObject.child_versions = child_versions argspec = 'onix' with mock.patch.object(fixture, 'get_method_spec') as mock_gas: mock_gas.return_value = argspec fp = self.ovc._get_fingerprint(MyObject.__name__) exp_fields = sorted(list(MyObject.fields.items())) exp_methods = sorted([('remotable_method', argspec), ('remotable_classmethod', argspec)]) exp_child_versions = collections.OrderedDict(sorted( child_versions.items())) exp_relevant_data = (exp_fields, exp_methods, exp_child_versions) # NOTE(hberaud) the following hashlib usage will emit a bandit # warning. It can be solved by passing `usedforsecurity=False` to # the md5 function, however, this parameter was introduced with py39 # so passing it will break py38 unittest. I'd suggest to ignore this # bandit rule while py38 is in our supported runtimes. 
expected_hash = hashlib.md5(bytes(repr( exp_relevant_data).encode())).hexdigest() # nosec expected_fp = '%s-%s' % (MyObject.VERSION, expected_hash) self.assertEqual(expected_fp, fp, "_get_fingerprint() did not " "generate a correct fingerprint.") def test_get_fingerprint_with_extra_data(self): # Make sure _get_fingerprint() uses extra_data_func when it is # supplied class ExtraDataObj(base.VersionedObject): pass def get_data(obj_class): return (obj_class,) ExtraDataObj.VERSION = '1.1' argspec = 'cubone' self._add_class(self.obj_classes, ExtraDataObj) with mock.patch.object(fixture, 'get_method_spec') as mock_gas: mock_gas.return_value = argspec fp = self.ovc._get_fingerprint(ExtraDataObj.__name__, extra_data_func=get_data) exp_fields = [] exp_methods = [] exp_extra_data = ExtraDataObj exp_relevant_data = (exp_fields, exp_methods, exp_extra_data) # NOTE(hberaud) the following hashlib usage will emit a bandit # warning. It can be solved by passing `usedforsecurity=False` to # the md5 function, however, this parameter was introduced with py39 # so passing it will break py38 unittest. I'd suggest to ignore this # bandit rule while py38 is in our supported runtimes. 
expected_hash = hashlib.md5(bytes(repr( exp_relevant_data).encode())).hexdigest() # nosec expected_fp = '%s-%s' % (ExtraDataObj.VERSION, expected_hash) self.assertEqual(expected_fp, fp, "_get_fingerprint() did not " "generate a correct fingerprint.") def test_get_fingerprint_with_defaulted_set(self): class ClassWithDefaultedSetField(base.VersionedObject): VERSION = 1.0 fields = { 'empty_default': fields.SetOfIntegersField(default=set()), 'non_empty_default': fields.SetOfIntegersField(default={1, 2}) } self._add_class(self.obj_classes, ClassWithDefaultedSetField) # it is expected that this hash is stable across python versions expected = '1.0-bcc44920f2f727eca463c6eb4fe8445b' actual = self.ovc._get_fingerprint(ClassWithDefaultedSetField.__name__) self.assertEqual(expected, actual) def test_get_dependencies(self): # Make sure _get_dependencies() generates a correct tree when parsing # an object self._add_class(self.obj_classes, MyExtraObject) MyObject.fields['subob'] = fields.ObjectField('MyExtraObject') MyExtraObject.VERSION = '1.0' tree = {} self.ovc._get_dependencies(tree, MyObject) expected_tree = {'MyObject': {'MyExtraObject': '1.0'}} self.assertEqual(expected_tree, tree, "_get_dependencies() did " "not generate a correct dependency tree.") def test_test_object_compatibility(self): # Make sure _test_object_compatibility() tests obj_to_primitive() # on each prior version to the current version to_prim = mock.MagicMock(spec=callable) MyObject.VERSION = '1.1' MyObject.obj_to_primitive = to_prim self.ovc._test_object_compatibility(MyObject) expected_calls = [((), {'target_version': '1.0'}), ((), {'target_version': '1.1'})] self.assertEqual(expected_calls, to_prim.call_args_list, "_test_object_compatibility() did not test " "obj_to_primitive() on the correct target versions") def test_test_object_compatibility_args_kwargs(self): # Make sure _test_object_compatibility() tests obj_to_primitive() # with the correct args and kwargs to init to_prim = 
mock.MagicMock(spec=callable) MyObject.obj_to_primitive = to_prim MyObject.VERSION = '1.1' args = [1] kwargs = {'foo': 'bar'} with mock.patch.object(MyObject, '__init__', return_value=None) as mock_init: self.ovc._test_object_compatibility(MyObject, init_args=args, init_kwargs=kwargs) expected_init = ((1,), {'foo': 'bar'}) expected_init_calls = [expected_init, expected_init] self.assertEqual(expected_init_calls, mock_init.call_args_list, "_test_object_compatibility() did not call " "__init__() properly on the object") expected_to_prim = [((), {'target_version': '1.0'}), ((), {'target_version': '1.1'})] self.assertEqual(expected_to_prim, to_prim.call_args_list, "_test_object_compatibility() did not test " "obj_to_primitive() on the correct target versions") def _add_class(self, obj_classes, cls): obj_classes[cls.__name__] = [cls] def _generate_hashes(self, classes, fp): # Generate hashes for classes, giving fp as the fingerprint # for all classes return {cls: fp for cls in classes.keys()} def _add_dependency(self, parent_cls, child_cls, tree): # Add a dependency to the tree with the parent class holding # version 1.0 of the given child class deps = tree.get(parent_cls.__name__, {}) deps[child_cls.__name__] = '1.0' tree[parent_cls.__name__] = deps class TestVersionedObjectRegistryFixture(test.TestCase): primitive = {'versioned_object.name': 'FakeResource', 'versioned_object.namespace': 'versionedobjects', 'versioned_object.version': '1.0', 'versioned_object.data': {'identifier': 123}} def test_object_registered_temporarily(self): # Test object that has not been registered self.assertRaises( exception.UnsupportedObjectError, FakeResource.obj_from_primitive, self.primitive) with fixture.VersionedObjectRegistryFixture() as obj_registry: # Register object locally obj_registry.setUp() obj_registry.register(FakeResource) # Test object has now been registered obj = FakeResource.obj_from_primitive( self.primitive) self.assertEqual(obj.identifier, 123) self.assertEqual('1.0', 
obj.VERSION) # Test object that is no longer registered self.assertRaises( exception.UnsupportedObjectError, FakeResource.obj_from_primitive, self.primitive) class TestStableObjectJsonFixture(test.TestCase): def test_changes_sort(self): @base.VersionedObjectRegistry.register_if(False) class TestObject(base.VersionedObject): fields = {'z': fields.StringField(), 'a': fields.StringField()} def obj_what_changed(self): return ['z', 'a'] obj = TestObject(a='foo', z='bar') self.assertEqual(['z', 'a'], obj.obj_to_primitive()['versioned_object.changes']) with fixture.StableObjectJsonFixture(): self.assertEqual( ['a', 'z'], obj.obj_to_primitive()['versioned_object.changes']) class TestMethodSpec(test.TestCase): def setUp(self): super(TestMethodSpec, self).setUp() def test_method1(a, b, kw1=123, **kwargs): pass def test_method2(a, b, *args): pass def test_method3(a, b, *args, kw1=123, **kwargs): pass self._test_method1 = test_method1 self._test_method2 = test_method2 self._test_method3 = test_method3 def test_method_spec_compat(self): self.assertEqual(fixture.CompatArgSpec(args=['a', 'b', 'kw1'], varargs=None, keywords='kwargs', defaults=(123,)), fixture.get_method_spec(self._test_method1)) self.assertEqual(fixture.CompatArgSpec(args=['a', 'b'], varargs='args', keywords=None, defaults=None), fixture.get_method_spec(self._test_method2)) self.assertEqual(inspect.getfullargspec(self._test_method3), fixture.get_method_spec(self._test_method3)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/oslo_versionedobjects/tests/test_objects.py0000664000175000017500000030553300000000000026705 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import jsonschema import logging from unittest import mock from oslo_context import context from oslo_serialization import jsonutils from oslo_utils import timeutils import testtools from testtools import matchers from oslo_versionedobjects import base from oslo_versionedobjects import exception from oslo_versionedobjects import fields from oslo_versionedobjects import fixture from oslo_versionedobjects import test LOG = logging.getLogger(__name__) def is_test_object(cls): """Return True if class is defined in the tests. :param cls: Class to inspect """ return 'oslo_versionedobjects.tests' in cls.__module__ @base.VersionedObjectRegistry.register class MyOwnedObject(base.VersionedObject): VERSION = '1.0' fields = {'baz': fields.Field(fields.Integer())} @base.VersionedObjectRegistry.register class MyObj(base.VersionedObject, base.VersionedObjectDictCompat): VERSION = '1.6' fields = {'foo': fields.Field(fields.Integer(), default=1), 'bar': fields.Field(fields.String()), 'missing': fields.Field(fields.String()), 'readonly': fields.Field(fields.Integer(), read_only=True), 'rel_object': fields.ObjectField('MyOwnedObject', nullable=True), 'rel_objects': fields.ListOfObjectsField('MyOwnedObject', nullable=True), 'mutable_default': fields.ListOfStringsField(default=[]), 'timestamp': fields.DateTimeField(nullable=True), } @staticmethod def _from_db_object(context, obj, db_obj): self = MyObj() self.foo = db_obj['foo'] self.bar = db_obj['bar'] self.missing = db_obj['missing'] self.readonly = 1 return self def obj_load_attr(self, attrname): 
setattr(self, attrname, 'loaded!') @base.remotable_classmethod def query(cls, context): obj = cls(context=context, foo=1, bar='bar') obj.obj_reset_changes() return obj @base.remotable def marco(self): return 'polo' @base.remotable def _update_test(self): project_id = getattr(context, 'tenant', None) if project_id is None: project_id = getattr(context, 'project_id', None) if project_id == 'alternate': self.bar = 'alternate-context' else: self.bar = 'updated' @base.remotable def save(self): self.obj_reset_changes() @base.remotable def refresh(self): self.foo = 321 self.bar = 'refreshed' self.obj_reset_changes() @base.remotable def modify_save_modify(self): self.bar = 'meow' self.save() self.foo = 42 self.rel_object = MyOwnedObject(baz=42) def obj_make_compatible(self, primitive, target_version): super(MyObj, self).obj_make_compatible(primitive, target_version) # NOTE(danms): Simulate an older version that had a different # format for the 'bar' attribute if target_version == '1.1' and 'bar' in primitive: primitive['bar'] = 'old%s' % primitive['bar'] @base.VersionedObjectRegistry.register class MyComparableObj(MyObj, base.ComparableVersionedObject): pass @base.VersionedObjectRegistry.register class MyObjDiffVers(MyObj): VERSION = '1.5' @classmethod def obj_name(cls): return 'MyObj' @base.VersionedObjectRegistry.register_if(False) class MyObj2(base.VersionedObject): @classmethod def obj_name(cls): return 'MyObj' @base.remotable_classmethod def query(cls, *args, **kwargs): pass @base.VersionedObjectRegistry.register_if(False) class MySensitiveObj(base.VersionedObject): VERSION = '1.0' fields = { 'data': fields.SensitiveStringField(nullable=True) } @base.remotable_classmethod def query(cls, *args, **kwargs): pass class RandomMixInWithNoFields(object): """Used to test object inheritance using a mixin that has no fields.""" pass @base.VersionedObjectRegistry.register class TestSubclassedObject(RandomMixInWithNoFields, MyObj): fields = {'new_field': 
fields.Field(fields.String())} child_versions = { '1.0': '1.0', '1.1': '1.1', '1.2': '1.1', '1.3': '1.2', '1.4': '1.3', '1.5': '1.4', '1.6': '1.5', '1.7': '1.6', } @base.VersionedObjectRegistry.register class MyCompoundObject(base.VersionedObject): fields = { "foo": fields.Field(fields.List(fields.Integer())), "bar": fields.Field(fields.Dict(fields.Integer())), "baz": fields.Field(fields.Set(fields.Integer())) } class TestRegistry(test.TestCase): def test_obj_tracking(self): @base.VersionedObjectRegistry.register class NewBaseClass(object): VERSION = '1.0' fields = {} @classmethod def obj_name(cls): return cls.__name__ @base.VersionedObjectRegistry.register class Fake1TestObj1(NewBaseClass): @classmethod def obj_name(cls): return 'fake1' @base.VersionedObjectRegistry.register class Fake1TestObj2(Fake1TestObj1): pass @base.VersionedObjectRegistry.register class Fake1TestObj3(Fake1TestObj1): VERSION = '1.1' @base.VersionedObjectRegistry.register class Fake2TestObj1(NewBaseClass): @classmethod def obj_name(cls): return 'fake2' @base.VersionedObjectRegistry.register class Fake1TestObj4(Fake1TestObj3): VERSION = '1.2' @base.VersionedObjectRegistry.register class Fake2TestObj2(Fake2TestObj1): VERSION = '1.1' @base.VersionedObjectRegistry.register class Fake1TestObj5(Fake1TestObj1): VERSION = '1.1' @base.VersionedObjectRegistry.register_if(False) class ConditionalObj1(NewBaseClass): fields = {'foo': fields.IntegerField()} @base.VersionedObjectRegistry.register_if(True) class ConditionalObj2(NewBaseClass): fields = {'foo': fields.IntegerField()} # Newest versions first in the list. Duplicate versions take the # newest object. 
expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2], 'fake2': [Fake2TestObj2, Fake2TestObj1]} self.assertEqual(expected['fake1'], base.VersionedObjectRegistry.obj_classes()['fake1']) self.assertEqual(expected['fake2'], base.VersionedObjectRegistry.obj_classes()['fake2']) self.assertEqual( [], base.VersionedObjectRegistry.obj_classes()['ConditionalObj1']) self.assertTrue(hasattr(ConditionalObj1, 'foo')) self.assertEqual( [ConditionalObj2], base.VersionedObjectRegistry.obj_classes()['ConditionalObj2']) self.assertTrue(hasattr(ConditionalObj2, 'foo')) def test_field_checking(self): def create_class(field): @base.VersionedObjectRegistry.register class TestField(base.VersionedObject): VERSION = '1.5' fields = {'foo': field()} return TestField create_class(fields.DateTimeField) self.assertRaises(exception.ObjectFieldInvalid, create_class, fields.DateTime) self.assertRaises(exception.ObjectFieldInvalid, create_class, int) def test_registration_hook(self): class TestObject(base.VersionedObject): VERSION = '1.0' class TestObjectNewer(base.VersionedObject): VERSION = '1.1' @classmethod def obj_name(cls): return 'TestObject' registry = base.VersionedObjectRegistry() with mock.patch.object(registry, 'registration_hook') as mock_hook: registry._register_class(TestObject) mock_hook.assert_called_once_with(TestObject, 0) with mock.patch.object(registry, 'registration_hook') as mock_hook: registry._register_class(TestObjectNewer) mock_hook.assert_called_once_with(TestObjectNewer, 0) def test_subclassability(self): class MyRegistryOne(base.VersionedObjectRegistry): def registration_hook(self, cls, index): cls.reg_to = "one" class MyRegistryTwo(base.VersionedObjectRegistry): def registration_hook(self, cls, index): cls.reg_to = "two" @MyRegistryOne.register class AVersionedObject1(base.VersionedObject): VERSION = '1.0' fields = {'baz': fields.Field(fields.Integer())} @MyRegistryTwo.register class AVersionedObject2(base.VersionedObject): VERSION = '1.0' fields = 
{'baz': fields.Field(fields.Integer())} self.assertIn('AVersionedObject1', MyRegistryOne.obj_classes()) self.assertIn('AVersionedObject2', MyRegistryOne.obj_classes()) self.assertIn('AVersionedObject1', MyRegistryTwo.obj_classes()) self.assertIn('AVersionedObject2', MyRegistryTwo.obj_classes()) self.assertIn('AVersionedObject1', base.VersionedObjectRegistry.obj_classes()) self.assertIn('AVersionedObject2', base.VersionedObjectRegistry.obj_classes()) self.assertEqual(AVersionedObject1.reg_to, "one") self.assertEqual(AVersionedObject2.reg_to, "two") @mock.patch.object(base.VersionedObjectRegistry, '__new__') def test_register(self, mock_registry): mock_reg_obj = mock.Mock() mock_registry.return_value = mock_reg_obj mock_reg_obj._register_class = mock.Mock() class my_class(object): pass base.VersionedObjectRegistry.register(my_class) mock_reg_obj._register_class.assert_called_once_with(my_class) @mock.patch.object(base.VersionedObjectRegistry, 'register') def test_register_if(self, mock_register): class my_class(object): pass base.VersionedObjectRegistry.register_if(True)(my_class) mock_register.assert_called_once_with(my_class) @mock.patch.object(base, '_make_class_properties') def test_register_if_false(self, mock_make_props): class my_class(object): pass base.VersionedObjectRegistry.register_if(False)(my_class) mock_make_props.assert_called_once_with(my_class) @mock.patch.object(base.VersionedObjectRegistry, 'register_if') def test_objectify(self, mock_register_if): mock_reg_callable = mock.Mock() mock_register_if.return_value = mock_reg_callable class my_class(object): pass base.VersionedObjectRegistry.objectify(my_class) mock_register_if.assert_called_once_with(False) mock_reg_callable.assert_called_once_with(my_class) class TestObjMakeList(test.TestCase): def test_obj_make_list(self): @base.VersionedObjectRegistry.register class MyList(base.ObjectListBase, base.VersionedObject): fields = { 'objects': fields.ListOfObjectsField('MyObj'), } db_objs = [{'foo': 1, 
'bar': 'baz', 'missing': 'banana'}, {'foo': 2, 'bar': 'bat', 'missing': 'apple'}, ] mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs) self.assertEqual(2, len(mylist)) self.assertEqual('ctxt', mylist._context) for index, item in enumerate(mylist): self.assertEqual(db_objs[index]['foo'], item.foo) self.assertEqual(db_objs[index]['bar'], item.bar) self.assertEqual(db_objs[index]['missing'], item.missing) class TestGetSubobjectVersion(test.TestCase): def setUp(self): super(TestGetSubobjectVersion, self).setUp() self.backport_mock = mock.MagicMock() self.rels = [('1.1', '1.0'), ('1.3', '1.1')] def test_get_subobject_version_not_existing(self): # Verify that exception is raised if we try backporting # to a version where we did not contain the subobject self.assertRaises(exception.TargetBeforeSubobjectExistedException, base._get_subobject_version, '1.0', self.rels, self.backport_mock) def test_get_subobject_version_explicit_version(self): # Verify that we backport to the correct subobject version when the # version we are going back to is explicitly said in the relationships base._get_subobject_version('1.3', self.rels, self.backport_mock) self.backport_mock.assert_called_once_with('1.1') def test_get_subobject_version_implicit_version(self): # Verify that we backport to the correct subobject version when the # version backporting to is not explicitly stated in the relationships base._get_subobject_version('1.2', self.rels, self.backport_mock) self.backport_mock.assert_called_once_with('1.0') class TestDoSubobjectBackport(test.TestCase): @base.VersionedObjectRegistry.register class ParentObj(base.VersionedObject): VERSION = '1.1' fields = {'child': fields.ObjectField('ChildObj', nullable=True)} obj_relationships = {'child': [('1.0', '1.0'), ('1.1', '1.1')]} @base.VersionedObjectRegistry.register class ParentObjList(base.VersionedObject, base.ObjectListBase): VERSION = '1.1' fields = {'objects': fields.ListOfObjectsField('ChildObj')} obj_relationships = 
{'objects': [('1.0', '1.0'), ('1.1', '1.1')]} @base.VersionedObjectRegistry.register class ChildObj(base.VersionedObject): VERSION = '1.1' fields = {'foo': fields.IntegerField()} def test_do_subobject_backport_without_manifest(self): child = self.ChildObj(foo=1) parent = self.ParentObj(child=child) parent_primitive = parent.obj_to_primitive()['versioned_object.data'] primitive = child.obj_to_primitive()['versioned_object.data'] version = '1.0' compat_func = 'obj_make_compatible_from_manifest' with mock.patch.object(child, compat_func) as mock_compat: base._do_subobject_backport(version, parent, 'child', parent_primitive) mock_compat.assert_called_once_with(primitive, version, version_manifest=None) def test_do_subobject_backport_with_manifest(self): child = self.ChildObj(foo=1) parent = self.ParentObj(child=child) parent_primitive = parent.obj_to_primitive()['versioned_object.data'] primitive = child.obj_to_primitive()['versioned_object.data'] version = '1.0' manifest = {'ChildObj': '1.0'} parent._obj_version_manifest = manifest compat_func = 'obj_make_compatible_from_manifest' with mock.patch.object(child, compat_func) as mock_compat: base._do_subobject_backport(version, parent, 'child', parent_primitive) mock_compat.assert_called_once_with(primitive, version, version_manifest=manifest) def test_do_subobject_backport_with_manifest_old_parent(self): child = self.ChildObj(foo=1) parent = self.ParentObj(child=child) manifest = {'ChildObj': '1.0'} parent_primitive = parent.obj_to_primitive(target_version='1.1', version_manifest=manifest) child_primitive = parent_primitive['versioned_object.data']['child'] self.assertEqual('1.0', child_primitive['versioned_object.version']) def test_do_subobject_backport_list_object(self): child = self.ChildObj(foo=1) parent = self.ParentObjList(objects=[child]) parent_primitive = parent.obj_to_primitive()['versioned_object.data'] primitive = child.obj_to_primitive()['versioned_object.data'] version = '1.0' compat_func = 
'obj_make_compatible_from_manifest' with mock.patch.object(child, compat_func) as mock_compat: base._do_subobject_backport(version, parent, 'objects', parent_primitive) mock_compat.assert_called_once_with(primitive, version, version_manifest=None) def test_do_subobject_backport_list_object_with_manifest(self): child = self.ChildObj(foo=1) parent = self.ParentObjList(objects=[child]) manifest = {'ChildObj': '1.0', 'ParentObjList': '1.0'} parent_primitive = parent.obj_to_primitive(target_version='1.0', version_manifest=manifest) self.assertEqual('1.0', parent_primitive['versioned_object.version']) child_primitive = parent_primitive['versioned_object.data']['objects'] self.assertEqual('1.0', child_primitive[0]['versioned_object.version']) def test_do_subobject_backport_null_child(self): parent = self.ParentObj(child=None) parent_primitive = parent.obj_to_primitive()['versioned_object.data'] version = '1.0' compat_func = 'obj_make_compatible_from_manifest' with mock.patch.object(self.ChildObj, compat_func) as mock_compat: base._do_subobject_backport(version, parent, 'child', parent_primitive) self.assertFalse(mock_compat.called, "obj_make_compatible_from_manifest() should not " "have been called because the subobject is " "None.") def test_to_primitive_calls_make_compatible_manifest(self): obj = self.ParentObj() with mock.patch.object(obj, 'obj_make_compatible_from_manifest') as m: obj.obj_to_primitive(target_version='1.0', version_manifest=mock.sentinel.manifest) m.assert_called_once_with(mock.ANY, '1.0', mock.sentinel.manifest) class _BaseTestCase(test.TestCase): def setUp(self): super(_BaseTestCase, self).setUp() self.user_id = 'fake-user' self.project_id = 'fake-project' self.context = context.RequestContext(self.user_id, self.project_id) def json_comparator(self, expected, obj_val): # json-ify an object field for comparison with its db str # equivalent self.assertEqual(expected, jsonutils.dumps(obj_val)) def str_comparator(self, expected, obj_val): """Compare a 
field to a string value Compare an object field to a string in the db by performing a simple coercion on the object field value. """ self.assertEqual(expected, str(obj_val)) def assertNotIsInstance(self, obj, cls, msg=None): """Python < v2.7 compatibility. Assert 'not isinstance(obj, cls).""" try: f = super(_BaseTestCase, self).assertNotIsInstance except AttributeError: self.assertThat(obj, matchers.Not(matchers.IsInstance(cls)), message=msg or '') else: f(obj, cls, msg=msg) class TestFixture(_BaseTestCase): def test_fake_indirection_takes_serializer(self): ser = mock.MagicMock() iapi = fixture.FakeIndirectionAPI(ser) ser.serialize_entity.return_value = mock.sentinel.serial iapi.object_action(mock.sentinel.context, mock.sentinel.objinst, mock.sentinel.objmethod, (), {}) ser.serialize_entity.assert_called_once_with(mock.sentinel.context, mock.sentinel.objinst) ser.deserialize_entity.assert_called_once_with(mock.sentinel.context, mock.sentinel.serial) def test_indirection_fixture_takes_indirection_api(self): iapi = mock.sentinel.iapi self.useFixture(fixture.IndirectionFixture(iapi)) self.assertEqual(iapi, base.VersionedObject.indirection_api) def test_indirection_action(self): self.useFixture(fixture.IndirectionFixture()) obj = MyObj(context=self.context) with mock.patch.object(base.VersionedObject.indirection_api, 'object_action') as mock_action: mock_action.return_value = ({}, 'foo') obj.marco() mock_action.assert_called_once_with(self.context, obj, 'marco', (), {}) @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions') def test_indirection_class_action(self, mock_otgv): mock_otgv.return_value = mock.sentinel.versions self.useFixture(fixture.IndirectionFixture()) with mock.patch.object(base.VersionedObject.indirection_api, 'object_class_action_versions') as mock_caction: mock_caction.return_value = 'foo' MyObj.query(self.context) mock_caction.assert_called_once_with(self.context, 'MyObj', 'query', mock.sentinel.versions, (), {}) def 
test_fake_indirection_serializes_arguments(self): ser = mock.MagicMock() iapi = fixture.FakeIndirectionAPI(serializer=ser) arg1 = mock.MagicMock() arg2 = mock.MagicMock() iapi.object_action(mock.sentinel.context, mock.sentinel.objinst, mock.sentinel.objmethod, (arg1,), {'foo': arg2}) ser.serialize_entity.assert_any_call(mock.sentinel.context, arg1) ser.serialize_entity.assert_any_call(mock.sentinel.context, arg2) def test_get_hashes(self): checker = fixture.ObjectVersionChecker() hashes = checker.get_hashes() # NOTE(danms): If this object's version or hash changes, this needs # to change. Otherwise, leave it alone. self.assertEqual('1.6-fb5f5379168bf08f7f2ce0a745e91027', hashes['TestSubclassedObject']) def test_test_hashes(self): checker = fixture.ObjectVersionChecker() hashes = checker.get_hashes() actual_hash = hashes['TestSubclassedObject'] hashes['TestSubclassedObject'] = 'foo' expected, actual = checker.test_hashes(hashes) self.assertEqual(['TestSubclassedObject'], list(expected.keys())) self.assertEqual(['TestSubclassedObject'], list(actual.keys())) self.assertEqual('foo', expected['TestSubclassedObject']) self.assertEqual(actual_hash, actual['TestSubclassedObject']) def test_get_dependency_tree(self): checker = fixture.ObjectVersionChecker() tree = checker.get_dependency_tree() # NOTE(danms): If this object's dependencies change, this n eeds # to change. Otherwise, leave it alone. 
self.assertEqual({'MyOwnedObject': '1.0'}, tree['TestSubclassedObject']) def test_test_relationships(self): checker = fixture.ObjectVersionChecker() tree = checker.get_dependency_tree() actual = tree['TestSubclassedObject'] tree['TestSubclassedObject']['Foo'] = '9.8' expected, actual = checker.test_relationships(tree) self.assertEqual(['TestSubclassedObject'], list(expected.keys())) self.assertEqual(['TestSubclassedObject'], list(actual.keys())) self.assertEqual({'MyOwnedObject': '1.0', 'Foo': '9.8'}, expected['TestSubclassedObject']) self.assertEqual({'MyOwnedObject': '1.0'}, actual['TestSubclassedObject']) def test_test_compatibility(self): fake_classes = {mock.sentinel.class_one: [mock.sentinel.impl_one_one, mock.sentinel.impl_one_two], mock.sentinel.class_two: [mock.sentinel.impl_two_one, mock.sentinel.impl_two_two], } checker = fixture.ObjectVersionChecker(fake_classes) @mock.patch.object(checker, '_test_object_compatibility') def test(mock_compat): checker.test_compatibility_routines() mock_compat.assert_has_calls( [mock.call(mock.sentinel.impl_one_one, manifest=None, init_args=[], init_kwargs={}), mock.call(mock.sentinel.impl_one_two, manifest=None, init_args=[], init_kwargs={}), mock.call(mock.sentinel.impl_two_one, manifest=None, init_args=[], init_kwargs={}), mock.call(mock.sentinel.impl_two_two, manifest=None, init_args=[], init_kwargs={})], any_order=True) test() def test_test_compatibility_checks_obj_to_primitive(self): fake = mock.MagicMock() fake.VERSION = '1.3' checker = fixture.ObjectVersionChecker() checker._test_object_compatibility(fake) fake().obj_to_primitive.assert_has_calls( [mock.call(target_version='1.0'), mock.call(target_version='1.1'), mock.call(target_version='1.2'), mock.call(target_version='1.3')]) def test_test_relationships_in_order(self): fake_classes = {mock.sentinel.class_one: [mock.sentinel.impl_one_one, mock.sentinel.impl_one_two], mock.sentinel.class_two: [mock.sentinel.impl_two_one, mock.sentinel.impl_two_two], } checker = 
fixture.ObjectVersionChecker(fake_classes) @mock.patch.object(checker, '_test_relationships_in_order') def test(mock_compat): checker.test_relationships_in_order() mock_compat.assert_has_calls( [mock.call(mock.sentinel.impl_one_one), mock.call(mock.sentinel.impl_one_two), mock.call(mock.sentinel.impl_two_one), mock.call(mock.sentinel.impl_two_two)], any_order=True) test() def test_test_relationships_in_order_good(self): fake = mock.MagicMock() fake.VERSION = '1.5' fake.fields = {'foo': fields.ObjectField('bar')} fake.obj_relationships = {'foo': [('1.2', '1.0'), ('1.3', '1.2')]} checker = fixture.ObjectVersionChecker() checker._test_relationships_in_order(fake) def _test_test_relationships_in_order_bad(self, fake_rels): fake = mock.MagicMock() fake.VERSION = '1.5' fake.fields = {'foo': fields.ObjectField('bar')} fake.obj_relationships = fake_rels checker = fixture.ObjectVersionChecker() self.assertRaises(AssertionError, checker._test_relationships_in_order, fake) def test_test_relationships_in_order_bad_my_version(self): self._test_test_relationships_in_order_bad( {'foo': [('1.4', '1.1'), ('1.3', '1.2')]}) def test_test_relationships_in_order_bad_child_version(self): self._test_test_relationships_in_order_bad( {'foo': [('1.2', '1.3'), ('1.3', '1.2')]}) def test_test_relationships_in_order_bad_both_versions(self): self._test_test_relationships_in_order_bad( {'foo': [('1.5', '1.4'), ('1.3', '1.2')]}) class _LocalTest(_BaseTestCase): def setUp(self): super(_LocalTest, self).setUp() self.assertIsNone(base.VersionedObject.indirection_api) class _RemoteTest(_BaseTestCase): def setUp(self): super(_RemoteTest, self).setUp() self.useFixture(fixture.IndirectionFixture()) class _TestObject(object): # def test_object_attrs_in_init(self): # # Spot check a few # objects.Instance # objects.InstanceInfoCache # objects.SecurityGroup # # Now check the test one in this file. 
Should be newest version # self.assertEqual('1.6', objects.MyObj.VERSION) def test_hydration_type_error(self): primitive = {'versioned_object.name': 'MyObj', 'versioned_object.namespace': 'versionedobjects', 'versioned_object.version': '1.5', 'versioned_object.data': {'foo': 'a'}} self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive) def test_hydration(self): primitive = {'versioned_object.name': 'MyObj', 'versioned_object.namespace': 'versionedobjects', 'versioned_object.version': '1.5', 'versioned_object.data': {'foo': 1}} real_method = MyObj._obj_from_primitive def _obj_from_primitive(*args): return real_method(*args) with mock.patch.object(MyObj, '_obj_from_primitive') as ofp: ofp.side_effect = _obj_from_primitive obj = MyObj.obj_from_primitive(primitive) ofp.assert_called_once_with(None, '1.5', primitive) self.assertEqual(obj.foo, 1) def test_hydration_version_different(self): primitive = {'versioned_object.name': 'MyObj', 'versioned_object.namespace': 'versionedobjects', 'versioned_object.version': '1.2', 'versioned_object.data': {'foo': 1}} obj = MyObj.obj_from_primitive(primitive) self.assertEqual(obj.foo, 1) self.assertEqual('1.2', obj.VERSION) def test_hydration_bad_ns(self): primitive = {'versioned_object.name': 'MyObj', 'versioned_object.namespace': 'foo', 'versioned_object.version': '1.5', 'versioned_object.data': {'foo': 1}} self.assertRaises(exception.UnsupportedObjectError, MyObj.obj_from_primitive, primitive) def test_hydration_additional_unexpected_stuff(self): primitive = {'versioned_object.name': 'MyObj', 'versioned_object.namespace': 'versionedobjects', 'versioned_object.version': '1.5.1', 'versioned_object.data': { 'foo': 1, 'unexpected_thing': 'foobar'}} obj = MyObj.obj_from_primitive(primitive) self.assertEqual(1, obj.foo) self.assertFalse(hasattr(obj, 'unexpected_thing')) # NOTE(danms): If we call obj_from_primitive() directly # with a version containing .z, we'll get that version # in the resulting object. 
In reality, when using the # serializer, we'll get that snipped off (tested # elsewhere) self.assertEqual('1.5.1', obj.VERSION) def test_dehydration(self): expected = {'versioned_object.name': 'MyObj', 'versioned_object.namespace': 'versionedobjects', 'versioned_object.version': '1.6', 'versioned_object.data': {'foo': 1}} obj = MyObj(foo=1) obj.obj_reset_changes() self.assertEqual(obj.obj_to_primitive(), expected) def test_dehydration_invalid_version(self): obj = MyObj(foo=1) obj.obj_reset_changes() self.assertRaises(exception.InvalidTargetVersion, obj.obj_to_primitive, target_version='1.7') def test_dehydration_same_version(self): expected = {'versioned_object.name': 'MyObj', 'versioned_object.namespace': 'versionedobjects', 'versioned_object.version': '1.6', 'versioned_object.data': {'foo': 1}} obj = MyObj(foo=1) obj.obj_reset_changes() with mock.patch.object(obj, 'obj_make_compatible') as mock_compat: self.assertEqual( obj.obj_to_primitive(target_version='1.6'), expected) self.assertFalse(mock_compat.called) def test_object_property(self): obj = MyObj(foo=1) self.assertEqual(obj.foo, 1) def test_object_property_type_error(self): obj = MyObj() def fail(): obj.foo = 'a' self.assertRaises(ValueError, fail) def test_object_dict_syntax(self): obj = MyObj(foo=123, bar='text') self.assertEqual(obj['foo'], 123) self.assertIn('bar', obj) self.assertNotIn('missing', obj) self.assertEqual(sorted(iter(obj)), ['bar', 'foo']) self.assertEqual(sorted(obj.keys()), ['bar', 'foo']) self.assertEqual(sorted(obj.values(), key=str), [123, 'text']) self.assertEqual(sorted(obj.items()), [('bar', 'text'), ('foo', 123)]) self.assertEqual(dict(obj), {'foo': 123, 'bar': 'text'}) def test_non_dict_remotable(self): @base.VersionedObjectRegistry.register class TestObject(base.VersionedObject): @base.remotable def test_method(self): return 123 obj = TestObject(context=self.context) self.assertEqual(123, obj.test_method()) def test_load(self): obj = MyObj() self.assertEqual(obj.bar, 'loaded!') 
def test_load_in_base(self): @base.VersionedObjectRegistry.register class Foo(base.VersionedObject): fields = {'foobar': fields.Field(fields.Integer())} obj = Foo() with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"): obj.foobar def test_loaded_in_primitive(self): obj = MyObj(foo=1) obj.obj_reset_changes() self.assertEqual(obj.bar, 'loaded!') expected = {'versioned_object.name': 'MyObj', 'versioned_object.namespace': 'versionedobjects', 'versioned_object.version': '1.6', 'versioned_object.changes': ['bar'], 'versioned_object.data': {'foo': 1, 'bar': 'loaded!'}} self.assertEqual(obj.obj_to_primitive(), expected) def test_changes_in_primitive(self): obj = MyObj(foo=123) self.assertEqual(obj.obj_what_changed(), set(['foo'])) primitive = obj.obj_to_primitive() self.assertIn('versioned_object.changes', primitive) obj2 = MyObj.obj_from_primitive(primitive) self.assertEqual(obj2.obj_what_changed(), set(['foo'])) obj2.obj_reset_changes() self.assertEqual(obj2.obj_what_changed(), set()) def test_obj_class_from_name(self): obj = base.VersionedObject.obj_class_from_name('MyObj', '1.5') self.assertEqual('1.5', obj.VERSION) def test_obj_class_from_name_latest_compatible(self): obj = base.VersionedObject.obj_class_from_name('MyObj', '1.1') self.assertEqual('1.6', obj.VERSION) def test_unknown_objtype(self): self.assertRaises(exception.UnsupportedObjectError, base.VersionedObject.obj_class_from_name, 'foo', '1.0') def test_obj_class_from_name_supported_version(self): self.assertRaises(exception.IncompatibleObjectVersion, base.VersionedObject.obj_class_from_name, 'MyObj', '1.25') try: base.VersionedObject.obj_class_from_name('MyObj', '1.25') except exception.IncompatibleObjectVersion as error: self.assertEqual('1.6', error.kwargs['supported']) def test_orphaned_object(self): obj = MyObj.query(self.context) obj._context = None self.assertRaises(exception.OrphanedObjectError, obj._update_test) def test_changed_1(self): obj = MyObj.query(self.context) obj.foo = 123 
self.assertEqual(obj.obj_what_changed(), set(['foo'])) obj._update_test() self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar'])) self.assertEqual(obj.foo, 123) def test_changed_2(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(obj.obj_what_changed(), set(['foo'])) obj.save() self.assertEqual(obj.obj_what_changed(), set([])) self.assertEqual(obj.foo, 123) def test_changed_3(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(obj.obj_what_changed(), set(['foo'])) obj.refresh() self.assertEqual(obj.obj_what_changed(), set([])) self.assertEqual(obj.foo, 321) self.assertEqual(obj.bar, 'refreshed') def test_changed_4(self): obj = MyObj.query(self.context) obj.bar = 'something' self.assertEqual(obj.obj_what_changed(), set(['bar'])) obj.modify_save_modify() self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object'])) self.assertEqual(obj.foo, 42) self.assertEqual(obj.bar, 'meow') self.assertIsInstance(obj.rel_object, MyOwnedObject) def test_changed_with_sub_object(self): @base.VersionedObjectRegistry.register class ParentObject(base.VersionedObject): fields = {'foo': fields.IntegerField(), 'bar': fields.ObjectField('MyObj'), } obj = ParentObject() self.assertEqual(set(), obj.obj_what_changed()) obj.foo = 1 self.assertEqual(set(['foo']), obj.obj_what_changed()) bar = MyObj() obj.bar = bar self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) obj.obj_reset_changes() self.assertEqual(set(), obj.obj_what_changed()) bar.foo = 1 self.assertEqual(set(['bar']), obj.obj_what_changed()) def test_changed_with_bogus_field(self): obj = MyObj() obj.foo = 123 # Add a bogus field name to the changed list, as could be the # case if we're sent some broken primitive from another node. 
obj._changed_fields.add('does_not_exist') self.assertEqual(set(['foo']), obj.obj_what_changed()) self.assertEqual({'foo': 123}, obj.obj_get_changes()) def test_static_result(self): obj = MyObj.query(self.context) self.assertEqual(obj.bar, 'bar') result = obj.marco() self.assertEqual(result, 'polo') def test_updates(self): obj = MyObj.query(self.context) self.assertEqual(obj.foo, 1) obj._update_test() self.assertEqual(obj.bar, 'updated') def test_contains(self): obj = MyOwnedObject() self.assertNotIn('baz', obj) obj.baz = 1 self.assertIn('baz', obj) self.assertNotIn('does_not_exist', obj) def test_obj_attr_is_set(self): obj = MyObj(foo=1) self.assertTrue(obj.obj_attr_is_set('foo')) self.assertFalse(obj.obj_attr_is_set('bar')) self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') def test_obj_reset_changes_recursive(self): obj = MyObj(rel_object=MyOwnedObject(baz=123), rel_objects=[MyOwnedObject(baz=456)]) self.assertEqual(set(['rel_object', 'rel_objects']), obj.obj_what_changed()) obj.obj_reset_changes() self.assertEqual(set(['rel_object']), obj.obj_what_changed()) self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed()) self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed()) obj.obj_reset_changes(recursive=True, fields=['foo']) self.assertEqual(set(['rel_object']), obj.obj_what_changed()) self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed()) self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed()) obj.obj_reset_changes(recursive=True) self.assertEqual(set([]), obj.rel_object.obj_what_changed()) self.assertEqual(set([]), obj.obj_what_changed()) def test_get(self): obj = MyObj(foo=1) # Foo has value, should not get the default self.assertEqual(obj.get('foo', 2), 1) # Foo has value, should return the value without error self.assertEqual(obj.get('foo'), 1) # Bar is not loaded, so we should get the default self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded') # Bar without a default should lazy-load 
self.assertEqual(obj.get('bar'), 'loaded!') # Bar now has a default, but loaded value should be returned self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!') # Invalid attribute should raise AttributeError self.assertRaises(AttributeError, obj.get, 'nothing') # ...even with a default self.assertRaises(AttributeError, obj.get, 'nothing', 3) def test_object_inheritance(self): base_fields = [] myobj_fields = (['foo', 'bar', 'missing', 'readonly', 'rel_object', 'rel_objects', 'mutable_default', 'timestamp'] + base_fields) myobj3_fields = ['new_field'] self.assertTrue(issubclass(TestSubclassedObject, MyObj)) self.assertEqual(len(myobj_fields), len(MyObj.fields)) self.assertEqual(set(myobj_fields), set(MyObj.fields.keys())) self.assertEqual(len(myobj_fields) + len(myobj3_fields), len(TestSubclassedObject.fields)) self.assertEqual(set(myobj_fields) | set(myobj3_fields), set(TestSubclassedObject.fields.keys())) def test_obj_as_admin(self): self.skipTest('oslo.context does not support elevated()') obj = MyObj(context=self.context) def fake(*args, **kwargs): self.assertTrue(obj._context.is_admin) with mock.patch.object(obj, 'obj_reset_changes') as mock_fn: mock_fn.side_effect = fake with obj.obj_as_admin(): obj.save() self.assertTrue(mock_fn.called) self.assertFalse(obj._context.is_admin) def test_get_changes(self): obj = MyObj() self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) timestamp = datetime.datetime(2001, 1, 1, tzinfo=datetime.timezone.utc) with mock.patch.object(timeutils, 'utcnow') as mock_utcnow: mock_utcnow.return_value = timestamp obj.timestamp = timeutils.utcnow() self.assertEqual({'timestamp': timestamp}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) # Timestamp without tzinfo causes mismatch 
timestamp = datetime.datetime(2001, 1, 1) with mock.patch.object(timeutils, 'utcnow') as mock_utcnow: mock_utcnow.return_value = timestamp obj.timestamp = timeutils.utcnow() self.assertRaises(TypeError, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def test_obj_fields(self): class TestObj(base.VersionedObject): fields = {'foo': fields.Field(fields.Integer())} obj_extra_fields = ['bar'] @property def bar(self): return 'this is bar' obj = TestObj() self.assertEqual(['foo', 'bar'], obj.obj_fields) def test_obj_context(self): class TestObj(base.VersionedObject): pass # context is available through the public property context = mock.Mock() obj = TestObj(context) self.assertEqual(context, obj.obj_context) # ..but it's not available for update new_context = mock.Mock() self.assertRaises( AttributeError, setattr, obj, 'obj_context', new_context) def test_obj_constructor(self): obj = MyObj(context=self.context, foo=123, bar='abc') self.assertEqual(123, obj.foo) self.assertEqual('abc', obj.bar) self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) def test_obj_read_only(self): obj = MyObj(context=self.context, foo=123, bar='abc') obj.readonly = 1 self.assertRaises(exception.ReadOnlyFieldError, setattr, obj, 'readonly', 2) def test_obj_mutable_default(self): obj = MyObj(context=self.context, foo=123, bar='abc') obj.mutable_default = None obj.mutable_default.append('s1') self.assertEqual(obj.mutable_default, ['s1']) obj1 = MyObj(context=self.context, foo=123, bar='abc') obj1.mutable_default = None obj1.mutable_default.append('s2') self.assertEqual(obj1.mutable_default, ['s2']) def test_obj_mutable_default_set_default(self): obj1 = MyObj(context=self.context, foo=123, bar='abc') obj1.obj_set_defaults('mutable_default') self.assertEqual(obj1.mutable_default, []) obj1.mutable_default.append('s1') self.assertEqual(obj1.mutable_default, ['s1']) obj2 = MyObj(context=self.context, foo=123, bar='abc') 
obj2.obj_set_defaults('mutable_default') self.assertEqual(obj2.mutable_default, []) obj2.mutable_default.append('s2') self.assertEqual(obj2.mutable_default, ['s2']) def test_obj_repr(self): obj = MyObj(foo=123) self.assertEqual('MyObj(bar=,foo=123,missing=,' 'mutable_default=,readonly=,' 'rel_object=,rel_objects=,timestamp=)', repr(obj)) def test_obj_repr_sensitive(self): obj = MySensitiveObj(data="""{'admin_password':'mypassword'}""") self.assertEqual( 'MySensitiveObj(data=\'{\'admin_password\':\'***\'}\')', repr(obj)) obj2 = MySensitiveObj() self.assertEqual('MySensitiveObj(data=)', repr(obj2)) def test_obj_repr_unicode(self): obj = MyObj(bar='\u0191\u01A1\u01A1') # verify the unicode string has been encoded as ASCII if on python 2 self.assertEqual("MyObj(bar='\u0191\u01A1\u01A1',foo=," "missing=,mutable_default=,readonly=," "rel_object=,rel_objects=,timestamp=)", repr(obj)) def test_obj_make_obj_compatible_with_relationships(self): subobj = MyOwnedObject(baz=1) obj = MyObj(rel_object=subobj) obj.obj_relationships = { 'rel_object': [('1.5', '1.1'), ('1.7', '1.2')], } primitive = obj.obj_to_primitive()['versioned_object.data'] with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: obj._obj_make_obj_compatible(copy.copy(primitive), '1.8', 'rel_object') self.assertFalse(mock_compat.called) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: obj._obj_make_obj_compatible(copy.copy(primitive), '1.7', 'rel_object') mock_compat.assert_called_once_with( primitive['rel_object']['versioned_object.data'], '1.2') self.assertEqual( '1.2', primitive['rel_object']['versioned_object.version']) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: obj._obj_make_obj_compatible(copy.copy(primitive), '1.6', 'rel_object') mock_compat.assert_called_once_with( primitive['rel_object']['versioned_object.data'], '1.1') self.assertEqual( '1.1', primitive['rel_object']['versioned_object.version']) with mock.patch.object(subobj, 
'obj_make_compatible') as mock_compat: obj._obj_make_obj_compatible(copy.copy(primitive), '1.5', 'rel_object') mock_compat.assert_called_once_with( primitive['rel_object']['versioned_object.data'], '1.1') self.assertEqual( '1.1', primitive['rel_object']['versioned_object.version']) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: _prim = copy.copy(primitive) obj._obj_make_obj_compatible(_prim, '1.4', 'rel_object') self.assertFalse(mock_compat.called) self.assertNotIn('rel_object', _prim) def test_obj_make_compatible_hits_sub_objects_with_rels(self): subobj = MyOwnedObject(baz=1) obj = MyObj(foo=123, rel_object=subobj) obj.obj_relationships = {'rel_object': [('1.0', '1.0')]} with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat: obj.obj_make_compatible({'rel_object': 'foo'}, '1.10') mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10', 'rel_object') def test_obj_make_compatible_skips_unset_sub_objects_with_rels(self): obj = MyObj(foo=123) obj.obj_relationships = {'rel_object': [('1.0', '1.0')]} with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat: obj.obj_make_compatible({'rel_object': 'foo'}, '1.10') self.assertFalse(mock_compat.called) def test_obj_make_compatible_complains_about_missing_rel_rules(self): subobj = MyOwnedObject(baz=1) obj = MyObj(foo=123, rel_object=subobj) obj.obj_relationships = {} self.assertRaises(exception.ObjectActionError, obj.obj_make_compatible, {}, '1.0') def test_obj_make_compatible_handles_list_of_objects_with_rels(self): subobj = MyOwnedObject(baz=1) obj = MyObj(rel_objects=[subobj]) obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]} def fake_make_compat(primitive, version, **k): self.assertEqual('1.123', version) self.assertIn('baz', primitive) with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc: mock_mc.side_effect = fake_make_compat obj.obj_to_primitive('1.0') self.assertTrue(mock_mc.called) def test_obj_make_compatible_with_manifest(self): 
subobj = MyOwnedObject(baz=1) obj = MyObj(rel_object=subobj) obj.obj_relationships = {} orig_primitive = obj.obj_to_primitive()['versioned_object.data'] with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: manifest = {'MyOwnedObject': '1.2'} primitive = copy.deepcopy(orig_primitive) obj.obj_make_compatible_from_manifest(primitive, '1.5', manifest) mock_compat.assert_called_once_with( primitive['rel_object']['versioned_object.data'], '1.2') self.assertEqual( '1.2', primitive['rel_object']['versioned_object.version']) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: manifest = {'MyOwnedObject': '1.0'} primitive = copy.deepcopy(orig_primitive) obj.obj_make_compatible_from_manifest(primitive, '1.5', manifest) mock_compat.assert_called_once_with( primitive['rel_object']['versioned_object.data'], '1.0') self.assertEqual( '1.0', primitive['rel_object']['versioned_object.version']) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: manifest = {} primitive = copy.deepcopy(orig_primitive) obj.obj_make_compatible_from_manifest(primitive, '1.5', manifest) self.assertFalse(mock_compat.called) self.assertEqual( '1.0', primitive['rel_object']['versioned_object.version']) def test_obj_make_compatible_with_manifest_subobj(self): # Make sure that we call the subobject's "from_manifest" method # as well subobj = MyOwnedObject(baz=1) obj = MyObj(rel_object=subobj) obj.obj_relationships = {} manifest = {'MyOwnedObject': '1.2'} primitive = obj.obj_to_primitive()['versioned_object.data'] method = 'obj_make_compatible_from_manifest' with mock.patch.object(subobj, method) as mock_compat: obj.obj_make_compatible_from_manifest(primitive, '1.5', manifest) mock_compat.assert_called_once_with( primitive['rel_object']['versioned_object.data'], '1.2', version_manifest=manifest) def test_obj_make_compatible_with_manifest_subobj_list(self): # Make sure that we call the subobject's "from_manifest" method # as well subobj = MyOwnedObject(baz=1) 
obj = MyObj(rel_objects=[subobj]) obj.obj_relationships = {} manifest = {'MyOwnedObject': '1.2'} primitive = obj.obj_to_primitive()['versioned_object.data'] method = 'obj_make_compatible_from_manifest' with mock.patch.object(subobj, method) as mock_compat: obj.obj_make_compatible_from_manifest(primitive, '1.5', manifest) mock_compat.assert_called_once_with( primitive['rel_objects'][0]['versioned_object.data'], '1.2', version_manifest=manifest) def test_obj_make_compatible_removes_field_cleans_changes(self): @base.VersionedObjectRegistry.register_if(False) class TestObject(base.VersionedObject): VERSION = '1.1' fields = {'foo': fields.StringField(), 'bar': fields.StringField()} def obj_make_compatible(self, primitive, target_version): del primitive['bar'] obj = TestObject(foo='test1', bar='test2') prim = obj.obj_to_primitive('1.0') self.assertEqual(['foo'], prim['versioned_object.changes']) def test_delattr(self): obj = MyObj(bar='foo') del obj.bar # Should appear unset now self.assertFalse(obj.obj_attr_is_set('bar')) # Make sure post-delete, references trigger lazy loads self.assertEqual('loaded!', getattr(obj, 'bar')) def test_delattr_unset(self): obj = MyObj() self.assertRaises(AttributeError, delattr, obj, 'bar') def test_obj_make_compatible_on_list_base(self): @base.VersionedObjectRegistry.register_if(False) class MyList(base.ObjectListBase, base.VersionedObject): VERSION = '1.1' fields = {'objects': fields.ListOfObjectsField('MyObj')} childobj = MyObj(foo=1) listobj = MyList(objects=[childobj]) compat_func = 'obj_make_compatible_from_manifest' with mock.patch.object(childobj, compat_func) as mock_compat: listobj.obj_to_primitive(target_version='1.0') mock_compat.assert_called_once_with({'foo': 1}, '1.0', version_manifest=None) def test_comparable_objects(self): class NonVersionedObject(object): pass obj1 = MyComparableObj(foo=1) obj2 = MyComparableObj(foo=1) obj3 = MyComparableObj(foo=2) obj4 = NonVersionedObject() self.assertTrue(obj1 == obj2) 
self.assertFalse(obj1 == obj3) self.assertFalse(obj1 == obj4) self.assertNotEqual(obj1, None) def test_compound_clone(self): obj = MyCompoundObject() obj.foo = [1, 2, 3] obj.bar = {"a": 1, "b": 2, "c": 3} obj.baz = set([1, 2, 3]) copy = obj.obj_clone() self.assertEqual(obj.foo, copy.foo) self.assertEqual(obj.bar, copy.bar) self.assertEqual(obj.baz, copy.baz) # ensure that the cloned object still coerces values in its compounds copy.foo.append("4") copy.bar.update(d="4") copy.baz.add("4") self.assertEqual([1, 2, 3, 4], copy.foo) self.assertEqual({"a": 1, "b": 2, "c": 3, "d": 4}, copy.bar) self.assertEqual(set([1, 2, 3, 4]), copy.baz) def test_obj_list_fields_modifications(self): @base.VersionedObjectRegistry.register class ObjWithList(base.VersionedObject): fields = { 'list_field': fields.Field(fields.List(fields.Integer())), } obj = ObjWithList() def set_by_index(val): obj.list_field[0] = val def append(val): obj.list_field.append(val) def extend(val): obj.list_field.extend([val]) def add(val): obj.list_field = obj.list_field + [val] def iadd(val): """Test += corner case a=a+b and a+=b use different magic methods under the hood: first one calls __add__ which clones initial value before the assignment, second one call __iadd__ which modifies the initial list. Assignment should cause coercing in both cases, but __iadd__ may corrupt the initial value even if the assignment fails. 
So it should be overridden as well, and this test is needed to verify it """ obj.list_field += [val] def insert(val): obj.list_field.insert(0, val) def simple_slice(val): obj.list_field[:] = [val] def extended_slice(val): """Extended slice case Extended slice (and regular slices in py3) are handled differently thus needing a separate test """ obj.list_field[::2] = [val] # positive tests to ensure that coercing works properly obj.list_field = ["42"] set_by_index("1") append("2") extend("3") add("4") iadd("5") insert("0") self.assertEqual([0, 1, 2, 3, 4, 5], obj.list_field) simple_slice("10") self.assertEqual([10], obj.list_field) extended_slice("42") self.assertEqual([42], obj.list_field) obj.obj_reset_changes() # negative tests with non-coerceable values self.assertRaises(ValueError, set_by_index, "abc") self.assertRaises(ValueError, append, "abc") self.assertRaises(ValueError, extend, "abc") self.assertRaises(ValueError, add, "abc") self.assertRaises(ValueError, iadd, "abc") self.assertRaises(ValueError, insert, "abc") self.assertRaises(ValueError, simple_slice, "abc") self.assertRaises(ValueError, extended_slice, "abc") # ensure that nothing has been changed self.assertEqual([42], obj.list_field) self.assertEqual({}, obj.obj_get_changes()) def test_obj_dict_field_modifications(self): @base.VersionedObjectRegistry.register class ObjWithDict(base.VersionedObject): fields = { 'dict_field': fields.Field(fields.Dict(fields.Integer())), } obj = ObjWithDict() obj.dict_field = {"1": 1, "3": 3, "4": 4} def set_by_key(key, value): obj.dict_field[key] = value def add_by_key(key, value): obj.dict_field[key] = value def update_w_dict(key, value): obj.dict_field.update({key: value}) def update_w_kwargs(key, value): obj.dict_field.update(**{key: value}) def setdefault(key, value): obj.dict_field.setdefault(key, value) # positive tests to ensure that coercing works properly set_by_key("1", "10") add_by_key("2", "20") update_w_dict("3", "30") update_w_kwargs("4", "40") 
setdefault("5", "50") self.assertEqual({"1": 10, "2": 20, "3": 30, "4": 40, "5": 50}, obj.dict_field) obj.obj_reset_changes() # negative tests with non-coerceable values self.assertRaises(ValueError, set_by_key, "key", "abc") self.assertRaises(ValueError, add_by_key, "other", "abc") self.assertRaises(ValueError, update_w_dict, "key", "abc") self.assertRaises(ValueError, update_w_kwargs, "key", "abc") self.assertRaises(ValueError, setdefault, "other", "abc") # ensure that nothing has been changed self.assertEqual({"1": 10, "2": 20, "3": 30, "4": 40, "5": 50}, obj.dict_field) self.assertEqual({}, obj.obj_get_changes()) def test_obj_set_field_modifications(self): @base.VersionedObjectRegistry.register class ObjWithSet(base.VersionedObject): fields = { 'set_field': fields.Field(fields.Set(fields.Integer())) } obj = ObjWithSet() obj.set_field = set([42]) def add(value): obj.set_field.add(value) def update_w_set(value): obj.set_field.update(set([value])) def update_w_list(value): obj.set_field.update([value, value, value]) def sym_diff_upd(value): obj.set_field.symmetric_difference_update(set([value])) def union(value): obj.set_field = obj.set_field | set([value]) def iunion(value): obj.set_field |= set([value]) def xor(value): obj.set_field = obj.set_field ^ set([value]) def ixor(value): obj.set_field ^= set([value]) # positive tests to ensure that coercing works properly sym_diff_upd("42") add("1") update_w_list("2") update_w_set("3") union("4") iunion("5") xor("6") ixor("7") self.assertEqual(set([1, 2, 3, 4, 5, 6, 7]), obj.set_field) obj.set_field = set([42]) obj.obj_reset_changes() # negative tests with non-coerceable values self.assertRaises(ValueError, add, "abc") self.assertRaises(ValueError, update_w_list, "abc") self.assertRaises(ValueError, update_w_set, "abc") self.assertRaises(ValueError, sym_diff_upd, "abc") self.assertRaises(ValueError, union, "abc") self.assertRaises(ValueError, iunion, "abc") self.assertRaises(ValueError, xor, "abc") 
self.assertRaises(ValueError, ixor, "abc") # ensure that nothing has been changed self.assertEqual(set([42]), obj.set_field) self.assertEqual({}, obj.obj_get_changes()) class TestObject(_LocalTest, _TestObject): def test_set_defaults(self): obj = MyObj() obj.obj_set_defaults('foo') self.assertTrue(obj.obj_attr_is_set('foo')) self.assertEqual(1, obj.foo) def test_set_defaults_no_default(self): obj = MyObj() self.assertRaises(exception.ObjectActionError, obj.obj_set_defaults, 'bar') def test_set_all_defaults(self): obj = MyObj() obj.obj_set_defaults() self.assertEqual(set(['mutable_default', 'foo']), obj.obj_what_changed()) self.assertEqual(1, obj.foo) def test_set_defaults_not_overwrite(self): # NOTE(danms): deleted defaults to False, so verify that it does # not get reset by obj_set_defaults() obj = MyObj(deleted=True) obj.obj_set_defaults() self.assertEqual(1, obj.foo) self.assertTrue(obj.deleted) class TestRemoteObject(_RemoteTest, _TestObject): @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions') def test_major_version_mismatch(self, mock_otgv): mock_otgv.return_value = {'MyObj': '2.0'} self.assertRaises(exception.IncompatibleObjectVersion, MyObj2.query, self.context) @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions') def test_minor_version_greater(self, mock_otgv): mock_otgv.return_value = {'MyObj': '1.7'} self.assertRaises(exception.IncompatibleObjectVersion, MyObj2.query, self.context) @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions') def test_minor_version_less(self, mock_otgv): mock_otgv.return_value = {'MyObj': '1.2'} obj = MyObj2.query(self.context) self.assertEqual(obj.bar, 'bar') @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions') def test_compat(self, mock_otgv): mock_otgv.return_value = {'MyObj': '1.1'} obj = MyObj2.query(self.context) self.assertEqual('oldbar', obj.bar) @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions') def test_revision_ignored(self, mock_otgv): mock_otgv.return_value = 
{'MyObj': '1.1.456'} obj = MyObj2.query(self.context) self.assertEqual('bar', obj.bar) def test_class_action_falls_back_compat(self): with mock.patch.object(base.VersionedObject, 'indirection_api') as ma: ma.object_class_action_versions.side_effect = NotImplementedError MyObj.query(self.context) ma.object_class_action.assert_called_once_with( self.context, 'MyObj', 'query', MyObj.VERSION, (), {}) class TestObjectListBase(test.TestCase): def test_list_like_operations(self): @base.VersionedObjectRegistry.register class MyElement(base.VersionedObject): fields = {'foo': fields.IntegerField()} def __init__(self, foo): super(MyElement, self).__init__() self.foo = foo class Foo(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('MyElement')} objlist = Foo(context='foo', objects=[MyElement(1), MyElement(2), MyElement(3)]) self.assertEqual(list(objlist), objlist.objects) self.assertEqual(len(objlist), 3) self.assertIn(objlist.objects[0], objlist) self.assertEqual(list(objlist[:1]), [objlist.objects[0]]) self.assertEqual(objlist[:1]._context, 'foo') self.assertEqual(objlist[2], objlist.objects[2]) self.assertEqual(objlist.count(objlist.objects[0]), 1) self.assertEqual(objlist.index(objlist.objects[1]), 1) objlist.sort(key=lambda x: x.foo, reverse=True) self.assertEqual([3, 2, 1], [x.foo for x in objlist]) def test_serialization(self): @base.VersionedObjectRegistry.register class Foo(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('Bar')} @base.VersionedObjectRegistry.register class Bar(base.VersionedObject): fields = {'foo': fields.Field(fields.String())} obj = Foo(objects=[]) for i in 'abc': bar = Bar(foo=i) obj.objects.append(bar) obj2 = base.VersionedObject.obj_from_primitive(obj.obj_to_primitive()) self.assertFalse(obj is obj2) self.assertEqual([x.foo for x in obj], [y.foo for y in obj2]) def _test_object_list_version_mappings(self, list_obj_class): # Figure out what sort of object this 
list is for list_field = list_obj_class.fields['objects'] item_obj_field = list_field._type._element_type item_obj_name = item_obj_field._type._obj_name # Look through all object classes of this type and make sure that # the versions we find are covered by the parent list class obj_classes = base.VersionedObjectRegistry.obj_classes()[item_obj_name] for item_class in obj_classes: if is_test_object(item_class): continue self.assertIn( item_class.VERSION, list_obj_class.child_versions.values(), 'Version mapping is incomplete for %s' % ( list_obj_class.__name__)) def test_object_version_mappings(self): self.skipTest('this needs to be generalized') # Find all object list classes and make sure that they at least handle # all the current object versions for obj_classes in base.VersionedObjectRegistry.obj_classes().values(): for obj_class in obj_classes: if issubclass(obj_class, base.ObjectListBase): self._test_object_list_version_mappings(obj_class) def test_obj_make_compatible_child_versions(self): @base.VersionedObjectRegistry.register class MyElement(base.VersionedObject): fields = {'foo': fields.IntegerField()} @base.VersionedObjectRegistry.register class Foo(base.ObjectListBase, base.VersionedObject): VERSION = '1.1' fields = {'objects': fields.ListOfObjectsField('MyElement')} child_versions = {'1.0': '1.0', '1.1': '1.0'} subobj = MyElement(foo=1) obj = Foo(objects=[subobj]) primitive = obj.obj_to_primitive()['versioned_object.data'] with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: obj.obj_make_compatible(copy.copy(primitive), '1.1') self.assertTrue(mock_compat.called) def test_obj_make_compatible_obj_relationships(self): @base.VersionedObjectRegistry.register class MyElement(base.VersionedObject): fields = {'foo': fields.IntegerField()} @base.VersionedObjectRegistry.register class Bar(base.ObjectListBase, base.VersionedObject): VERSION = '1.1' fields = {'objects': fields.ListOfObjectsField('MyElement')} obj_relationships = { 'objects': [('1.0', 
'1.0'), ('1.1', '1.0')] } subobj = MyElement(foo=1) obj = Bar(objects=[subobj]) primitive = obj.obj_to_primitive()['versioned_object.data'] with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: obj.obj_make_compatible(copy.copy(primitive), '1.1') self.assertTrue(mock_compat.called) def test_obj_make_compatible_no_relationships(self): @base.VersionedObjectRegistry.register class MyElement(base.VersionedObject): fields = {'foo': fields.IntegerField()} @base.VersionedObjectRegistry.register class Baz(base.ObjectListBase, base.VersionedObject): VERSION = '1.1' fields = {'objects': fields.ListOfObjectsField('MyElement')} subobj = MyElement(foo=1) obj = Baz(objects=[subobj]) primitive = obj.obj_to_primitive()['versioned_object.data'] with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: obj.obj_make_compatible(copy.copy(primitive), '1.1') self.assertTrue(mock_compat.called) def test_list_changes(self): @base.VersionedObjectRegistry.register class Foo(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('Bar')} @base.VersionedObjectRegistry.register class Bar(base.VersionedObject): fields = {'foo': fields.StringField()} obj = Foo(objects=[]) self.assertEqual(set(['objects']), obj.obj_what_changed()) obj.objects.append(Bar(foo='test')) self.assertEqual(set(['objects']), obj.obj_what_changed()) obj.obj_reset_changes() # This should still look dirty because the child is dirty self.assertEqual(set(['objects']), obj.obj_what_changed()) obj.objects[0].obj_reset_changes() # This should now look clean because the child is clean self.assertEqual(set(), obj.obj_what_changed()) def test_initialize_objects(self): class Foo(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('Bar')} class Bar(base.VersionedObject): fields = {'foo': fields.StringField()} obj = Foo() self.assertEqual([], obj.objects) self.assertEqual(set(), obj.obj_what_changed()) def test_obj_repr(self): 
@base.VersionedObjectRegistry.register class Foo(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('Bar')} @base.VersionedObjectRegistry.register class Bar(base.VersionedObject): fields = {'uuid': fields.StringField()} obj = Foo(objects=[Bar(uuid='fake-uuid')]) self.assertEqual('Foo(objects=[Bar(fake-uuid)])', repr(obj)) class TestObjectSerializer(_BaseTestCase): def test_serialize_entity_primitive(self): ser = base.VersionedObjectSerializer() for thing in (1, 'foo', [1, 2], {'foo': 'bar'}): self.assertEqual(thing, ser.serialize_entity(None, thing)) def test_deserialize_entity_primitive(self): ser = base.VersionedObjectSerializer() for thing in (1, 'foo', [1, 2], {'foo': 'bar'}): self.assertEqual(thing, ser.deserialize_entity(None, thing)) def test_serialize_set_to_list(self): ser = base.VersionedObjectSerializer() self.assertEqual([1, 2], ser.serialize_entity(None, set([1, 2]))) @mock.patch('oslo_versionedobjects.base.VersionedObject.indirection_api') def _test_deserialize_entity_newer(self, obj_version, backported_to, mock_iapi, my_version='1.6'): ser = base.VersionedObjectSerializer() mock_iapi.object_backport_versions.return_value = 'backported' @base.VersionedObjectRegistry.register class MyTestObj(MyObj): VERSION = my_version obj = MyTestObj() obj.VERSION = obj_version primitive = obj.obj_to_primitive() result = ser.deserialize_entity(self.context, primitive) if backported_to is None: self.assertFalse(mock_iapi.object_backport_versions.called) else: self.assertEqual('backported', result) mock_iapi.object_backport_versions.assert_called_with( self.context, primitive, {'MyTestObj': my_version, 'MyOwnedObject': '1.0'}) def test_deserialize_entity_newer_version_backports(self): self._test_deserialize_entity_newer('1.25', '1.6') def test_deserialize_entity_newer_revision_does_not_backport_zero(self): self._test_deserialize_entity_newer('1.6.0', None) def test_deserialize_entity_newer_revision_does_not_backport(self): 
self._test_deserialize_entity_newer('1.6.1', None) def test_deserialize_entity_newer_version_passes_revision(self): self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1') def test_deserialize_dot_z_with_extra_stuff(self): primitive = {'versioned_object.name': 'MyObj', 'versioned_object.namespace': 'versionedobjects', 'versioned_object.version': '1.6.1', 'versioned_object.data': { 'foo': 1, 'unexpected_thing': 'foobar'}} ser = base.VersionedObjectSerializer() obj = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, obj.foo) self.assertFalse(hasattr(obj, 'unexpected_thing')) # NOTE(danms): The serializer is where the logic lives that # avoids backports for cases where only a .z difference in # the received object version is detected. As a result, we # end up with a version of what we expected, effectively the # .0 of the object. self.assertEqual('1.6', obj.VERSION) def test_deserialize_entity_newer_version_no_indirection(self): ser = base.VersionedObjectSerializer() obj = MyObj() obj.VERSION = '1.25' primitive = obj.obj_to_primitive() self.assertRaises(exception.IncompatibleObjectVersion, ser.deserialize_entity, self.context, primitive) def _test_nested_backport(self, old): @base.VersionedObjectRegistry.register class Parent(base.VersionedObject): VERSION = '1.0' fields = { 'child': fields.ObjectField('MyObj'), } @base.VersionedObjectRegistry.register # noqa class Parent(base.VersionedObject): # noqa VERSION = '1.1' fields = { 'child': fields.ObjectField('MyObj'), } child = MyObj(foo=1) parent = Parent(child=child) prim = parent.obj_to_primitive() child_prim = prim['versioned_object.data']['child'] child_prim['versioned_object.version'] = '1.10' ser = base.VersionedObjectSerializer() with mock.patch.object(base.VersionedObject, 'indirection_api') as a: if old: a.object_backport_versions.side_effect = NotImplementedError ser.deserialize_entity(self.context, prim) a.object_backport_versions.assert_called_once_with( self.context, prim, 
{'Parent': '1.1', 'MyObj': '1.6', 'MyOwnedObject': '1.0'}) if old: # NOTE(danms): This should be the version of the parent object, # not the child. If wrong, this will be '1.6', which is the max # child version in our registry. a.object_backport.assert_called_once_with( self.context, prim, '1.1') def test_nested_backport_new_method(self): self._test_nested_backport(old=False) def test_nested_backport_old_method(self): self._test_nested_backport(old=True) def test_object_serialization(self): ser = base.VersionedObjectSerializer() obj = MyObj() primitive = ser.serialize_entity(self.context, obj) self.assertIn('versioned_object.name', primitive) obj2 = ser.deserialize_entity(self.context, primitive) self.assertIsInstance(obj2, MyObj) self.assertEqual(self.context, obj2._context) def test_object_serialization_iterables(self): ser = base.VersionedObjectSerializer() obj = MyObj() for iterable in (list, tuple, set): thing = iterable([obj]) primitive = ser.serialize_entity(self.context, thing) self.assertEqual(1, len(primitive)) for item in primitive: self.assertNotIsInstance(item, base.VersionedObject) thing2 = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, len(thing2)) for item in thing2: self.assertIsInstance(item, MyObj) # dict case thing = {'key': obj} primitive = ser.serialize_entity(self.context, thing) self.assertEqual(1, len(primitive)) for item in primitive.values(): self.assertNotIsInstance(item, base.VersionedObject) thing2 = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, len(thing2)) for item in thing2.values(): self.assertIsInstance(item, MyObj) # object-action updates dict case thing = {'foo': obj.obj_to_primitive()} primitive = ser.serialize_entity(self.context, thing) self.assertEqual(thing, primitive) thing2 = ser.deserialize_entity(self.context, thing) self.assertIsInstance(thing2['foo'], base.VersionedObject) def test_serializer_subclass_namespace(self): @base.VersionedObjectRegistry.register class 
MyNSObj(base.VersionedObject): OBJ_SERIAL_NAMESPACE = 'foo' fields = {'foo': fields.IntegerField()} class MySerializer(base.VersionedObjectSerializer): OBJ_BASE_CLASS = MyNSObj ser = MySerializer() obj = MyNSObj(foo=123) obj2 = ser.deserialize_entity(None, ser.serialize_entity(None, obj)) self.assertIsInstance(obj2, MyNSObj) self.assertEqual(obj.foo, obj2.foo) def test_serializer_subclass_namespace_mismatch(self): @base.VersionedObjectRegistry.register class MyNSObj(base.VersionedObject): OBJ_SERIAL_NAMESPACE = 'foo' fields = {'foo': fields.IntegerField()} class MySerializer(base.VersionedObjectSerializer): OBJ_BASE_CLASS = MyNSObj myser = MySerializer() voser = base.VersionedObjectSerializer() obj = MyObj(foo=123) obj2 = myser.deserialize_entity(None, voser.serialize_entity(None, obj)) # NOTE(danms): The new serializer should have ignored the objects # serialized by the base serializer, so obj2 here should be a dict # primitive and not a hydrated object self.assertNotIsInstance(obj2, MyNSObj) self.assertIn('versioned_object.name', obj2) def test_serializer_subclass_base_object_indirection(self): @base.VersionedObjectRegistry.register class MyNSObj(base.VersionedObject): OBJ_SERIAL_NAMESPACE = 'foo' fields = {'foo': fields.IntegerField()} indirection_api = mock.MagicMock() class MySerializer(base.VersionedObjectSerializer): OBJ_BASE_CLASS = MyNSObj ser = MySerializer() prim = MyNSObj(foo=1).obj_to_primitive() prim['foo.version'] = '2.0' ser.deserialize_entity(mock.sentinel.context, prim) indirection_api = MyNSObj.indirection_api indirection_api.object_backport_versions.assert_called_once_with( mock.sentinel.context, prim, {'MyNSObj': '1.0'}) @mock.patch('oslo_versionedobjects.base.VersionedObject.indirection_api') def test_serializer_calls_old_backport_interface(self, indirection_api): @base.VersionedObjectRegistry.register class MyOldObj(base.VersionedObject): pass ser = base.VersionedObjectSerializer() prim = MyOldObj(foo=1).obj_to_primitive() 
prim['versioned_object.version'] = '2.0' indirection_api.object_backport_versions.side_effect = ( NotImplementedError('Old')) ser.deserialize_entity(mock.sentinel.context, prim) indirection_api.object_backport.assert_called_once_with( mock.sentinel.context, prim, '1.0') class TestSchemaGeneration(test.TestCase): @base.VersionedObjectRegistry.register class FakeObject(base.VersionedObject): fields = { 'a_boolean': fields.BooleanField(nullable=True), } @base.VersionedObjectRegistry.register class FakeComplexObject(base.VersionedObject): fields = { 'a_dict': fields.DictOfListOfStringsField(), 'an_obj': fields.ObjectField('FakeObject', nullable=True), 'list_of_objs': fields.ListOfObjectsField('FakeObject'), } def test_to_json_schema(self): schema = self.FakeObject.to_json_schema() self.assertEqual({ '$schema': 'http://json-schema.org/draft-04/schema#', 'title': 'FakeObject', 'type': ['object'], 'properties': { 'versioned_object.namespace': { 'type': 'string' }, 'versioned_object.name': { 'type': 'string' }, 'versioned_object.version': { 'type': 'string' }, 'versioned_object.changes': { 'type': 'array', 'items': { 'type': 'string' } }, 'versioned_object.data': { 'type': 'object', 'description': 'fields of FakeObject', 'properties': { 'a_boolean': { 'readonly': False, 'type': ['boolean', 'null']}, }, }, }, 'required': ['versioned_object.namespace', 'versioned_object.name', 'versioned_object.version', 'versioned_object.data'] }, schema) jsonschema.validate(self.FakeObject(a_boolean=True).obj_to_primitive(), self.FakeObject.to_json_schema()) def test_to_json_schema_complex_object(self): schema = self.FakeComplexObject.to_json_schema() expected_schema = { '$schema': 'http://json-schema.org/draft-04/schema#', 'properties': { 'versioned_object.changes': {'items': {'type': 'string'}, 'type': 'array'}, 'versioned_object.data': { 'description': 'fields of FakeComplexObject', 'properties': { 'a_dict': { 'readonly': False, 'type': ['object'], 'additionalProperties': { 'type': 
['array'], 'readonly': False, 'items': { 'type': ['string'], 'readonly': False}}}, 'an_obj': { 'properties': { 'versioned_object.changes': {'items': {'type': 'string'}, 'type': 'array'}, 'versioned_object.data': { 'description': 'fields of FakeObject', 'properties': {'a_boolean': {'readonly': False, 'type': ['boolean', 'null']}}, 'type': 'object'}, 'versioned_object.name': {'type': 'string'}, 'versioned_object.namespace': {'type': 'string'}, 'versioned_object.version': {'type': 'string'}}, 'readonly': False, 'required': ['versioned_object.namespace', 'versioned_object.name', 'versioned_object.version', 'versioned_object.data'], 'type': ['object', 'null']}, 'list_of_objs': { 'items': { 'properties': { 'versioned_object.changes': {'items': {'type': 'string'}, 'type': 'array'}, 'versioned_object.data': { 'description': 'fields of FakeObject', 'properties': { 'a_boolean': { 'readonly': False, 'type': ['boolean', 'null']}}, 'type': 'object'}, 'versioned_object.name': {'type': 'string'}, 'versioned_object.namespace': {'type': 'string'}, 'versioned_object.version': {'type': 'string'}}, 'readonly': False, 'required': ['versioned_object.namespace', 'versioned_object.name', 'versioned_object.version', 'versioned_object.data'], 'type': ['object']}, 'readonly': False, 'type': ['array']}}, 'required': ['a_dict', 'list_of_objs'], 'type': 'object'}, 'versioned_object.name': {'type': 'string'}, 'versioned_object.namespace': {'type': 'string'}, 'versioned_object.version': {'type': 'string'}}, 'required': ['versioned_object.namespace', 'versioned_object.name', 'versioned_object.version', 'versioned_object.data'], 'title': 'FakeComplexObject', 'type': ['object']} self.assertEqual(expected_schema, schema) fake_obj = self.FakeComplexObject( a_dict={'key1': ['foo', 'bar'], 'key2': ['bar', 'baz']}, an_obj=self.FakeObject(a_boolean=True), list_of_objs=[self.FakeObject(a_boolean=False), self.FakeObject(a_boolean=True), self.FakeObject(a_boolean=False)]) primitives = 
fake_obj.obj_to_primitive() jsonschema.validate(primitives, schema) class TestNamespaceCompatibility(test.TestCase): def setUp(self): super(TestNamespaceCompatibility, self).setUp() @base.VersionedObjectRegistry.register_if(False) class TestObject(base.VersionedObject): OBJ_SERIAL_NAMESPACE = 'foo' OBJ_PROJECT_NAMESPACE = 'tests' self.test_class = TestObject def test_obj_primitive_key(self): self.assertEqual('foo.data', self.test_class._obj_primitive_key('data')) def test_obj_primitive_field(self): primitive = { 'foo.data': mock.sentinel.data, } self.assertEqual(mock.sentinel.data, self.test_class._obj_primitive_field(primitive, 'data')) def test_obj_primitive_field_namespace(self): primitive = { 'foo.name': 'TestObject', 'foo.namespace': 'tests', 'foo.version': '1.0', 'foo.data': {}, } with mock.patch.object(self.test_class, 'obj_class_from_name'): self.test_class.obj_from_primitive(primitive) def test_obj_primitive_field_namespace_wrong(self): primitive = { 'foo.name': 'TestObject', 'foo.namespace': 'wrong', 'foo.version': '1.0', 'foo.data': {}, } self.assertRaises(exception.UnsupportedObjectError, self.test_class.obj_from_primitive, primitive) class TestUtilityMethods(test.TestCase): def test_flat(self): @base.VersionedObjectRegistry.register class TestObject(base.VersionedObject): VERSION = '1.23' fields = {} tree = base.obj_tree_get_versions('TestObject') self.assertEqual({'TestObject': '1.23'}, tree) def test_parent_child(self): @base.VersionedObjectRegistry.register class TestChild(base.VersionedObject): VERSION = '2.34' @base.VersionedObjectRegistry.register class TestObject(base.VersionedObject): VERSION = '1.23' fields = { 'child': fields.ObjectField('TestChild'), } tree = base.obj_tree_get_versions('TestObject') self.assertEqual({'TestObject': '1.23', 'TestChild': '2.34'}, tree) def test_complex(self): @base.VersionedObjectRegistry.register class TestChild(base.VersionedObject): VERSION = '2.34' @base.VersionedObjectRegistry.register class 
TestChildTwo(base.VersionedObject): VERSION = '4.56' fields = { 'sibling': fields.ObjectField('TestChild'), } @base.VersionedObjectRegistry.register class TestObject(base.VersionedObject): VERSION = '1.23' fields = { 'child': fields.ObjectField('TestChild'), 'childtwo': fields.ListOfObjectsField('TestChildTwo'), } tree = base.obj_tree_get_versions('TestObject') self.assertEqual({'TestObject': '1.23', 'TestChild': '2.34', 'TestChildTwo': '4.56'}, tree) def test_complex_loopy(self): @base.VersionedObjectRegistry.register class TestChild(base.VersionedObject): VERSION = '2.34' fields = { 'sibling': fields.ObjectField('TestChildTwo'), } @base.VersionedObjectRegistry.register class TestChildTwo(base.VersionedObject): VERSION = '4.56' fields = { 'sibling': fields.ObjectField('TestChild'), 'parents': fields.ListOfObjectsField('TestObject'), } @base.VersionedObjectRegistry.register class TestObject(base.VersionedObject): VERSION = '1.23' fields = { 'child': fields.ObjectField('TestChild'), 'childtwo': fields.ListOfObjectsField('TestChildTwo'), } tree = base.obj_tree_get_versions('TestObject') self.assertEqual({'TestObject': '1.23', 'TestChild': '2.34', 'TestChildTwo': '4.56'}, tree) def test_missing_referenced(self): """Ensure a missing child object is highlighted.""" @base.VersionedObjectRegistry.register class TestObjectFoo(base.VersionedObject): VERSION = '1.23' fields = { # note that this object does not exist 'child': fields.ObjectField('TestChildBar'), } exc = self.assertRaises(exception.UnregisteredSubobject, base.obj_tree_get_versions, 'TestObjectFoo') self.assertIn('TestChildBar is referenced by TestObjectFoo', exc.format_message()) class TestListObjectConcat(test.TestCase): def test_list_object_concat(self): @base.VersionedObjectRegistry.register_if(False) class MyList(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('MyOwnedObject')} values = [1, 2, 42] list1 = MyList(objects=[MyOwnedObject(baz=values[0]), 
MyOwnedObject(baz=values[1])]) list2 = MyList(objects=[MyOwnedObject(baz=values[2])]) concat_list = list1 + list2 for idx, obj in enumerate(concat_list): self.assertEqual(values[idx], obj.baz) # Assert that the original lists are unmodified self.assertEqual(2, len(list1.objects)) self.assertEqual(1, list1.objects[0].baz) self.assertEqual(2, list1.objects[1].baz) self.assertEqual(1, len(list2.objects)) self.assertEqual(42, list2.objects[0].baz) def test_list_object_concat_fails_different_objects(self): @base.VersionedObjectRegistry.register_if(False) class MyList(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('MyOwnedObject')} @base.VersionedObjectRegistry.register_if(False) class MyList2(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('MyOwnedObject')} list1 = MyList(objects=[MyOwnedObject(baz=1)]) list2 = MyList2(objects=[MyOwnedObject(baz=2)]) def add(x, y): return x + y self.assertRaises(TypeError, add, list1, list2) # Assert that the original lists are unmodified self.assertEqual(1, len(list1.objects)) self.assertEqual(1, len(list2.objects)) self.assertEqual(1, list1.objects[0].baz) self.assertEqual(2, list2.objects[0].baz) def test_list_object_concat_fails_extra_fields(self): @base.VersionedObjectRegistry.register_if(False) class MyList(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('MyOwnedObject'), 'foo': fields.IntegerField(nullable=True)} list1 = MyList(objects=[MyOwnedObject(baz=1)]) list2 = MyList(objects=[MyOwnedObject(baz=2)]) def add(x, y): return x + y self.assertRaises(TypeError, add, list1, list2) # Assert that the original lists are unmodified self.assertEqual(1, len(list1.objects)) self.assertEqual(1, len(list2.objects)) self.assertEqual(1, list1.objects[0].baz) self.assertEqual(2, list2.objects[0].baz) def test_builtin_list_add_fails(self): @base.VersionedObjectRegistry.register_if(False) class 
MyList(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('MyOwnedObject')} list1 = MyList(objects=[MyOwnedObject(baz=1)]) def add(obj): return obj + [] self.assertRaises(TypeError, add, list1) def test_builtin_list_radd_fails(self): @base.VersionedObjectRegistry.register_if(False) class MyList(base.ObjectListBase, base.VersionedObject): fields = {'objects': fields.ListOfObjectsField('MyOwnedObject')} list1 = MyList(objects=[MyOwnedObject(baz=1)]) def add(obj): return [] + obj self.assertRaises(TypeError, add, list1) class TestTimestampedObject(test.TestCase): """Test TimestampedObject mixin. Do this by creating an object that uses the mixin and confirm that the added fields are there and in fact behaves as the DateTimeFields we desire. """ def setUp(self): super(TestTimestampedObject, self).setUp() @base.VersionedObjectRegistry.register_if(False) class MyTimestampedObject(base.VersionedObject, base.TimestampedObject): fields = { 'field1': fields.Field(fields.String()), } self.myclass = MyTimestampedObject self.my_object = self.myclass(field1='field1') def test_timestamped_has_fields(self): self.assertEqual('field1', self.my_object.field1) self.assertIn('updated_at', self.my_object.fields) self.assertIn('created_at', self.my_object.fields) def test_timestamped_holds_timestamps(self): now = timeutils.utcnow(with_timezone=True) self.my_object.updated_at = now self.my_object.created_at = now self.assertEqual(now, self.my_object.updated_at) self.assertEqual(now, self.my_object.created_at) def test_timestamped_rejects_not_timestamps(self): with testtools.ExpectedException(ValueError, '.*parse date.*'): self.my_object.updated_at = 'a string' with testtools.ExpectedException(ValueError, '.*parse date.*'): self.my_object.created_at = 'a string' ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.850407 
oslo.versionedobjects-3.4.0/releasenotes/0000775000175000017500000000000000000000000020555 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.862407 oslo.versionedobjects-3.4.0/releasenotes/notes/0000775000175000017500000000000000000000000021705 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/notes/add-reno-996dd44974d53238.yaml0000664000175000017500000000007200000000000026303 0ustar00zuulzuul00000000000000--- other: - Introduce reno for deployer release notes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/notes/drop-python27-support-b3e377b0dcfa4f5c.yaml0000664000175000017500000000017700000000000031376 0ustar00zuulzuul00000000000000--- upgrade: - | Support for Python 2.7 has been dropped. The minimum version of Python now supported is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/notes/update_md5_for_fips-e5a8f8f438ac81fb.yaml0000664000175000017500000000022700000000000031100 0ustar00zuulzuul00000000000000--- features: - Updated _get_fingerprint to use new oslo.utils encapsulation of md5 to allow md5 hashes to be returned on a FIPS enabled system. 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.866407 oslo.versionedobjects-3.4.0/releasenotes/source/0000775000175000017500000000000000000000000022055 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/2023.1.rst0000664000175000017500000000020200000000000023326 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000023327 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000023327 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. 
release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.866407 oslo.versionedobjects-3.4.0/releasenotes/source/_static/0000775000175000017500000000000000000000000023503 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000025754 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.866407 oslo.versionedobjects-3.4.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000024212 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000026463 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/conf.py0000664000175000017500000002154600000000000023364 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is execfile()d with the current directory set to its # containing dir. 
# # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/oslo.versionedobjects' openstackdocs_bug_project = 'oslo.versionedobjects' openstackdocs_bug_tag = '' openstackdocs_auto_name = False # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'oslo.versionedobjects Release Notes' copyright = '2016, oslo.versionedobjects Developers' # Release notes do not need a version in the title, they span # multiple versions. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'oslo.versionedobjectsReleaseNotesDoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
# 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'oslo.versionedobjectsReleaseNotes.tex', 'oslo.versionedobjects Release Notes Documentation', 'oslo.versionedobjects Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'oslo.versionedobjectsReleaseNotes', 'oslo.versionedobjects Release Notes Documentation', ['oslo.versionedobjects Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'oslo.versionedobjectsReleaseNotes', 'oslo.versionedobjects Release Notes Documentation', 'oslo.versionedobjects Developers', 'oslo.versionedobjectsReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. 
# texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/index.rst0000664000175000017500000000045200000000000023717 0ustar00zuulzuul00000000000000===================================== oslo.versionedobjects Release Notes ===================================== .. toctree:: :maxdepth: 1 unreleased 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.850407 oslo.versionedobjects-3.4.0/releasenotes/source/locale/0000775000175000017500000000000000000000000023314 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.850407 oslo.versionedobjects-3.4.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000024266 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.866407 oslo.versionedobjects-3.4.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000026053 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000000520700000000000031110 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. 
#zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata msgid "" msgstr "" "Project-Id-Version: oslo.versionedobjects Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2023-05-08 11:19+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2023-06-21 08:08+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "1.19.0" msgstr "1.19.0" msgid "2.0.0" msgstr "2.0.0" msgid "2.4.0" msgstr "2.4.0" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "Introduce reno for deployer release notes." msgstr "Introduce Reno for deployer release notes." msgid "New Features" msgstr "New Features" msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "Support for Python 2.7 has been dropped. The minimum version of Python now " "supported is Python 3.6." msgstr "" "Support for Python 2.7 has been dropped. The minimum version of Python now " "supported is Python 3.6." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "" "Updated _get_fingerprint to use new oslo.utils encapsulation of md5 to allow " "md5 hashes to be returned on a FIPS enabled system." msgstr "" "Updated _get_fingerprint to use new oslo.utils encapsulation of MD5 to allow " "MD5 hashes to be returned on a FIPS-enabled system." 
msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "oslo.versionedobjects Release Notes" msgstr "oslo.versionedobjects Release Notes" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000023671 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000023537 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000024104 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. 
release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000023731 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000023724 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000023730 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/unreleased.rst0000664000175000017500000000016000000000000024733 0ustar00zuulzuul00000000000000============================== Current Series Release Notes ============================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000024133 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. 
release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/victoria.rst0000664000175000017500000000022000000000000024421 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/wallaby.rst0000664000175000017500000000021400000000000024237 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/xena.rst0000664000175000017500000000020000000000000023532 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000023536 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/releasenotes/source/zed.rst0000664000175000017500000000017400000000000023373 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. 
release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/requirements.txt0000664000175000017500000000051700000000000021353 0ustar00zuulzuul00000000000000oslo.concurrency>=3.26.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 oslo.serialization>=2.18.0 # Apache-2.0 oslo.utils>=4.7.0 # Apache-2.0 iso8601>=0.1.11 # MIT oslo.log>=3.36.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 WebOb>=1.7.1 # MIT netaddr>=0.7.18 # BSD ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1716452798.866407 oslo.versionedobjects-3.4.0/setup.cfg0000664000175000017500000000176500000000000017716 0ustar00zuulzuul00000000000000[metadata] name = oslo.versionedobjects summary = Oslo Versioned Objects library description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/oslo.versionedobjects/latest/ python_requires = >=3.8 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3 :: Only Programming Language :: Python :: Implementation :: CPython [files] packages = oslo_versionedobjects [entry_points] oslo.config.opts = oslo.versionedobjects = oslo_versionedobjects._options:list_opts [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 
oslo.versionedobjects-3.4.0/setup.py0000664000175000017500000000127100000000000017577 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/test-requirements.txt0000664000175000017500000000044200000000000022325 0ustar00zuulzuul00000000000000hacking>=6.1.0,<6.2.0 # Apache-2.0 oslotest>=3.2.0 # Apache-2.0 testtools>=2.2.0 # MIT coverage>=4.0 # Apache-2.0 jsonschema>=3.2.0 # MIT stestr>=2.0.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD # Bandit security code scanner bandit>=1.7.0,<1.8.0 # Apache-2.0 pre-commit>=2.6.0 # MIT ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1716452752.0 oslo.versionedobjects-3.4.0/tox.ini0000664000175000017500000000252000000000000017376 0ustar00zuulzuul00000000000000[tox] minversion = 3.18.0 envlist = py3,pep8 basepython = python3 ignore_basepython_conflict = true [testenv] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} [testenv:pep8] commands = pre-commit run -a # Run security linter bandit -r oslo_versionedobjects tests -n5 --skip B303 [testenv:venv] commands = {posargs} [testenv:cover] 
commands = python setup.py test --coverage --coverage-package-name=oslo_versionedobjects --testr-args='{posargs}' [testenv:docs] allowlist_externals = rm deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = rm -fr doc/build sphinx-build -W --keep-going -b html doc/source doc/build/html [testenv:releasenotes] allowlist_externals = rm deps = {[testenv:docs]deps} commands = rm -rf releasenotes/build sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html [flake8] # E123, E125 skipped as they are invalid PEP-8. # W504 skipped as you must choose this or W503 show-source = True ignore = E123,E125,W504 builtins = _ exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build [hacking] import_exceptions = oslo_versionedobjects._i18n