oslo.db-16.0.0/.coveragerc:

[run]
branch = True
source = oslo_db
omit = oslo_db/tests/*

[report]
ignore_errors = True
precision = 2

oslo.db-16.0.0/.mailmap:

# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>

oslo.db-16.0.0/.pre-commit-config.yaml:

# We from the Oslo project decided to pin repos based on the
# commit hash instead of the version tag to prevent arbitrary
# code from running on developers' machines. To update to a
# newer version, run `pre-commit autoupdate` and then replace
# the newer versions with their commit hash.

default_language_version:
  python: python3

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: 9136088a246768144165fcc3ecc3d31bb686920a  # v3.3.0
    hooks:
      - id: trailing-whitespace
      # Replaces or checks mixed line ending
      - id: mixed-line-ending
        args: ['--fix', 'lf']
        exclude: '.*\.(svg)$'
      # Forbid files which have a UTF-8 byte-order marker
      - id: check-byte-order-marker
      # Checks that non-binary executables have a proper shebang
      - id: check-executables-have-shebangs
      # Check for files that contain merge conflict strings.
      - id: check-merge-conflict
      # Check for debugger imports and py37+ breakpoint()
      # calls in python source
      - id: debug-statements
      - id: check-yaml
        files: .*\.(yaml|yml)$
  - repo: local
    hooks:
      - id: flake8
        name: flake8
        additional_dependencies:
          - hacking>=6.1.0,<6.2.0
        language: python
        entry: flake8
        files: '^.*\.py$'
        exclude: '^(doc|releasenotes|tools)/.*$'

oslo.db-16.0.0/.stestr.conf:

[DEFAULT]
test_path=${OS_TEST_PATH:-oslo_db/tests}
top_dir=./

oslo.db-16.0.0/.zuul.yaml:

- job:
    name: oslodb-tox-py38-tips
    parent: openstack-tox-py38
    description: |
      Run unit tests for oslo.db with main branch of important libs.
      Takes advantage of the base tox job's install-siblings feature.
    # The job only tests the latest and shouldn't be run on the stable branches
    branches:
      regex: ^stable
      negate: true
    required-projects:
      - name: github.com/sqlalchemy/sqlalchemy
        override-checkout: main
      - name: github.com/sqlalchemy/alembic
        override-checkout: main
    vars:
      # Set work dir to oslo.db so that if it's triggered by one of the
      # other repos the tests will run in the same place
      zuul_work_dir: src/opendev.org/openstack/oslo.db

- job:
    name: oslodb-tox-py310-tips
    parent: openstack-tox-py310
    description: |
      Run unit tests for oslo.db with main branch of important libs.
      Takes advantage of the base tox job's install-siblings feature.
# The job only tests the latest and shouldn't be run on the stable branches branches: regex: ^stable negate: true required-projects: - name: github.com/sqlalchemy/sqlalchemy override-checkout: main - name: github.com/sqlalchemy/alembic override-checkout: main vars: # Set work dir to oslo.db so that if it's triggered by one of the # other repos the tests will run in the same place zuul_work_dir: src/opendev.org/openstack/oslo.db - project-template: name: oslodb-tox-unit-tips check: jobs: - oslodb-tox-py38-tips - oslodb-tox-py310-tips gate: jobs: - oslodb-tox-py38-tips - oslodb-tox-py310-tips - project: templates: - oslodb-tox-unit-tips - check-requirements - lib-forward-testing-python3 - openstack-python3-jobs - periodic-stable-jobs - publish-openstack-docs-pti - release-notes-jobs-python3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086402.0 oslo.db-16.0.0/AUTHORS0000664000175000017500000001700600000000000014235 0ustar00zuulzuul00000000000000AKamyshnikova Adrian Chiris Akihiro Motoki Alessio Ababilov Alexander Gorodnev Alexandru Coman Alexei Kornienko Allison Randal Alvaro Lopez Garcia Andreas Jaeger Andreas Jaeger Andrew Laski Andrew Melton Andrey Kurilin Angus Lees Angus Salkeld Angus Salkeld Anh Tran Ann Kamyshnikova Ayumu Ueha Ben Nemec Boris Bobrov Boris Pavlovic Brant Knudson Brian Elliott Bryan Jones Cedric Brandily Chang Bo Guo ChangBo Guo(gcb) Chris Behrens Christian Berendt Chuck Short Corey Bryant Cyril Roelandt D G Lee Daniel Bengtsson Darragh O'Reilly Davanum Srinivas Davanum Srinivas David Edery David Peraza Devananda van der Veen Dima Shulyak Dina Belova Dirk Mueller Dmitry Tantsur Dong Ma Doug Hellmann Doug Hellmann Edan David Eli Qiao Elod Illes Eric Brown Eric Guo Eric Harney Eric Windisch Erik Olof Gunnar Andersson Eugene Nikanorov Eugeniya Kudryashova Flavio Percoco Florian Haas Gary Kotton Gary Kotton Ghanshyam Ghanshyam Mann Gregory Haynes HanXue Lai Henry Gessau Hervé Beraud Huai Jiang Ihar Hrachyshka Ildiko Ilya Pekelny Ilya Shakhat James Carey James E. Blair Janonymous Jasakov Artem Jason Kölker Javier Pena Jay Lau Jay Pipes Jeremy Stanley Joe Gordon Joe Heck Johannes Erdfelt Joshua Harlow Joshua Harlow Joshua Harlow Julia Varlamova Julian Sy Julien Danjou Kai Zhang Kamlesh Chauvhan Kenneth Giusti Kevin Benton Kevin Benton Lucas Alvares Gomes Luis A. 
Garcia Marco Fargetta Mark McLoughlin Matt Riedemann Matthew Treinish Max Lobur Mehdi Abaakouk Mehdi Abaakouk Michael J Fork Michael Wilson Mike Bayer Moisés Guimarães de Medeiros Monty Taylor Morgan Fainberg Nikita Konovalov Noorul Islam K M Oleksii Chuprykov OpenStack Release Bot Paul Bourke Pavel Kholkin Pekelny Ilya Petr Blaho Pierre Riteau Pierre-Samuel Le Stang Rajaram Mallya Robert Collins Rodolfo Alonso Hernandez Roman Podoliaka Roman Podolyaka Roman Vasilets Ronald Bradford Sam Betts Sean Dague Sean McGinnis Sean McGinnis Sergey Kraynev Sergey Lukjanov Sergey Nikitin Shawn Boyette Shuangtai Tian Stephen Finucane Steve Kowalik Steve Martinelli Steven Hardy Takashi Kajinami Takashi Natsume Thomas Bechtold Thomas Herve Tianhua Huang Tim Kelsey Timofey Durakov Tony Breeds Tony Xu Tovin Seven Victor Sergeyev Victor Stinner Vieri <15050873171@163.com> Vlad Okhrimenko Vladyslav Drok Vu Cong Tuan Wu Wenxiang Yaguo Zhou Yaguo Zhou Yikun Jiang YuehuiLei Zane Bitter Zhang Chun Zhang Xin Zhi Yan Liu ZhijunWei ZhongShengping Zhongyue Luo ahdj007 bhagyashris blue55 caoyuan chenghuiyu chenlx dengzhaosen dineshbhor ekudryashova fumihiko kakuma gengjh howardlee int32bit jacky06 likui liuyamin ljhuang loooosy melanie witt melissaml nandal oorgeron pkholkin rossella sunguangning tengqm venkatamahesh wanghui wangqi wingwj wu.shiming xuanyandong yan.haifeng zhang-jinnan zhangboye zhangguoqing zhangyangyang zhulingjie ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/CONTRIBUTING.rst0000664000175000017500000000663700000000000015636 0ustar00zuulzuul00000000000000If you would like to contribute to the development of oslo's libraries, first you must take a look to this page: https://specs.openstack.org/openstack/oslo-specs/specs/policy/contributing.html ================= How to contribute ================= If you would like to contribute to the development of OpenStack, you must follow the steps in this page: https://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/oslo.db How to run unit tests ===================== oslo.db (as all OpenStack projects) uses tox to run unit tests. You can find general information about OpenStack unit tests and testing with tox in wiki_. oslo.db tests use PyMySQL as the default MySQL DB API driver (which is true for OpenStack), and psycopg2 for PostgreSQL. pip will build these libs in your venv, so you must ensure that you have the required system packages installed for psycopg2 (PyMySQL is a pure-Python implementation and so needs no additional system packages). For Ubuntu/Debian they are python-dev, and libpq-dev. For Fedora/CentOS - gcc, python-devel and postgresql-devel. The oslo.db unit tests system allows to run unittests on real databases. At the moment it supports MySQL, PostgreSQL and SQLite. For testing on a real database backend you need to set up a user ``openstack_citest`` with password ``openstack_citest`` on localhost (some OpenStack projects require a database named 'openstack_citest' too). Please note, that this user must have permissions to create and drop databases. 
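Once the ``openstack_citest`` account exists (the per-backend creation
recipes follow below), a quick way to confirm it can actually create and
drop databases is a check along the following lines. This is only an
illustrative sketch, not part of the documented workflow: it assumes the
localhost credentials described above, a MySQL backend with PyMySQL
installed in your virtualenv, and a throwaway database name invented for
this example; an equivalent ``mysql`` or ``psql`` one-liner works just as
well::

    # Illustrative check only: verify the test account can CREATE and DROP
    # a database, which is what the opportunistic backend tests require.
    import sqlalchemy

    engine = sqlalchemy.create_engine(
        "mysql+pymysql://openstack_citest:openstack_citest@localhost/")
    with engine.connect() as conn:
        conn.execute(sqlalchemy.text("CREATE DATABASE openstack_citest_check"))
        conn.execute(sqlalchemy.text("DROP DATABASE openstack_citest_check"))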
If the testing system is not able to connect to the backend, tests on it
will be skipped.

For PostgreSQL on Ubuntu you can create a user in the following way::

    sudo -u postgres psql
    postgres=# create user openstack_citest with createdb login password
               'openstack_citest';

For MySQL you can use the following commands::

    mysql -u root
    mysql> CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY
           'openstack_citest';
    mysql> GRANT ALL PRIVILEGES ON * . * TO 'openstack_citest'@'localhost';
    mysql> FLUSH PRIVILEGES;

See the script ``tools/test-setup.sh`` for how the databases are set up
exactly in the OpenStack CI infrastructure, and use that for your own setup.

Alternatively, you can use `pifpaf`_ to run the unit tests directly without
setting up the database yourself. You still need to have the database
software installed on your system. The following tox environments can be
used::

    tox -e py27-mysql
    tox -e py27-postgresql
    tox -e py34-mysql
    tox -e py34-postgresql
    tox -e py27-all
    tox -e py34-all

The database will be set up for you locally and temporarily on each run.

Another way is to start `pifpaf` manually and use it to run the tests as you
wish::

    $ eval `pifpaf -g OS_TEST_DBAPI_ADMIN_CONNECTION run postgresql`
    $ echo $OS_TEST_DBAPI_ADMIN_CONNECTION
    postgresql://localhost/postgres?host=/var/folders/7k/pwdhb_mj2cv4zyr0kyrlzjx40000gq/T/tmpMGqN8C&port=9824
    $ tox -e py27
    […]
    $ tox -e py34
    […]
    # Kill pifpaf once you're done
    $ kill $PIFPAF_PID

.. _wiki: https://wiki.openstack.org/wiki/Testing#Unit_Tests
.. _pifpaf: https://github.com/jd/pifpaf

oslo.db-16.0.0/ChangeLog:

CHANGES ======= 16.0.0 ------ * Omit params for execute if empty 15.1.0 ------ * reno: Update master for unmaintained/zed * Remove old excludes * exc\_filters: Handle OperationalError for MariaDB/Galera * Update master for stable/2024.1 * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria * Remove [database] use\_tpool and TpoolDbapiWrapper 15.0.0 ------ * Display coverage report * reno: Update master for unmaintained/yoga * Bump hacking (again) * Bump hacking * Update python classifier in setup.cfg * Fix zuul config warning * bindep: Use new mysql-\* package names * Update master for stable/2023.2 * Add link to bug 14.0.0 ------ * Remove MySQL NDB Cluster Support * trivial: Reflow docstrings * tests: Use column\_property * exc\_filters: Do not reraise 13.1.0 ------ * Handle server default quoting * Bump bandit * Imported Translations from Zanata 13.0.1 ------ * Revert "Moves supported python runtimes from version 3.8 to 3.10" 13.0.0 ------ * Ignore unset opts when deprecated * Add release note for base test class removal * Remove logic for SQLAlchemy < 1.4 * Match exceptions with multiple lines * Remove dead code * Don't sleep in tests * Moves supported python runtimes from version 3.8 to 3.10 * Run unit tests against main branch of sqlalchemy, alembic * Use SQLAlchemy native pre-ping * Get test suite to full pass with SQLAlchemy 2.0 * Do not convert to string \`\`url.URL\`\` objects * Remove legacy base test classes * Remove sqlalchemy-migrate * Rollback the connection after server ping method * Update master for stable/2023.1 * Imported Translations from Zanata 12.3.1 ------ * Fix default value for wsrep\_sync\_wait option 12.3.0 ------ * Add option for wsrep\_sync\_wait
* skip bandit on oslo\_db/tests 12.2.0 ------ * Add Python3 antelope unit tests * Imported Translations from Zanata * Fix misuse of assert\_has\_calls * tests: Define a primary key * tests: Fix compatibility with PostgreSQL 14+ * Update master for stable/zed * types: Set 'cache\_ok' (redux) 12.1.0 ------ * Replace abc.abstractproperty with property and abc.abstractmethod * Deprecate MySQL NDB Cluster Support * trivial: Formatting changes for oslo\_db.options 12.0.0 ------ * Imported Translations from Zanata * Drop python3.6/3.7 support in testing runtime 11.3.0 ------ * Add Python3 zed unit tests * Update master for stable/yoga * tox: Silence output * trivial: Don't emit warnings for our own deprecations * tests: Enable SAWarning warnings * Remove the 'Session.autocommit' parameter * Add missing 'connect' wrapper * Don't call 'begin()' on existing transaction 11.2.0 ------ * utils: Remove troublesome utility methods * Update python testing classifier * tests: Restore - don't reset - warning filters 11.1.0 ------ * Configure driver for postgres * Add Python3 yoga unit tests * Update master for stable/xena 11.0.0 ------ * requirements: Bump sqlalchemy lower constraint * Remove use of Session.begin.subtransactions flag * Don't rely on implicit autocommit * Replace use of 'Engine.execute()' * Don't call mapper() outside of declarative registry * Don't pass kwargs to connection.execute() * Replace use of Executable.execute method * Remove unnecessary warning filter * Replace use of Engine.scalar() * Don't use the 'Row.keys()' method * Don't use dict-style attribute accesses * Don't use plain string SQL statements * Update import of declarative\_base() * Replace use of Table.autoload parameter * Replace use of update.values parameter * Replace use of update.whereclause parameter * Replace use of insert.values parameter * Add missing bind argument to calls * Don't pass strings to Connection.execute() * Remove use of MetaData.bind argument * Remove legacy calling style of select() * tests: Enable SQLAlchemy 2.0 deprecation warnings * utils: Deprecate sqlalchemy-migrate-related functions * tests: Enable SADeprecationWarning warnings * tests: Use common base class * tests: Enfeeble 'oslo\_db.tests.utils.BaseTestCase' * tests: Clean up base test * Drop checks for IBM DB2 * tox: Simplify test running * options: Remove deprecated option aliases * tests: Remove 'ModelsMigrationsSync.check\_foreign\_keys' * concurrency: Deprecate 'TpoolDbapiWrapper' * sqlalchemy: Remove checks for older deps 10.0.0 ------ * Remove the idle\_timeout option * Remove the useless else * types: Set 'cache\_ok' * Changed minversion in tox to 3.18.0 9.1.0 ----- * Followup of "Added handler for mysql 8.0.19 duplicate key error update" * Added handler for mysql 8.0.19 duplicate key error update * update the pre-commit-hooks version * Replace getargspec with getfullargspec 9.0.0 ----- * setup.cfg: Replace dashes with underscores 8.6.0 ----- * Don't use private API to get query criteria * Remove the sql\_max\_pool\_size option * Fix formatting of release list * move flake8 as a pre-commit local target * Add Python3 xena unit tests * Update master for stable/wallaby * Fix the conflict status with hacking * Remove lower-constraints remnants * remove unicode from code * Use TOX\_CONSTRAINTS\_FILE * Dropping lower constraints testing * Accommodate immutable URL api * Use TOX\_CONSTRAINTS\_FILE * Use py3 as the default runtime for tox 8.5.0 ----- * Deprecate the 'oslo\_db.sqlalchemy.migration\_cli' module * Deprecate 
'oslo\_db.sqlalchemy.migration' module * Imported Translations from Zanata * Adding pre-commit * Add Python3 wallaby unit tests * Update master for stable/victoria 8.4.0 ----- * [goal] Migrate testing to ubuntu focal * Bump bandit version 8.3.0 ----- * requirements: Drop os-testr * Make test-setup.sh compatible with mysql8 8.2.1 ----- * Fix pygments style * Set create\_constraint=True for boolean constraint test 8.2.0 ----- * Fix hacking min version to 3.0.1 * Switch to newer openstackdocstheme and reno versions * Remove the unused coding style modules * Remove translation sections from setup.cfg * Align contributing doc with oslo's policy * Bump default tox env from py37 to py38 * Add py38 package metadata * Imported Translations from Zanata * Add release notes links to doc index * Remove use of deprecated LOG.warn * Add Python3 victoria unit tests * Update master for stable/ussuri * Modernize use of table.count() with func.count() 8.1.0 ----- * Use unittest.mock instead of third party mock * Update hacking for Python3 8.0.0 ----- * Remove 'handle\_connect\_error' * Drop use of six * Remove final references to mox * Raise minimum SQLAlchemy version to 1.2.0 * remove outdated header * reword releasenote for py27 support dropping * Ignore releasenote artifacts files 7.0.0 ----- * [ussuri][goal] Drop python 2.7 support and testing * Drop unittest2 usage * tox: Trivial cleanup 6.0.0 ----- * Imported Translations from Zanata * gitignore: Add reno artefacts * Convert remaining use of mox * Remove deprecated class DbMigrationError since we already have DBMigrationError 5.1.1 ----- * Use regex to compare SQL strings with IN * Bump the openstackdocstheme extension to 1.20 * Reduce severity of wrapped exceptions logs to warning * tox: Keeping going with docs * Switch to official Ussuri jobs 5.1.0 ----- * Update master for stable/train 5.0.2 ----- * Add Python 3 Train unit tests * Add libpq-dev to bindep.txt * Use connect, not contextual\_connect * Add local bindep.txt 5.0.1 ----- * Rollback existing nested transacvtion before restarting * Dropping the py35 testing 5.0.0 ----- * docs: Use sphinxcontrib.apidoc for building API docs * Cap Bandit below 1.6.0 and update Sphinx requirement * Replace git.openstack.org URLs with opendev.org URLs 4.46.0 ------ * OpenDev Migration Patch * Fix deprecation warnings under py36 * Support context function argument as keyword * Removing deprecated min\_pool\_size * Bump psycopg lower-constraint to 2.7 * Update master for stable/stein 4.44.0 ------ * exc\_filters: fix deadlock detection for MariaDB/Galera cluster * Resolve SAWarning in Query.soft\_delete() * Update hacking version 4.43.0 ------ * Remove convert\_unicode flag * Use template for lower-constraints * Update mailinglist from dev to discuss 4.42.0 ------ * Add "is\_started" flag to enginefacade * Move warnings to their own module * Clean up .gitignore references to personal tools * Always build universal wheels * Don't quote {posargs} in tox.ini 4.41.1 ------ * Fix FOREIGN KEY messages for MariaDB 10.2, 10.3 * Imported Translations from Zanata * add lib-forward-testing-python3 test job * add python 3.6 unit test job * import zuul job settings from project-config * Update reno for stable/rocky * Switch to stestr 4.40.0 ------ * Rename enginefacade.async to enginefacade.async\_ * Add release notes to README.rst 4.39.0 ------ * Remove most server\_default comparison logic * remove sqla\_09 test environment * fix tox python3 overrides 4.38.0 ------ * Remove stale pip-missing-reqs tox test * Deprecate 
min\_pool\_size * List PyMySQL first in installation docs 4.37.0 ------ * Trivial: Update pypi url to new url 4.36.0 ------ * set default python to python3 * Improve exponential backoff for wrap\_db\_retry * uncap eventlet * add lower-constraints job 4.35.0 ------ * Add testresources / testscenarios to requirements.txt * Updated from global requirements 4.34.0 ------ * Ignore 'use\_tpool' option * Remove tox\_install.sh and align constraints consumption * Update links in README * Ensure all test fixtures in oslo\_db.tests are private * Imported Translations from Zanata * Conditionally adjust for quoting in comparing MySQL defaults * Imported Translations from Zanata * Allow connection query string to be passed separately * Reverse role of synchronous\_reader * Imported Translations from Zanata * Update reno for stable/queens * Updated from global requirements * Fix a typo of "transaction" in comment * Updated from global requirements * Updated from global requirements * Updated from global requirements 4.33.0 ------ * Updated from global requirements * add bandit to pep8 job * Drop tox-mysql-python job * Use the new PTI for document build * Updated from global requirements 4.32.0 ------ * Imported Translations from Zanata * Updated from global requirements * Add requirements.txt for docs builds 4.31.0 ------ * Updated from global requirements * Handle deprecation of inspect.getargspec 4.30.0 ------ * Remove setting of version/release from releasenotes * Updated from global requirements * Updated from global requirements * Updated from global requirements * Use assertRegex instead of assertRegexpMatches * Zuul: add file extension to playbook path * Remove kwarg retry\_on\_request in wrap\_db\_retry * Migrate to zuulv3 - move legacy jobs to project 4.29.0 ------ * Use skipTest() method instead of deprecated skip() * Drop MySQL-python dependency from oslo.db * Updated from global requirements * Remove method get\_connect\_string and is\_backend\_avail * Test illegal "boolean" values without Boolean datatype * Imported Translations from Zanata 4.28.0 ------ * Add new foreign key utility function to utils * Updated from global requirements 4.27.0 ------ * Fix pagination when marker value is None * Updated from global requirements * Remove property message for DBInvalidUnicodeParameter and InvalidSortKey * Throw DBMigrationError instead of DbMigrationError * Remove function optimize\_db\_test\_loader in test\_base.py * Remove provisioned\_engine in class BackendImpl * Remove AutoString\* in ndb.py * Remove class InsertFromSelect * Remove class TransactionResource * Remove method provisioned\_engine in class Backend * Update the documentation link 4.26.0 ------ * Updated from global requirements * Rename idle\_timeout to connection\_recycle\_time * Workaround non-compatible type.adapt() for SQLAlchemy < 1.1 * Let others listen to SQLAlchemy errors * Update reno for stable/pike * Replace ndb "auto" types with unified String * Updated from global requirements * Remove deprecation warning when loading tests/sqlalchemy * Replace six.iteritems() with .items() 4.25.0 ------ * Log an exception when reconnect-to-disconnected occurs * Don't access connection.info if connection is invalidated * Update URLs according to document migration 4.24.1 ------ * update the docs url in the readme * turn on warning-is-error in doc build * switch from oslosphinx to openstackdocstheme 4.24.0 ------ * rearrange content to fit the new standard layout * Updated from global requirements * Fix compatibility with SQLAlchemy 
< 1.1.0 * Enable MySQL Storage Engine selection * Updated from global requirements * Updated from global requirements * Using assertIsNone(xxx) instead of assertEqual(None, xxx) 4.23.0 ------ * Updated from global requirements * Updated from global requirements 4.22.0 ------ * Updated from global requirements * add release note for new warning about missing driver * Raise only DbMigrationError from migrate 'upgrade' method * Warn on URL without a drivername * Updated from global requirements 4.21.1 ------ * Updated from global requirements * Add 'save\_and\_reraise\_exception' method when call 'session.rollback()' * Move oslo.context to test-requirements * Attach context being used to session/connection info * Updated from global requirements 4.21.0 ------ * Updated from global requirements * Updated from global requirements 4.20.0 ------ * Remove log translations 4.19.0 ------ * Updated from global requirements * Remove deprecated config option sqlite\_db * Imported Translations from Zanata * Updated from global requirements 4.18.0 ------ * Updated from global requirements * Update test requirement * Establish flush() for "sub" facade contexts * Remove unused logging import * Support facade arguments, unstarted facade for patch\_engine() * Repair unused rollback\_reader\_sessions parameter * Updated from global requirements * Prepare for using standard python tests * Explain paginate\_query doesn't provide parameter offset * Updated from global requirements * Update reno for stable/ocata * Coerce booleans to integer values in paginate\_query * Remove references to Python 3.4 4.17.0 ------ * Modify word "whetever" to "whether" * Updated from global requirements * Add Constraints support * Support packet sequence wrong error * docs: mention that it's possible to use Connection directly * exc\_filters: fix deadlock detection for percona xtradb cluster * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Fix marker checking when value is None * Strip prefix \`migrate\_\` in parameter \`migrate\_engine\` 4.16.0 ------ * migration: don't assume the mixin use provision * Check if an index on given columns exists 4.15.0 ------ * Ensure provision\_new\_database is True by default * Don't run LegacyBaseClass provision test for unavailable database * SoftDeleteMixin: allow for None values * SoftDeleteMixin: coerce deleted param to be an integer * Show team and repo badges on README * Support MariaDB error 1927 * Break optimize\_db\_test\_loader into package and module level * Adjust SAVEPOINT cause test for SQLA 1.1 * Updated from global requirements * Restore provisioning of DBs in legacy test base * Updated from global requirements * Updated from global requirements * Enhanced fixtures for enginefacade-based provisioning * utils: deprecate InsertFromSelect properly * Using new style assertions instead of old style assertions * Updated from global requirements * Fix exc\_filters for mysql-python * Updated from global requirements * Change assertTrue(isinstance()) by optimal assert * OpenStack typo * Changed the home-page link 4.14.0 ------ * standardize release note page ordering * Add a specific exception for 'unknown database' errors * Enable release notes translation * Updated from global requirements * Add DBDataError for "Data too long" * Updated from global requirements * Updated from global requirements * Add additional caution looking for table, info * utils: fix get\_unique\_keys() when model has no table attached to it * Update reno for stable/newton * Updated from global requirements 
* Correctly detect incomplete sort\_keys passed to paginate\_query * Fix DBReferenceError and DBNonExistentTable with new PyMySQL version 4.13.0 ------ * Updated from global requirements 4.12.0 ------ * Updated from global requirements * Link enginefacade to test database provisioning * Display full reason for backend not available * Updated from global requirements * Deprecate argument sqlite\_db in method set\_defaults 4.11.0 ------ * Updated from global requirements * Add test helpers to enginefacade * Add logging\_name to enginefacade config 4.10.0 ------ * Capture DatabaseError for deadlock check * Add a hook to process newly created engines 4.9.0 ----- * Updated from global requirements * Memoize sys.exc\_info() before attempting a savepoint rollback * Updated from global requirements * Fix parameters of assertEqual are misplaced * Consolidate pifpaf commands into variables * Updated from global requirements * Updated from global requirements * Fixed unit tests running on Windows * Remove discover from setup.cfg * Add dispose\_pool() method to enginefacade context, factory 4.8.0 ----- * Updated from global requirements * Set a min and max on the connection\_debug option * Set max pool size default to 5 * Add support for LONGTEXT, MEDIUMTEXT to JsonEncodedType * tox: add py35 envs for convenience * Deprecate config option sqlite\_db for removal * Catch empty value DBDuplicate errors * release notes: mention changes in wrap\_db\_retry() * Updated from global requirements * api: use sane default in wrap\_db\_retry() * Imported Translations from Zanata * exc\_filters: catch and translate non existent table on drop * exception: make message mandatory in DbMigrationError and deprecates it * Make it possible to use enginefacade decorators with class methods * Updated from global requirements * tests: fix order of assertEqual in exc\_filter * exc\_filters: catch and translate non existent constraint on drop * Replace tempest-lib dependency with os-testr * Imported Translations from Zanata * Fix typos in comments and docstring * Updated from global requirements * Updated from global requirements * Fix typo: 'olso' to 'oslo' * Repair boolean CHECK constraint detection * api: do not log a traceback if error is not expected * Fix imports in doc * Allow testing of MySQL and PostgreSQL scenario locally * Add support for custom JSON serializer * api: always enable retry\_on\_request * Remove oslo-incubator related stuff * Updated from global requirements * Updated from global requirements * Remove direct dependency on babel * Imported Translations from Zanata * Add debtcollector to requirements * Fix unit tests failures, when psycopg2 is not installed * Fix server\_default comparison for BigInteger * Remove unused sqlite\_fk in \_init\_connection\_args call * Updated from global requirements * Fix db\_version checking for sqlalchemy-migrate * Correct docstring * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Raise DbMigrationError for invalid version * Add new filter for DBDataError exception * Fix spelling mistake * Let enginefacade.\_TransactionContextManager look for context * Remove sqlalchemy < 1.0.0 compatible layer * Update reno for stable/mitaka * Updated from global requirements * Add tests for float interval values in wrap\_db\_retry() 4.6.0 ----- * Increase the default max\_overflow value * Updated from global requirements * add reno for release notes management 
* Updated from global requirements * Updated from global requirements * Clarify the types for retry\_interval args of wrap\_db\_retry 4.5.0 ----- * Updated from global requirements * stop making a copy of options discovered by config generator 4.4.0 ----- * exceptions: provide .message attribute for Py3K compatibility * Updated from global requirements * InvalidSortKey constructor change breaks Heat unittests * exception: fix DBInvalidUnicodeParameter error message * exceptions: enhance InvalidSortKey to carry the invalid key * exception: fix InvalidSortKey message * Update translation setup * Updated from global requirements * Add exc\_filter for invalid Unicode string * Updated from global requirements * Updated from global requirements * Updated from global requirements 4.3.1 ----- * Imported Translations from Zanata * Updated from global requirements * Fix tests to work under both pymsysql 0.6.2 and 0.7.x * Don't log non-db error in retry wrapper 4.3.0 ----- * Updated from global requirements * Put py34 first in the env order of tox * Updated from global requirements 4.2.0 ----- * Fix comparison of Variant and other type in compare\_type * Updated from global requirements * Updated from global requirements * Don't trace DB errors when we're retrying * Updated from global requirements * Remove iso8601 in requirements.txt * Trival: Remove 'MANIFEST.in' 4.1.0 ----- * Refactor deps to use extras and env markers * Added allow\_async property 4.0.0 ----- * Updated from global requirements * Remove python 2.6 classifier * Remove python 2.6 and cleanup tox.ini 3.2.0 ----- * Detect not-started \_TransactionFactory in legacy * Added method get\_legacy\_facade() to the \_TransactionContextManager * Removed Unused variable 'drivertype' * Updated from global requirements * Imported Translations from Zanata * Updated from global requirements 3.1.0 ----- * Updated from global requirements * Updated from global requirements * Add debug logging for DB retry attempt 3.0.0 ----- * Fix coverage configuration and execution * Add universal wheel tag to setup.cfg * No need for Oslo Incubator Sync * Updated from global requirements * Correct invalid reference * Imported Translations from Zanata * Use stevedore directive to document plugins * Make readme and documentation titles consistent * Docstring fixes for enginefacade * Fix warnings in docstrings * Autogenerate the module docs * Add config options to the documentation * Add support for pickling enginefacade context objects * Change ignore-errors to ignore\_errors * Fix the home-page value with Oslo wiki page * Updated from global requirements * Imported Translations from Zanata 2.6.0 ----- * Imported Translations from Transifex * Handle case where oslo\_db.tests has not been imported * Updated from global requirements 2.5.0 ----- * Updated from global requirements * Imported Translations from Transifex * Updated from global requirements * Move runtime test resources into setup.cfg [extras] * Updated from global requirements 2.4.1 ----- * Assume relative revisions belong to alembic * Use correct config key in alembic extension * Fix exception message about unavailable backend 2.4.0 ----- * Imported Translations from Transifex * Updated from global requirements * Updated from global requirements * Fix hacking rules and docs job * Imported Translations from Transifex * pagination: enhance sorting of null values * Upgrade and downgrade based on revision existence * Imported Translations from Transifex * Updated from global requirements * Add JSON-encoded 
types for sqlalchemy 2.3.0 ----- * Imported Translations from Transifex * Python 3: Use use\_unicode=1 under Python 3 * Imported Translations from Transifex * Updated from global requirements * Fix test\_migrations on Python 3 * Improve failure mode handling in enginefacade 2.2.0 ----- * Imported Translations from Transifex * Updated from global requirements * Updated from global requirements * Add mock to test-requirements.txt * Test that concurrent sqlalchemy transactions don't block * Updated from global requirements * Added catching of errors 1047 (Galera) for MySQL oslo db reconnect * Remove outdated tox environments for SQLAlchemy 0.8 * Imported Translations from Transifex 2.1.0 ----- * Allow projects that use test\_models\_sync to filter some changes * Updated from global requirements * Add legacy get\_sessionmaker() method 2.0.0 ----- * Fix sqlalchemy.ModelBase.\_\_contains\_\_() behaviour * Add tox target to find missing requirements * Allow additional exceptions in wrap\_db\_retry * Remove implicit RequestContext decoration * Add a new ModelBase.items() method * Updated from global requirements * Add oslo.context to requirements.txt * Imported Translations from Transifex 1.12.0 ------ * Updated from global requirements * Remove oslo namespace package * Drop use of 'oslo' namespace package * Switch from MySQL-python to PyMySQL * Updated from global requirements * Switch badges from 'pypip.in' to 'shields.io' * Updated from global requirements 1.11.0 ------ * Replace utils method with oslo.utils reflection provided one * Implement new oslo.db.sqlalchemy.enginefacade module * Allow to fail instead of skip in DbFixture 1.10.0 ------ * Updated from global requirements * Imported Translations from Transifex * Add a keys() method to SQLAlchemy ModelBase * Remove support for Python 3.3 * Updated from global requirements * Remove run\_cross\_tests.sh * Sort model fields using getattr(), not inspect() * Imported Translations from Transifex * Updated from global requirements * Remove pre-SQLAlchemy-0.9.7 compat utilities * Add Python 3 classifiers to setup.cfg 1.9.0 ----- * Uncap library requirements for liberty 1.8.0 ----- * Sanity check after migration * Add filters for DBDataError exception * Add pypi download + version badges * exc\_filters: support for ForeignKey error on delete * Standardize setup.cfg summary for oslo libs * Update to latest hacking * Handle CHECK constraint integrity in PostgreSQL * Catch DBDuplicateError in MySQL if primary key is binary * Imported Translations from Transifex * Updated from global requirements * Imported Translations from Transifex * Provide working SQLA\_VERSION attribute * Avoid excessing logging of RetryRequest exception * Fixed bug in InsertFromSelect columns order * Add process guards + invalidate to the connection pool 1.7.0 ----- * Switch to non-namespaced module import - oslo\_i18n * Fix documented env variable for test connection * Updated from global requirements * Implement generic update-on-match feature 1.6.0 ----- * Updated from global requirements 1.5.0 ----- * Make DBAPI class work with mocks correctly * Updated from global requirements * Imported Translations from Transifex * Fix PyMySQL reference error detection * Use PyMySQL as DB driver in py3 environment * Updated from global requirements * Organize provisioning to use testresources * Add retry decorator allowing to retry DB operations on request * Imported Translations from Transifex * Implement backend-specific drop\_all\_objects for provisioning * Ensure that create\_engine() 
close test connection * Refactor database migration manager to use given engine * Fix 0 version handling in migration\_cli manager * Updated from global requirements * Fix PatchStacktraceTest for pypy * Update Oslo imports to remove namespace package * Retry query if db deadlock error is received 1.4.1 ----- * Restore the check\_foreign\_keys() method * Ensure DBConnectionError is raised on failed revalidate 1.4.0 ----- * Fix slowest test output after test run * Updated from global requirements * Make sure sort\_key\_attr is QueryableAttribute when query * Ensure mysql\_sql\_mode is set for MySQLOpportunisticTests * Add pretty\_tox wrapper script * Fix PatchStacktraceTest test * Ensure PostgreSQL connection errors are wrapped * Remove check\_foreign\_keys from ModelsMigrationsSync * Move files out of the namespace package * Updated from global requirements * Fix the link to the bug reporting site 1.3.0 ----- * Repair string-based disconnect filters for MySQL, DB2 * Fix python3.x scoping issues with removed 'uee' variable * Updated from global requirements * Fix test\_migrate\_cli for py3 * Fix TestConnectionUtils to py3x compatibility * Updated from global requirements * Upgrade exc\_filters for 'engine' argument and connect behavior * Workflow documentation is now in infra-manual 1.2.0 ----- * Imported Translations from Transifex * Fix nested() for py3 * Make test\_models pass on py3 * Repair include\_object to accommodate new objects * Add table name to foreign keys diff * Updated from global requirements * Handle Galera deadlock on SELECT FOR UPDATE * Add exception filter for \_sqlite\_dupe\_key\_error * Add info on how to run unit tests * Ensure is\_backend\_avail() doesn't leave open connections * Updated from global requirements 1.1.0 ----- * Imported Translations from Transifex * Add pbr to installation requirements * Updated from global requirements * Activate pep8 check that \_ is imported * Assert exceptions based on API, not string messages * Fix python3.x scoping issues with removed 'de' variable * Updated from global requirements * Updated from global requirements * Reorganize DbTestCase to use provisioning completely * Set utf8 encoding for mysql and postgresql * ModelsMigrationsSync: Add check for foreign keys * Updated from global requirements * Remove extraneous vim editor configuration comments * Remove utils.drop\_unique\_constraint() * Improve error reporting for backend import failures * Ensure create\_engine() retries the initial connection test * Imported Translations from Transifex * Use fixture from oslo.config instead of oslo-incubator * Move begin ping listener to a connect listener * Create a nested helper function that will work on py3.x * Imported Translations from Transifex * Start adding a environment for py34/py33 * Explicitly depend on six in requirements file * Unwrap DialectFunctionDispatcher from itself * Updated from global requirements * Use six.wraps instead of functools.wraps * Update help string to use database 1.0.1 ----- * Use \_\_qualname\_\_ if we can * Fixup Fixtures Use in db test classes * Add description for test\_models\_sync function * Use the six provided iterator mix-in * ModelsMigrationsSync:add correct server\_default check for Enum 1.0.0 ----- * Updated from global requirements * Imported Translations from Transifex * Add history/changelog to docs * Add a check for SQLite transactional state * Add run\_cross\_tests.sh script * Let oslotest manage the six.move setting for mox * Fix DBReferenceError on MySQL and SQLite * Renaming in 
WalkVersionsMixin * Clean up documentation * Use single quotes for db schema sanity check * warn against sorting requirements * ModelsMigrationsSync:Override compare\_server\_default * Updated from global requirements * Imported Translations from Transifex * Add doc8 to tox environment docs * Use oslo.i18n * Repair pysqlite transaction support * Extract logging setup into a separate function * Updated from global requirements * Remove reliance on create\_engine() from TestsExceptionFilter * Consolidate sqlite and mysql event listeners * Use dialect dispatch for engine initiailization * Add get\_non\_innodb\_tables() to utils * Added check to see whether oslotest is installed 0.4.0 ----- * Implement a dialect-level function dispatch system * Move to oslo.utils * Restore correct source file encodings * Handle DB2 SmallInteger type for change\_deleted\_column\_type\_to\_boolean * Imported Translations from Transifex * Fixes comments to pass E265 check * Fixes indentations to pass E128 check * Uses keyword params for i18n string to pass H703 * Adds empty line to multilines docs to pass H405 * Updates one line docstring with dot to pass H402 * Changes import orders to pass H305 check * Fixed DeprecationWarning in exc\_filters * Imported Translations from Transifex * oslo.db.exceptions module documentation * Updated from global requirements * Extension of DBDuplicateEntry exception * oslo.db.options module documentation * oslo.db.api module documentation * Imported Translations from Transifex * Use SQLAlchemy cursor execute events for tracing * Remove sqla\_07 from tox.ini * Updated from global requirements * Specify raise\_on\_warnings=False for mysqlconnector * Make MySQL regexes generic across MySQL drivers * Allow tox tests with complex OS\_TEST\_DBAPI\_CONNECTION URLs * Raise DBReferenceError on foreign key violation * Add host argument to get\_connect\_string() * Imported Translations from Transifex * Don't drop pre-existing database before tests * Port \_is\_db\_connection\_error check to exception filters * Integrate the ping listener into the filter system * Add disconnect modification support to exception handling * Implement new exception interception and filtering layer * Implement the SQLAlchemy \`\`handle\_error()\`\` event * Remove moxstubout.py from oslo.db * Added check for DB2 deadlock error * Bump hacking to version 0.9.2 * Opportunistic migration tests * Move all db exception to exception.py * Enable skipped tests from test\_models.py * Use explicit loops instead of list comprehensions * Imported Translations from Transifex * Allow usage of several iterators on ModelBase * Add DBDuplicateEntry detection for mysqlconnector driver * Check for mysql\_sql\_mode is not None in create\_engine() 0.3.0 ----- * Add a base test case for DB schema comparison * Test for distinct SQLAlchemy major releases * Updated from global requirements * Add \_\_contains\_\_ to ModelBase to fully behave like a dict * Fix test to not assume eventlet isn't present * Avoid usage of mutables as default args * Updated from global requirements 0.2.0 ----- * Fix kwarg passed twice error in EngineFacade.from\_config() 0.1.0 ----- * Add psycopg2 to test-requirements.txt * Adding dependency documentation for MySQL * Prevent races in opportunistic db test cases * Fix Sphinx directive name * Bump hacking to 0.9.x series * Add \_wrap\_db\_error support for postgresql * Handle slave database connection in EngineFacade * Add eventlet.tpool.Proxy for DB API calls * Added \`\`docs\`\` environment to tox.ini * Setup 
for translation * Remove common context from oslo.db * Remove common context usage from db model\_query() * replace string format arguments with function parameters * Make get\_session() pass kwargs to a sessionmaker * Allow for skipping thread\_yielding * Add index modifying methods * Log a cause of db backend connection failure * Do not always adjust sqlalchemy.engine logging * Fix the test using in-file SQLite database * Updated from global requirements * cleaning up index.rst file * Fix usage of oslo.config * Add import\_exceptions to tox.ini * Fix changing the type of column deleted * Remove redundant default=None for config options * remove definitions of Python Source Code Encoding * Improve help strings * Ignore oslo.db.egg-info * Allow cover tests to work * Fix wrong method name with assert\_called\_once\_with * Fix call to mock.assert\_not\_called() * Remove obsolete entries from .gitignore * Remove patch\_migrate() * Fix typos: Remove extra ")" in message * Fix .gitreview for oslo.db * Fix dhellmann's notes from April 18 * Make the tests passing * Fix the graduate.sh script result * Prevent races in opportunistic db test cases * Drop dependency oslo.db from common.log * Use oslotest instead of common test module * Start ping listener also for postgresql * Add a warning to not use get\_table for working with ForeignKeys * Ignore migrate versioning tables in utf8 sanity check * Fix sqlalchemy utils test cases for SA 0.9.x * Fix Keystone doc build errors with SQLAlchemy 0.9 * Make table utf-8 charset checking be optional for DB migration * Dispose db connections pool on disconnect * Python3: pass bytes as 'contents' to create\_tempfiles() * Do not use the 'extend' method on a dict\_items object * Set sql\_mode callback on connect instead of checkout * Fix excessive logging from db.sqlalchemy.session * Add lockutils fixture to OpportunisticTestCase * Move test\_insert\_from\_select unit test from nova to oslo * Adapt DB provisioning code for CI requirements * Make db utils importable without migrate * Remove requirements.txt from .gitignore * Get mysql\_sql\_mode parameter from config * Prevent incorrect usage of \_wrap\_db\_error() * Python3: define a \_\_next\_\_() method for ModelBase * Add from\_config() method to EngineFacade * db: move all options into database group * Drop special case for MySQL traditional mode, update unit tests * Make TRADITIONAL the default SQL mode * Introduce mysql\_sql\_mode option, remove old warning * Introduce a method to set any MySQL session SQL mode * Handle ibm\_db\_sa DBDuplicateEntry integrity errors * Fix doc build errors in db.sqlalchemy * Fix migration.db\_version when no tables * Update log translation domains * Add model\_query() to db.sqlalchemy.utils module * Fix a small typo in api.py * migration.db\_sync requires an engine now * Remove CONF.database.connection default value * Remove None for dict.get() * Fix duplicating of SQL queries in logs * Update oslo log messages with translation domains * Restore the ability to load the DB backend lazily * Don't use cfg.CONF in oslo.db * Don't store engine instances in oslo.db * Add etc/openstack.conf.sample to .gitignore * py3kcompat: remove * Don't raise MySQL 2013 'Lost connection' errors * Format sql in db.sqlalchemy.session docstring * Handle exception messages with six.text\_type * Drop dependency on log from oslo db code * Automatic retry db.api query if db connection lost * Clean up docstring in db.sqlalchemy.session * Only enable MySQL TRADITIONAL mode if we're running against MySQL * 
Move db tests base.py to common code * Fix parsing of UC errors in sqlite 3.7.16+/3.8.2+ * Use dialect rather than a particular DB API driver * Move helper DB functions to db.sqlalchemy.utils * Small edits on help strings * Transition from migrate to alembic * Fix mocking of utcnow() for model datetime cols * Add a db check for CHARSET=utf8 * Remove unused variables * Remove "vim: tabstop=4 shiftwidth=4 softtabstop=4" from headers * Fix database connection string is secret * Cleanup unused log related code * Removed copyright from empty files * Fix the obsolete exception message * Fix filter() usage due to python 3 compability * Use hacking import\_exceptions for gettextutils.\_ * Add docstring for exception handlers of session * Removal of \_REPOSITORY global variable * Remove string.lowercase usage * Remove eventlet tpool from common db.api * Database hook enabling traditional mode at MySQL * Replace xrange in for loop with range * SQLAlchemy error patterns improved * Remove unused import * Correct invalid docstrings * Remove start index 0 in range() * Make \_extra\_keys a property of ModelBase * Fix mis-spellings * Fix violations of H302:import only modules * Enables db2 server disconnects to be handled pessimistically * db.sqlalchemy.session add [sql].idle\_timeout * Use six.iteritems to make dict work on Python2/3 * Trivial: Make vertical white space after license header consistent * Drop dependency on processutils from oslo db code * Fix locking in migration tests * Incorporating MIT licensed code * Typos fix in db and periodic\_task module * Use six.moves.configparser instead of ConfigParser * Drop dependency on fileutils from oslo db tests * fix typo in db session docstring * Added opportunistic DB test cases * The ability to run tests at various backend * Use log.warning() instead of log.warn() in oslo.db * Replace removed items in Python3 * Remove vim header * Use py3kcompat urlutils functions instead of urlparse * Don't use deprecated module commands * Remove sqlalchemy-migrate 0.7.3 patching * SQLite behavior independent DB test cases * Drop dependency on lockutils from oslo db code * Remove lazy loading of database backend * Do not name variables as builtins * Add db2 communication error code when check the db connection * Replace using tests.utils part3 * Add [sql].connection as deprecated opt for db * Modify SQLA session due to dispose of eventlet * Use monkey\_patch() in TestMigrationUtils setUp() * Clean up db.sqla.Models.extra\_keys interface * Use functools.wrap() instead of custom implementation * Move base migration test classes to common code * Bump hacking to 0.7.0 * exception: remove * Replace using tests.utils with openstack.common.test * Use single meta when change column type * Helper function to sanitize db url credentials * BaseException.message is deprecated since Python 2.6 * Add function drop\_unique\_constraint() * Change sqlalchemy/utils.py mode back to 644 * Move sqlalchemy migration from Nova * Allow use of hacking 0.6.0 and enable new checks * Add eclipse project files to .gitignore * Raise ValueError if sort\_dir is unknown * Add tests for cinder/common/sqlalchemyutils.py * python3: Add python3 compatibility support * Add .testrepository to .gitignore * Move \`test\_migrations\` from Nova * Migrate sqlalchemy utils from Nova * Enable H302 hacking check * Add a monkey-patching util for sqlalchemy-migrate * Don't use mixture of cfg.Opt() deprecated args * Allow BaseTestCase use a different conf object * Ensure that DB configuration is backward compatible 
* Add a fixture for using of SQLite in-memory DB * Enable hacking H404 test * Enable user to configure pool\_timeout * Changed processing unique constraint name * Enable H306 hacking check * Add a slave db handle for the SQLAlchemy backend * Enable hacking H403 test * Changed processing unique constraint name * Ignore backup files in .gitignore * Specify database group instead of DEFAULT * Fixes import order nits * Line wrapper becomes to long when expanded * Convert unicode for python3 portability * Add test coverage for sqlite regexp function * Use range rather than xrange * Add support to clear DB * Add enforcement for foreign key contraints with sqlite * Improve Python 3.x compatibility * Removes metadata from ModelBase * Removes created\_at, updated\_at from ModelBase * Fixes private functions private * Mark sql\_connection with secret flag * Fix Copyright Headers - Rename LLC to Foundation * Fixes import order nits * Clean up sqlalchemy exception code * Move DB thread pooling to DB API loader * Use oslo-config-2013.1b3 * Add join\_consumer\_pool() to RPC connections * Use importutils.try\_import() for MySQLdb * Minor tweak to make update.py happy * Remove pointless use of OpenStackException * Remove unused context from test\_sqlalchemy * Remove openstack.common.db.common * Provide creating real unique constraints for columns * Fix missing wrap\_db\_error for Session.execute() method * Fix eventlet/mysql db pooling code * Add missing DBDuplicateEntry * Be explicit about set\_default() parameters * Remove duplicate DB options * Eliminate gratuitous DB difference vs Nova * Import sqlalchemy session/models/utils * updating sphinx documentation * Correcting openstack-common mv to oslo-incubator * Update .gitreview for oslo * .gitignore updates for generated files * Updated tox config for multi-python testing * Added .gitreview file * ignore cover's html directory * Rajaram/Vinkesh|increased tests for Request and Response serializers * Rajaram/Vinkesh|Added nova's serializaiton classes into common * Initial skeleton project ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/HACKING.rst0000664000175000017500000000017000000000000014755 0ustar00zuulzuul00000000000000Style Commandments ================== Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/LICENSE0000664000175000017500000002363600000000000014200 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1720086402.548608 oslo.db-16.0.0/PKG-INFO0000664000175000017500000000440400000000000014260 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: oslo.db Version: 16.0.0 Summary: Oslo Database library Home-page: https://docs.openstack.org/oslo.db/latest Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/oslo.db.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on =============================================== oslo.db -- OpenStack Database Pattern Library =============================================== .. image:: https://img.shields.io/pypi/v/oslo.db.svg :target: https://pypi.org/project/oslo.db/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.db.svg :target: https://pypi.org/project/oslo.db/ :alt: Downloads The oslo db (database) handling library, provides database connectivity to different database backends and various other helper utils. 
* Free software: Apache license * Documentation: https://docs.openstack.org/oslo.db/latest * Source: https://opendev.org/openstack/oslo.db * Bugs: https://bugs.launchpad.net/oslo.db * Release notes: https://docs.openstack.org/releasenotes/oslo.db/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Python: >=3.8 Provides-Extra: mysql Provides-Extra: postgresql Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/README.rst0000664000175000017500000000201000000000000014641 0ustar00zuulzuul00000000000000======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/oslo.db.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on =============================================== oslo.db -- OpenStack Database Pattern Library =============================================== .. image:: https://img.shields.io/pypi/v/oslo.db.svg :target: https://pypi.org/project/oslo.db/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.db.svg :target: https://pypi.org/project/oslo.db/ :alt: Downloads The oslo db (database) handling library, provides database connectivity to different database backends and various other helper utils. * Free software: Apache license * Documentation: https://docs.openstack.org/oslo.db/latest * Source: https://opendev.org/openstack/oslo.db * Bugs: https://bugs.launchpad.net/oslo.db * Release notes: https://docs.openstack.org/releasenotes/oslo.db/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/bindep.txt0000664000175000017500000000100100000000000015153 0ustar00zuulzuul00000000000000# This is a cross-platform list tracking distribution packages needed for install and tests; # see https://docs.openstack.org/infra/bindep/ for additional information. 
mariadb [platform:rpm] mariadb-server [platform:redhat platform:debian] mariadb-devel [platform:redhat] libmariadb-dev-compat [platform:debian] libmysqlclient-dev [platform:ubuntu] mysql-client [platform:dpkg !platform:debian] mysql-server [platform:dpkg !platform:debian] postgresql postgresql-client [platform:dpkg] libpq-dev [platform:dpkg] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5245967 oslo.db-16.0.0/doc/0000775000175000017500000000000000000000000013726 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/doc/requirements.txt0000664000175000017500000000022000000000000017204 0ustar00zuulzuul00000000000000openstackdocstheme>=2.2.0 # Apache-2.0 sphinx>=2.0.0 # BSD doc8>=0.6.0 # Apache-2.0 reno>=3.1.0 # Apache-2.0 sphinxcontrib-apidoc>=0.2.0 # BSD ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5245967 oslo.db-16.0.0/doc/source/0000775000175000017500000000000000000000000015226 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/doc/source/conf.py0000664000175000017500000000410500000000000016525 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright (C) 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinxcontrib.apidoc', 'oslo_config.sphinxext', 'openstackdocstheme', 'stevedore.sphinxext' ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/oslo.db' openstackdocs_bug_project = 'oslo.db' openstackdocs_bug_tag = '' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2014, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['oslo_db.'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. 
html_theme = 'openstackdocs' # -- sphinxcontrib.apidoc configuration -------------------------------------- apidoc_module_dir = '../../oslo_db' apidoc_output_dir = 'reference/api' apidoc_excluded_paths = [ 'tests', ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5245967 oslo.db-16.0.0/doc/source/contributor/0000775000175000017500000000000000000000000017600 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/doc/source/contributor/index.rst0000664000175000017500000000004700000000000021442 0ustar00zuulzuul00000000000000.. include:: ../../../CONTRIBUTING.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/doc/source/index.rst0000664000175000017500000000112200000000000017063 0ustar00zuulzuul00000000000000=============================================== oslo.db -- OpenStack Database Pattern Library =============================================== The oslo.db (database) handling library, provides database connectivity to different database backends and various other helper utils. .. toctree:: :maxdepth: 2 install/index contributor/index user/index reference/index Release Notes ============= Read also the `oslo.db Release Notes `_. Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5245967 oslo.db-16.0.0/doc/source/install/0000775000175000017500000000000000000000000016674 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/doc/source/install/index.rst0000664000175000017500000000122600000000000020536 0ustar00zuulzuul00000000000000============ Installation ============ At the command line:: $ pip install oslo.db You will also need to install at least one SQL backend:: $ pip install PyMySQL Or:: $ pip install psycopg2 Or:: $ pip install pysqlite Using with PostgreSQL --------------------- If you are using PostgreSQL make sure to install the PostgreSQL client development package for your distro. On Ubuntu this is done as follows:: $ sudo apt-get install libpq-dev $ pip install psycopg2 The installation of psycopg2 will fail if libpq-dev is not installed first. Note that even in a virtual environment the libpq-dev will be installed system wide. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5245967 oslo.db-16.0.0/doc/source/reference/0000775000175000017500000000000000000000000017164 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/doc/source/reference/index.rst0000664000175000017500000000021000000000000021016 0ustar00zuulzuul00000000000000.. _using: ========= Reference ========= .. toctree:: :maxdepth: 2 opts API === .. 
toctree:: :maxdepth: 1 api/modules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/doc/source/reference/opts.rst0000664000175000017500000000036400000000000020706 0ustar00zuulzuul00000000000000===================== Configuration Options ===================== oslo.db uses oslo.config to define and manage configuration options to allow the deployer to control how an application uses the underlying database. .. show-options:: oslo.db ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5245967 oslo.db-16.0.0/doc/source/user/0000775000175000017500000000000000000000000016204 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/doc/source/user/history.rst0000664000175000017500000000004000000000000020431 0ustar00zuulzuul00000000000000.. include:: ../../../ChangeLog ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/doc/source/user/index.rst0000664000175000017500000000014000000000000020040 0ustar00zuulzuul00000000000000============== Using oslo.db ============== .. toctree:: :maxdepth: 2 usage history ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/doc/source/user/usage.rst0000664000175000017500000001202600000000000020043 0ustar00zuulzuul00000000000000======= Usage ======= To use oslo.db in a project: Session Handling ================ Session handling is achieved using the :mod:`oslo_db.sqlalchemy.enginefacade` system. This module presents a function decorator as well as a context manager approach to delivering :class:`.session.Session` as well as :class:`.Connection` objects to a function or block. Both calling styles require the use of a context object. This object may be of any class, though when used with the decorator form, requires special instrumentation. The context manager form is as follows: .. code:: python from oslo_db.sqlalchemy import enginefacade class MyContext(object): "User-defined context class." def some_reader_api_function(context): with enginefacade.reader.using(context) as session: return session.query(SomeClass).all() def some_writer_api_function(context, x, y): with enginefacade.writer.using(context) as session: session.add(SomeClass(x, y)) def run_some_database_calls(): context = MyContext() results = some_reader_api_function(context) some_writer_api_function(context, 5, 10) The decorator form accesses attributes off the user-defined context directly; the context must be decorated with the :func:`oslo_db.sqlalchemy.enginefacade.transaction_context_provider` decorator. Each function must receive the context argument: .. code:: python from oslo_db.sqlalchemy import enginefacade @enginefacade.transaction_context_provider class MyContext(object): "User-defined context class." @enginefacade.reader def some_reader_api_function(context): return context.session.query(SomeClass).all() @enginefacade.writer def some_writer_api_function(context, x, y): context.session.add(SomeClass(x, y)) def run_some_database_calls(): context = MyContext() results = some_reader_api_function(context) some_writer_api_function(context, 5, 10) ``connection`` modifier can be used when a :class:`.session.Session` object is not needed, e.g. when `SQLAlchemy Core `_ is preferred: .. 
code:: python @enginefacade.reader.connection def _refresh_from_db(context, cache): sel = sa.select(table.c.id, table.c.name) res = context.connection.execute(sel).fetchall() cache.id_cache = {r[1]: r[0] for r in res} cache.str_cache = {r[0]: r[1] for r in res} .. note:: The ``context.session`` and ``context.connection`` attributes must be accessed within the scope of an appropriate writer/reader block (either the decorator or contextmanager approach). An AttributeError is raised otherwise. The decorator form can also be used with class and instance methods which implicitly receive the first positional argument: .. code:: python class DatabaseAccessLayer(object): @classmethod @enginefacade.reader def some_reader_api_function(cls, context): return context.session.query(SomeClass).all() @enginefacade.writer def some_writer_api_function(self, context, x, y): context.session.add(SomeClass(x, y)) .. note:: Note that enginefacade decorators must be applied **before** `classmethod`, otherwise you will get a ``TypeError`` at import time (as enginefacade will try to use ``inspect.getargspec()`` on a descriptor, not on a bound method, please refer to the `Data Model `_ section of the Python Language Reference for details). The scope of transaction and connectivity for both approaches is managed transparently. The configuration for the connection comes from the standard :obj:`oslo_config.cfg.CONF` collection. Additional configurations can be established for the enginefacade using the :func:`oslo_db.sqlalchemy.enginefacade.configure` function, before any use of the database begins: .. code:: python from oslo_db.sqlalchemy import enginefacade enginefacade.configure( sqlite_fk=True, max_retries=5, mysql_sql_mode='ANSI' ) Base class for models usage =========================== .. code:: python from oslo_db.sqlalchemy import models class ProjectSomething(models.TimestampMixin, models.ModelBase): id = Column(Integer, primary_key=True) ... DB API backend support ====================== .. code:: python from oslo_config import cfg from oslo_db import api as db_api _BACKEND_MAPPING = {'sqlalchemy': 'project.db.sqlalchemy.api'} IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING) def get_engine(): return IMPL.get_engine() def get_session(): return IMPL.get_session() # DB-API method def do_something(somethind_id): return IMPL.do_something(somethind_id) DB migration extensions ======================= Available extensions for `oslo_db.migration`. .. list-plugins:: oslo_db.sqlalchemy.migration :detailed: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5285985 oslo.db-16.0.0/oslo.db.egg-info/0000775000175000017500000000000000000000000016213 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086402.0 oslo.db-16.0.0/oslo.db.egg-info/PKG-INFO0000664000175000017500000000440400000000000017312 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: oslo.db Version: 16.0.0 Summary: Oslo Database library Home-page: https://docs.openstack.org/oslo.db/latest Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/oslo.db.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. 
Change things from this point on =============================================== oslo.db -- OpenStack Database Pattern Library =============================================== .. image:: https://img.shields.io/pypi/v/oslo.db.svg :target: https://pypi.org/project/oslo.db/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.db.svg :target: https://pypi.org/project/oslo.db/ :alt: Downloads The oslo db (database) handling library, provides database connectivity to different database backends and various other helper utils. * Free software: Apache license * Documentation: https://docs.openstack.org/oslo.db/latest * Source: https://opendev.org/openstack/oslo.db * Bugs: https://bugs.launchpad.net/oslo.db * Release notes: https://docs.openstack.org/releasenotes/oslo.db/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Python: >=3.8 Provides-Extra: mysql Provides-Extra: postgresql Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086402.0 oslo.db-16.0.0/oslo.db.egg-info/SOURCES.txt0000664000175000017500000001271600000000000020106 0ustar00zuulzuul00000000000000.coveragerc .mailmap .pre-commit-config.yaml .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/contributor/index.rst doc/source/install/index.rst doc/source/reference/index.rst doc/source/reference/opts.rst doc/source/user/history.rst doc/source/user/index.rst doc/source/user/usage.rst oslo.db.egg-info/PKG-INFO oslo.db.egg-info/SOURCES.txt oslo.db.egg-info/dependency_links.txt oslo.db.egg-info/entry_points.txt oslo.db.egg-info/not-zip-safe oslo.db.egg-info/pbr.json oslo.db.egg-info/requires.txt oslo.db.egg-info/top_level.txt oslo_db/__init__.py oslo_db/_i18n.py oslo_db/api.py oslo_db/exception.py oslo_db/options.py oslo_db/warning.py oslo_db/locale/en_GB/LC_MESSAGES/oslo_db.po oslo_db/locale/es/LC_MESSAGES/oslo_db.po oslo_db/locale/fr/LC_MESSAGES/oslo_db.po oslo_db/sqlalchemy/__init__.py oslo_db/sqlalchemy/enginefacade.py oslo_db/sqlalchemy/engines.py oslo_db/sqlalchemy/exc_filters.py oslo_db/sqlalchemy/models.py oslo_db/sqlalchemy/orm.py oslo_db/sqlalchemy/provision.py oslo_db/sqlalchemy/session.py oslo_db/sqlalchemy/test_base.py oslo_db/sqlalchemy/test_fixtures.py oslo_db/sqlalchemy/test_migrations.py oslo_db/sqlalchemy/types.py oslo_db/sqlalchemy/update_match.py oslo_db/sqlalchemy/utils.py oslo_db/sqlalchemy/compat/__init__.py oslo_db/sqlalchemy/migration_cli/README.rst oslo_db/sqlalchemy/migration_cli/__init__.py oslo_db/sqlalchemy/migration_cli/ext_alembic.py oslo_db/sqlalchemy/migration_cli/ext_base.py oslo_db/sqlalchemy/migration_cli/manager.py oslo_db/tests/__init__.py oslo_db/tests/base.py oslo_db/tests/fixtures.py 
oslo_db/tests/test_api.py oslo_db/tests/utils.py oslo_db/tests/sqlalchemy/__init__.py oslo_db/tests/sqlalchemy/base.py oslo_db/tests/sqlalchemy/test_async_eventlet.py oslo_db/tests/sqlalchemy/test_enginefacade.py oslo_db/tests/sqlalchemy/test_exc_filters.py oslo_db/tests/sqlalchemy/test_fixtures.py oslo_db/tests/sqlalchemy/test_migrate_cli.py oslo_db/tests/sqlalchemy/test_migrations.py oslo_db/tests/sqlalchemy/test_models.py oslo_db/tests/sqlalchemy/test_options.py oslo_db/tests/sqlalchemy/test_provision.py oslo_db/tests/sqlalchemy/test_sqlalchemy.py oslo_db/tests/sqlalchemy/test_types.py oslo_db/tests/sqlalchemy/test_update_match.py oslo_db/tests/sqlalchemy/test_utils.py releasenotes/notes/MySQL-python-no-longer-tested-2a6c32cce6b03215.yaml releasenotes/notes/add-reno-e5c2f63e73c25959.yaml releasenotes/notes/add_connection_parameters-231aa7d8b7d2d416.yaml releasenotes/notes/add_facade_started-14f9bc34fac89371.yaml releasenotes/notes/add_wsrep_sync_wait-e3c5a9f4bc08b203.yaml releasenotes/notes/connection_debug_min_max-bf6d53d49be7ca52.yaml releasenotes/notes/deprecate-TpoolDbapiWrapper-2ce78aa7cbb9e585.yaml releasenotes/notes/deprecate-insert-from-select-ea831381ebd7e7cf.yaml releasenotes/notes/deprecate-mysql-ndb-cluster-support-cdcaa177b6a6773c.yaml releasenotes/notes/deprecate-sqlalchemy-migrate-6f899935615d6984.yaml releasenotes/notes/deprecate_config_sqlite_db-bd41d49343049319.yaml releasenotes/notes/deprecate_idle_timeout-029d9f2cb7184b28.yaml releasenotes/notes/drop-db2-support-6e70fe42268d2238.yaml releasenotes/notes/drop-python27-support-2308d7fbcd66cc22.yaml releasenotes/notes/enginefacade_decorators-4660862fe22d2669.yaml releasenotes/notes/fix-mysql-duplicate-key-error-information-update-548888bc44b8dbd7.yaml releasenotes/notes/fix_mysql_wsrsp-0ef98dec5ea3759f.yaml releasenotes/notes/fix_synchronous_reader-ca442ca9f07470ec.yaml releasenotes/notes/increase-default-max-overflow-0af787268807f926.yaml releasenotes/notes/new-db-fixtures-58223e3926122413.yaml releasenotes/notes/remove-ModelsMigrationsSync-check_foreign_keys-467e0dbeb65a8c86.yaml releasenotes/notes/remove-NotCommitting-utils-fed6df0e2f85edfa.yaml releasenotes/notes/remove-base-test-classes-557889ec4f072781.yaml releasenotes/notes/remove-config-option-sqlite_db-7b7c6459135fd8c9.yaml releasenotes/notes/remove-deprecated-opts-1d095911e82fee3b.yaml releasenotes/notes/remove-mysql-ndb-cluster-support-fdb19029595070fa.yaml releasenotes/notes/remove-sqlalchemy-migrate-f69c805004e6bac1.yaml releasenotes/notes/remove-use_tpool-29a8bf9fc68a9bb2.yaml releasenotes/notes/removed-deprecated-idle-timeout-051a6a9a792bd8de.yaml releasenotes/notes/removed-deprecated-min-pool-size-1f351d79fe232129.yaml releasenotes/notes/removed-deprecated-sql-max-pool-size-c9b7bfc14c3b6b14.yaml releasenotes/notes/sqlalchemy-20-0a193a01c70f805a.yaml releasenotes/notes/warn-incomplete-url-c44cd03baf630c7c.yaml releasenotes/notes/wrap_db_retry-34c7ff2d82afa3f5.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst 
releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po tools/run-pifpaf-tests.sh tools/test-setup.sh././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086402.0 oslo.db-16.0.0/oslo.db.egg-info/dependency_links.txt0000664000175000017500000000000100000000000022261 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086402.0 oslo.db-16.0.0/oslo.db.egg-info/entry_points.txt0000664000175000017500000000022500000000000021510 0ustar00zuulzuul00000000000000[oslo.config.opts] oslo.db = oslo_db.options:list_opts [oslo.db.migration] alembic = oslo_db.sqlalchemy.migration_cli.ext_alembic:AlembicExtension ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086402.0 oslo.db-16.0.0/oslo.db.egg-info/not-zip-safe0000664000175000017500000000000100000000000020441 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086402.0 oslo.db-16.0.0/oslo.db.egg-info/pbr.json0000664000175000017500000000005600000000000017672 0ustar00zuulzuul00000000000000{"git_version": "a59dba4", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086402.0 oslo.db-16.0.0/oslo.db.egg-info/requires.txt0000664000175000017500000000074500000000000020621 0ustar00zuulzuul00000000000000SQLAlchemy>=1.4.0 alembic>=0.9.6 debtcollector>=1.2.0 oslo.config>=5.2.0 oslo.i18n>=3.15.3 oslo.utils>=3.33.0 pbr>=2.0.0 stevedore>=1.20.0 testresources>=2.0.0 testscenarios>=0.4 [mysql] PyMySQL>=0.7.6 [postgresql] psycopg2>=2.8.0 [test] PyMySQL>=0.7.6 bandit<1.8.0,>=1.7.0 coverage>=4.0 eventlet>=0.18.2 fixtures>=3.0.0 hacking<6.2.0,>=6.1.0 oslo.context>=2.19.2 oslotest>=3.2.0 pifpaf>=0.10.0 pre-commit>=2.6.0 psycopg2>=2.8.0 python-subunit>=1.0.0 stestr>=2.0.0 testtools>=2.2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086402.0 oslo.db-16.0.0/oslo.db.egg-info/top_level.txt0000664000175000017500000000001000000000000020734 0ustar00zuulzuul00000000000000oslo_db ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5285985 oslo.db-16.0.0/oslo_db/0000775000175000017500000000000000000000000014602 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/__init__.py0000664000175000017500000000000000000000000016701 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/_i18n.py0000664000175000017500000000146700000000000016102 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/index.html . """ import oslo_i18n _translators = oslo_i18n.TranslatorFactory(domain='oslo_db') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/api.py0000664000175000017500000002670300000000000015735 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ ================================= Multiple DB API backend support. ================================= A DB backend module should implement a method named 'get_backend' which takes no arguments. The method can return any object that implements DB API methods. """ import functools import logging import random import threading import time from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import reflection from oslo_db import exception from oslo_db import options LOG = logging.getLogger(__name__) def safe_for_db_retry(f): """Indicate api method as safe for re-connection to database. Database connection retries will be enabled for the decorated api method. Database connection failure can have many causes, which can be temporary. In such cases retry may increase the likelihood of connection. Usage:: @safe_for_db_retry def api_method(self): self.engine.connect() :param f: database api method. :type f: function. """ f.__dict__['enable_retry_on_disconnect'] = True return f def retry_on_deadlock(f): """Retry a DB API call if Deadlock was received. wrap_db_entry will be applied to all db.api functions marked with this decorator. """ f.__dict__['enable_retry_on_deadlock'] = True return f def retry_on_request(f): """Retry a DB API call if RetryRequest exception was received. wrap_db_entry will be applied to all db.api functions marked with this decorator. """ f.__dict__['enable_retry_on_request'] = True return f class wrap_db_retry(object): """Retry db.api methods, if db_error raised Retry decorated db.api methods. This decorator catches db_error and retries function in a loop until it succeeds, or until maximum retries count will be reached. 
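A minimal usage sketch, assuming an api method like the one shown for :func:`safe_for_db_retry` above (the ``api_method`` name and the parameter values here are illustrative only)::

        # Retry the decorated call on deadlock, up to five times,
        # before the error is re-raised.
        @wrap_db_retry(max_retries=5, retry_on_deadlock=True)
        def api_method(self):
            self.engine.connect()
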
Keyword arguments: :param retry_interval: seconds between transaction retries :type retry_interval: int or float :param max_retries: max number of retries before an error is raised :type max_retries: int :param inc_retry_interval: determine increase retry interval or not :type inc_retry_interval: bool :param max_retry_interval: max interval value between retries :type max_retry_interval: int or float :param exception_checker: checks if an exception should trigger a retry :type exception_checker: callable :param jitter: determine increase retry interval use jitter or not, jitter is always interpreted as True for a DBDeadlockError :type jitter: bool """ def __init__(self, retry_interval=1, max_retries=20, inc_retry_interval=True, max_retry_interval=10, retry_on_disconnect=False, retry_on_deadlock=False, exception_checker=lambda exc: False, jitter=False): super(wrap_db_retry, self).__init__() self.jitter = jitter self.db_error = (exception.RetryRequest, ) # default is that we re-raise anything unexpected self.exception_checker = exception_checker if retry_on_disconnect: self.db_error += (exception.DBConnectionError, ) if retry_on_deadlock: self.db_error += (exception.DBDeadlock, ) self.retry_interval = retry_interval self.max_retries = max_retries self.inc_retry_interval = inc_retry_interval self.max_retry_interval = max_retry_interval def __call__(self, f): @functools.wraps(f) def wrapper(*args, **kwargs): sleep_time = next_interval = self.retry_interval remaining = self.max_retries while True: try: return f(*args, **kwargs) except Exception as e: with excutils.save_and_reraise_exception() as ectxt: expected = self._is_exception_expected(e) if remaining > 0: ectxt.reraise = not expected else: if expected: LOG.exception('DB exceeded retry limit.') # if it's a RetryRequest, we need to unpack it if isinstance(e, exception.RetryRequest): ectxt.type_ = type(e.inner_exc) ectxt.value = e.inner_exc LOG.debug("Performing DB retry for function %s", reflection.get_callable_name(f)) # NOTE(vsergeyev): We are using patched time module, so # this effectively yields the execution # context to another green thread. time.sleep(sleep_time) if self.inc_retry_interval: # NOTE(jiangyikun): In order to minimize the chance of # regenerating a deadlock and reduce the average sleep # time, we are using jitter by default when the # deadlock is detected. With the jitter, # sleep_time = [0, next_interval), otherwise, without # the jitter, sleep_time = next_interval. if isinstance(e, exception.DBDeadlock): jitter = True else: jitter = self.jitter sleep_time, next_interval = self._get_inc_interval( next_interval, jitter) remaining -= 1 return wrapper def _is_exception_expected(self, exc): if isinstance(exc, self.db_error): # RetryRequest is application-initated exception # and not an error condition in case retries are # not exceeded if not isinstance(exc, exception.RetryRequest): LOG.debug('DB error: %s', exc) return True return self.exception_checker(exc) def _get_inc_interval(self, n, jitter): # NOTE(jiangyikun): The "n" help us to record the 2 ** retry_times. # The "sleep_time" means the real time to sleep: # - Without jitter: sleep_time = 2 ** retry_times = n # - With jitter: sleep_time = [0, 2 ** retry_times) < n n = n * 2 if jitter: sleep_time = random.uniform(0, n) else: sleep_time = n return min(sleep_time, self.max_retry_interval), n class DBAPI(object): """Initialize the chosen DB API backend. After initialization API methods is available as normal attributes of ``DBAPI`` subclass. 
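A minimal construction sketch (the ``project.db.sqlalchemy.api`` backend path and the ``do_something`` call are illustrative, mirroring the usage documentation shipped with this library)::

        from oslo_db import api as db_api

        _BACKEND_MAPPING = {'sqlalchemy': 'project.db.sqlalchemy.api'}

        # Load the backend eagerly; backend API methods are then called
        # as plain attributes of the DBAPI instance.
        IMPL = db_api.DBAPI('sqlalchemy', backend_mapping=_BACKEND_MAPPING)
        IMPL.do_something(something_id)
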
Database API methods are supposed to be called as DBAPI instance methods. :param backend_name: name of the backend to load :type backend_name: str :param backend_mapping: backend name -> module/class to load mapping :type backend_mapping: dict :default backend_mapping: None :param lazy: load the DB backend lazily on the first DB API method call :type lazy: bool :default lazy: False :keyword use_db_reconnect: retry DB transactions on disconnect or not :type use_db_reconnect: bool :keyword retry_interval: seconds between transaction retries :type retry_interval: int :keyword inc_retry_interval: increase retry interval or not :type inc_retry_interval: bool :keyword max_retry_interval: max interval value between retries :type max_retry_interval: int :keyword max_retries: max number of retries before an error is raised :type max_retries: int """ def __init__(self, backend_name, backend_mapping=None, lazy=False, **kwargs): self._backend = None self._backend_name = backend_name self._backend_mapping = backend_mapping or {} self._lock = threading.Lock() if not lazy: self._load_backend() self.use_db_reconnect = kwargs.get('use_db_reconnect', False) self._wrap_db_kwargs = {k: v for k, v in kwargs.items() if k in ('retry_interval', 'inc_retry_interval', 'max_retry_interval', 'max_retries')} def _load_backend(self): with self._lock: if not self._backend: # Import the untranslated name if we don't have a mapping backend_path = self._backend_mapping.get(self._backend_name, self._backend_name) LOG.debug('Loading backend %(name)r from %(path)r', {'name': self._backend_name, 'path': backend_path}) backend_mod = importutils.import_module(backend_path) self._backend = backend_mod.get_backend() def __getattr__(self, key): if not self._backend: self._load_backend() attr = getattr(self._backend, key) if not hasattr(attr, '__call__'): return attr # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry # DB API methods, decorated with @safe_for_db_retry # on disconnect. retry_on_disconnect = self.use_db_reconnect and attr.__dict__.get( 'enable_retry_on_disconnect', False) retry_on_deadlock = attr.__dict__.get('enable_retry_on_deadlock', False) retry_on_request = attr.__dict__.get('enable_retry_on_request', False) if retry_on_disconnect or retry_on_deadlock or retry_on_request: attr = wrap_db_retry( retry_on_disconnect=retry_on_disconnect, retry_on_deadlock=retry_on_deadlock, **self._wrap_db_kwargs)(attr) return attr @classmethod def from_config(cls, conf, backend_mapping=None, lazy=False): """Initialize DBAPI instance given a config instance. 
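A usage sketch following the pattern in the library documentation (the backend module path is illustrative)::

        from oslo_config import cfg
        from oslo_db import api as db_api

        _BACKEND_MAPPING = {'sqlalchemy': 'project.db.sqlalchemy.api'}

        # Reads the [database] options (backend, use_db_reconnect and
        # the retry settings) from the given config object.
        IMPL = db_api.DBAPI.from_config(
            cfg.CONF, backend_mapping=_BACKEND_MAPPING)
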
:param conf: oslo.config config instance :type conf: oslo.config.cfg.ConfigOpts :param backend_mapping: backend name -> module/class to load mapping :type backend_mapping: dict :param lazy: load the DB backend lazily on the first DB API method call :type lazy: bool """ conf.register_opts(options.database_opts, 'database') return cls(backend_name=conf.database.backend, backend_mapping=backend_mapping, lazy=lazy, use_db_reconnect=conf.database.use_db_reconnect, retry_interval=conf.database.db_retry_interval, inc_retry_interval=conf.database.db_inc_retry_interval, max_retry_interval=conf.database.db_max_retry_interval, max_retries=conf.database.db_max_retries) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/exception.py0000664000175000017500000002123100000000000017151 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """DB related custom exceptions. Custom exceptions intended to determine the causes of specific database errors. This module provides more generic exceptions than the database-specific driver libraries, and so users of oslo.db can catch these no matter which database the application is using. Most of the exceptions are wrappers. Wrapper exceptions take an original exception as positional argument and keep it for purposes of deeper debug. Example:: try: statement(arg) except sqlalchemy.exc.OperationalError as e: raise DBDuplicateEntry(e) This is useful to determine more specific error cases further at execution, when you need to add some extra information to an error message. Wrapper exceptions takes care about original error message displaying to not to loose low level cause of an error. All the database api exceptions wrapped into the specific exceptions provided belove. Please use only database related custom exceptions with database manipulations with `try/except` statement. This is required for consistent handling of database errors. """ from oslo_utils.excutils import CausedByException from oslo_db._i18n import _ class DBError(CausedByException): """Base exception for all custom database exceptions. :kwarg inner_exception: an original exception which was wrapped with DBError or its subclasses. """ def __init__(self, inner_exception=None, cause=None): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception), cause) class DBDuplicateEntry(DBError): """Duplicate entry at unique column error. Raised when made an attempt to write to a unique column the same entry as existing one. :attr: `columns` available on an instance of the exception and could be used at error handling:: try: instance_type_ref.save() except DBDuplicateEntry as e: if 'colname' in e.columns: # Handle error. :kwarg columns: a list of unique columns have been attempted to write a duplicate entry. 
:type columns: list :kwarg value: a value which has been attempted to write. The value will be None, if we can't extract it for a particular database backend. Only MySQL and PostgreSQL 9.x are supported right now. """ def __init__(self, columns=None, inner_exception=None, value=None): self.columns = columns or [] self.value = value super(DBDuplicateEntry, self).__init__(inner_exception) class DBConstraintError(DBError): """Check constraint fails for column error. Raised when made an attempt to write to a column a value that does not satisfy a CHECK constraint. :kwarg table: the table name for which the check fails :type table: str :kwarg check_name: the table of the check that failed to be satisfied :type check_name: str """ def __init__(self, table, check_name, inner_exception=None): self.table = table self.check_name = check_name super(DBConstraintError, self).__init__(inner_exception) class DBReferenceError(DBError): """Foreign key violation error. :param table: a table name in which the reference is directed. :type table: str :param constraint: a problematic constraint name. :type constraint: str :param key: a broken reference key name. :type key: str :param key_table: a table name which contains the key. :type key_table: str """ def __init__(self, table, constraint, key, key_table, inner_exception=None): self.table = table self.constraint = constraint self.key = key self.key_table = key_table super(DBReferenceError, self).__init__(inner_exception) class DBNonExistentConstraint(DBError): """Constraint does not exist. :param table: table name :type table: str :param constraint: constraint name :type table: str """ def __init__(self, table, constraint, inner_exception=None): self.table = table self.constraint = constraint super(DBNonExistentConstraint, self).__init__(inner_exception) class DBNonExistentTable(DBError): """Table does not exist. :param table: table name :type table: str """ def __init__(self, table, inner_exception=None): self.table = table super(DBNonExistentTable, self).__init__(inner_exception) class DBNonExistentDatabase(DBError): """Database does not exist. :param database: database name :type database: str """ def __init__(self, database, inner_exception=None): self.database = database super(DBNonExistentDatabase, self).__init__(inner_exception) class DBDeadlock(DBError): """Database dead lock error. Deadlock is a situation that occurs when two or more different database sessions have some data locked, and each database session requests a lock on the data that another, different, session has already locked. """ def __init__(self, inner_exception=None): super(DBDeadlock, self).__init__(inner_exception) class DBInvalidUnicodeParameter(Exception): """Database unicode error. Raised when unicode parameter is passed to a database without encoding directive. """ def __init__(self): super(DBInvalidUnicodeParameter, self).__init__( _("Invalid Parameter: Encoding directive wasn't provided.")) class DBMigrationError(DBError): """Wrapped migration specific exception. Raised when migrations couldn't be completed successfully. """ def __init__(self, message): super(DBMigrationError, self).__init__(message) class DBConnectionError(DBError): """Wrapped connection specific exception. Raised when database connection is failed. """ pass class DBDataError(DBError): """Raised for errors that are due to problems with the processed data. E.g. 
division by zero, numeric value out of range, incorrect data type, etc """ class DBNotSupportedError(DBError): """Raised when a database backend has raised sqla.exc.NotSupportedError""" class InvalidSortKey(Exception): """A sort key destined for database query usage is invalid.""" def __init__(self, key=None): super(InvalidSortKey, self).__init__( _("Sort key supplied is invalid: %s") % key) self.key = key class ColumnError(Exception): """Error raised when no column or an invalid column is found.""" class BackendNotAvailable(Exception): """Error raised when a particular database backend is not available within a test suite. """ class RetryRequest(Exception): """Error raised when DB operation needs to be retried. That could be intentionally raised by the code without any real DB errors. """ def __init__(self, inner_exc): self.inner_exc = inner_exc class NoEngineContextEstablished(AttributeError): """Error raised for enginefacade attribute access with no context. This applies to the ``session`` and ``connection`` attributes of a user-defined context and/or RequestContext object, when they are accessed *outside* of the scope of an enginefacade decorator or context manager. The exception is a subclass of AttributeError so that normal Python missing attribute behaviors are maintained, such as support for ``getattr(context, 'session', None)``. """ class ContextNotRequestedError(AttributeError): """Error raised when requesting a not-setup enginefacade attribute. This applies to the ``session`` and ``connection`` attributes of a user-defined context and/or RequestContext object, when they are accessed *within* the scope of an enginefacade decorator or context manager, but the context has not requested that attribute (e.g. like "with enginefacade.connection.using(context)" and "context.session" is requested). """ class CantStartEngineError(Exception): """Error raised when the enginefacade cannot start up correctly.""" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5205948 oslo.db-16.0.0/oslo_db/locale/0000775000175000017500000000000000000000000016041 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5205948 oslo.db-16.0.0/oslo_db/locale/en_GB/0000775000175000017500000000000000000000000017013 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5285985 oslo.db-16.0.0/oslo_db/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000020600 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/locale/en_GB/LC_MESSAGES/oslo_db.po0000664000175000017500000000310600000000000022561 0ustar00zuulzuul00000000000000# Translations template for oslo.db. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the oslo.db project. # # Translators: # Andi Chandler , 2014-2015 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: oslo.db VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2023-05-08 10:55+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-20 06:31+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: en_GB\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: English (United Kingdom)\n" msgid "Invalid Parameter: Encoding directive wasn't provided." msgstr "Invalid Parameter: Encoding directive wasn't provided." #, python-format msgid "Sort key supplied is invalid: %s" msgstr "Sort key supplied is invalid: %s" #, python-format msgid "" "There is no `deleted` column in `%s` table. Project doesn't use soft-deleted " "feature." msgstr "" "There is no `deleted` column in `%s` table. Project doesn't use soft-deleted " "feature." #, python-format msgid "There is no `project_id` column in `%s` table." msgstr "There is no `project_id` column in `%s` table." #, python-format msgid "Unknown sort direction, must be one of: %s" msgstr "Unknown sort direction, must be one of: %s" msgid "model should be a subclass of ModelBase" msgstr "model should be a subclass of ModelBase" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5205948 oslo.db-16.0.0/oslo_db/locale/es/0000775000175000017500000000000000000000000016450 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5285985 oslo.db-16.0.0/oslo_db/locale/es/LC_MESSAGES/0000775000175000017500000000000000000000000020235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/locale/es/LC_MESSAGES/oslo_db.po0000664000175000017500000000311700000000000022220 0ustar00zuulzuul00000000000000# Translations template for oslo.db. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the oslo.db project. # # Translators: # Adriana Chisco Landazábal , 2015 # Miriam Godinez , 2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: oslo.db VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2023-05-08 10:55+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:16+0000\n" "Last-Translator: Copied by Zanata \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Spanish\n" msgid "Invalid Parameter: Encoding directive wasn't provided." msgstr "Parámetro no válido: No se proporcionó directiva de codificación." #, python-format msgid "" "There is no `deleted` column in `%s` table. Project doesn't use soft-deleted " "feature." msgstr "" "No existe la columna `deleted` en la tabla `%s`. El projecto no utiliza la " "característica de eliminación suave." #, python-format msgid "There is no `project_id` column in `%s` table." msgstr "No existe la columna `project_id` en la tabla `%s`." 
#, python-format msgid "Unknown sort direction, must be one of: %s" msgstr "Clase de dirección desconocida, debe ser una de: %s" msgid "model should be a subclass of ModelBase" msgstr "el modelo debe ser una subclase del ModelBase" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5205948 oslo.db-16.0.0/oslo_db/locale/fr/0000775000175000017500000000000000000000000016450 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5285985 oslo.db-16.0.0/oslo_db/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000020235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/locale/fr/LC_MESSAGES/oslo_db.po0000664000175000017500000000310100000000000022211 0ustar00zuulzuul00000000000000# Translations template for oslo.db. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the oslo.db project. # # Translators: # Lucas Mascaro , 2015 # Maxime COQUEREL , 2014-2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: oslo.db VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2023-05-08 10:55+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:16+0000\n" "Last-Translator: Copied by Zanata \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: French\n" msgid "Invalid Parameter: Encoding directive wasn't provided." msgstr "Paramètre non valide : La directive encodage n'a pas été fourni." #, python-format msgid "" "There is no `deleted` column in `%s` table. Project doesn't use soft-deleted " "feature." msgstr "" "Il n'y a aucune colonne `deleted` dans la table `%s`. Le projet ne peut pas " "utiliser cette fonctionnalité." #, python-format msgid "There is no `project_id` column in `%s` table." msgstr "Il n'y a pas de colonne `project_id` dans la table `%s`." #, python-format msgid "Unknown sort direction, must be one of: %s" msgstr "Ordre de tris inconnu, il doit être un de: %s" msgid "model should be a subclass of ModelBase" msgstr "model doit etre une sous-classe de ModelBase" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/options.py0000664000175000017500000001776600000000000016670 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg database_opts = [ cfg.BoolOpt( 'sqlite_synchronous', default=True, help='If True, SQLite uses synchronous mode.', ), cfg.StrOpt( 'backend', default='sqlalchemy', help='The back end to use for the database.', ), cfg.StrOpt( 'connection', help=( 'The SQLAlchemy connection string to use to connect to ' 'the database.' 
), secret=True, ), cfg.StrOpt( 'slave_connection', secret=True, help=( 'The SQLAlchemy connection string to use to connect to the ' 'slave database.' ), ), cfg.StrOpt( 'mysql_sql_mode', default='TRADITIONAL', help=( 'The SQL mode to be used for MySQL sessions. ' 'This option, including the default, overrides any ' 'server-set SQL mode. To use whatever SQL mode ' 'is set by the server configuration, ' 'set this to no value. Example: mysql_sql_mode=' ), ), cfg.IntOpt( 'mysql_wsrep_sync_wait', default=None, help=( 'For Galera only, configure wsrep_sync_wait causality ' 'checks on new connections. Default is None, meaning don\'t ' 'configure any setting.' ), ), cfg.IntOpt( 'connection_recycle_time', default=3600, help=( 'Connections which have been present in the connection ' 'pool longer than this number of seconds will be replaced ' 'with a new one the next time they are checked out from ' 'the pool.' ), ), cfg.IntOpt( 'max_pool_size', default=5, help=( 'Maximum number of SQL connections to keep open in a pool. ' 'Setting a value of 0 indicates no limit.' ), ), cfg.IntOpt( 'max_retries', default=10, help=( 'Maximum number of database connection retries during startup. ' 'Set to -1 to specify an infinite retry count.' ), ), cfg.IntOpt( 'retry_interval', default=10, help='Interval between retries of opening a SQL connection.', ), cfg.IntOpt( 'max_overflow', default=50, help='If set, use this value for max_overflow with SQLAlchemy.', ), cfg.IntOpt( 'connection_debug', default=0, min=0, max=100, help=( 'Verbosity of SQL debugging information: 0=None, ' '100=Everything.' ), ), cfg.BoolOpt( 'connection_trace', default=False, help='Add Python stack traces to SQL as comment strings.', ), cfg.IntOpt( 'pool_timeout', help='If set, use this value for pool_timeout with SQLAlchemy.', ), cfg.BoolOpt( 'use_db_reconnect', default=False, help=( 'Enable the experimental use of database reconnect ' 'on connection lost.' ), ), cfg.IntOpt( 'db_retry_interval', default=1, help='Seconds between retries of a database transaction.', ), cfg.BoolOpt( 'db_inc_retry_interval', default=True, help=( 'If True, increases the interval between retries ' 'of a database operation up to db_max_retry_interval.' ), ), cfg.IntOpt( 'db_max_retry_interval', default=10, help=( 'If db_inc_retry_interval is set, the ' 'maximum seconds between retries of a ' 'database operation.' ), ), cfg.IntOpt( 'db_max_retries', default=20, help=( 'Maximum retries in case of connection error or deadlock ' 'error before error is ' 'raised. Set to -1 to specify an infinite retry ' 'count.' ), ), cfg.StrOpt( 'connection_parameters', default='', help=( 'Optional URL parameters to append onto the connection ' 'URL at connect time; specify as ' 'param1=value1¶m2=value2&...' ), ), ] def set_defaults( conf, connection=None, max_pool_size=None, max_overflow=None, pool_timeout=None, ): """Set defaults for configuration variables. Overrides default options values. :param conf: Config instance specified to set default options in it. Using of instances instead of a global config object prevents conflicts between options declaration. :type conf: oslo.config.cfg.ConfigOpts instance. :keyword connection: SQL connection string. Valid SQLite URL forms are: * sqlite:///:memory: (or, sqlite://) * sqlite:///relative/path/to/file.db * sqlite:////absolute/path/to/file.db :type connection: str :keyword max_pool_size: maximum connections pool size. The size of the pool to be maintained, defaults to 5. 
This is the largest number of connections that will be kept persistently in the pool. Note that the pool begins with no connections; once this number of connections is requested, that number of connections will remain. :type max_pool_size: int :default max_pool_size: 5 :keyword max_overflow: The maximum overflow size of the pool. When the number of checked-out connections reaches the size set in pool_size, additional connections will be returned up to this limit. When those additional connections are returned to the pool, they are disconnected and discarded. It follows then that the total number of simultaneous connections the pool will allow is pool_size + max_overflow, and the total number of "sleeping" connections the pool will allow is pool_size. max_overflow can be set to -1 to indicate no overflow limit; no limit will be placed on the total number of concurrent connections. Defaults to 10, will be used if value of the parameter in `None`. :type max_overflow: int :default max_overflow: None :keyword pool_timeout: The number of seconds to wait before giving up on returning a connection. Defaults to 30, will be used if value of the parameter is `None`. :type pool_timeout: int :default pool_timeout: None """ conf.register_opts(database_opts, group='database') if connection is not None: conf.set_default('connection', connection, group='database') if max_pool_size is not None: conf.set_default('max_pool_size', max_pool_size, group='database') if max_overflow is not None: conf.set_default('max_overflow', max_overflow, group='database') if pool_timeout is not None: conf.set_default('pool_timeout', pool_timeout, group='database') def list_opts(): """Returns a list of oslo.config options available in the library. The returned list includes all oslo.config options which may be registered at runtime by the library. Each element of the list is a tuple. The first element is the name of the group under which the list of elements in the second element will be registered. A group name of None corresponds to the [DEFAULT] group in config files. The purpose of this is to allow tools like the Oslo sample config file generator to discover the options exposed to users by this library. :returns: a list of (group_name, opts) tuples """ return [('database', database_opts)] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5326004 oslo.db-16.0.0/oslo_db/sqlalchemy/0000775000175000017500000000000000000000000016744 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/__init__.py0000664000175000017500000000000000000000000021043 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5326004 oslo.db-16.0.0/oslo_db/sqlalchemy/compat/0000775000175000017500000000000000000000000020227 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/compat/__init__.py0000664000175000017500000000234000000000000022337 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import versionutils from sqlalchemy import __version__ _vers = versionutils.convert_version_to_tuple(__version__) sqla_2 = _vers >= (2, ) native_pre_ping_event_support = _vers >= (2, 0, 5) def dialect_from_exception_context(ctx): if sqla_2: # SQLAlchemy 2.0 still has context.engine, however if the # exception context is called in the context of a ping handler, # engine is not present. need to use dialect instead return ctx.dialect else: return ctx.engine.dialect def driver_connection(connection): if sqla_2: return connection.connection.driver_connection else: return connection.connection.connection ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/enginefacade.py0000664000175000017500000014070200000000000021713 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import functools import inspect import operator import threading import warnings import debtcollector.moves import debtcollector.removals import debtcollector.renames from oslo_config import cfg from oslo_utils import excutils from oslo_db import exception from oslo_db import options from oslo_db.sqlalchemy import engines from oslo_db.sqlalchemy import orm from oslo_db import warning class _symbol(object): """represent a fixed symbol.""" __slots__ = 'name', def __init__(self, name): self.name = name def __repr__(self): return "symbol(%r)" % self.name _ASYNC_READER = _symbol('ASYNC_READER') """Represent the transaction state of "async reader". This state indicates that the transaction is a read-only and is safe to use on an asynchronously updated slave database. """ _READER = _symbol('READER') """Represent the transaction state of "reader". This state indicates that the transaction is a read-only and is only safe to use on a synchronously updated slave database; otherwise the master database should be used. """ _WRITER = _symbol('WRITER') """Represent the transaction state of "writer". This state indicates that the transaction writes data and should be directed at the master database. """ class _Default: """Mark a value as a default value. A value in the local configuration dictionary wrapped with _Default() will not take precedence over a value that is specified in cfg.CONF. Values that are set after the fact using configure() will supersede those in cfg.CONF. 
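    An illustrative precedence ordering, from highest to lowest
    (``max_pool_size`` is used purely as an example key and ``factory``
    is a hypothetical :class:`._TransactionFactory` instance)::

        factory.configure(max_pool_size=20)           # plain value, always wins
        cfg.CONF.database.max_pool_size               # overrides _Default values
        factory.configure_defaults(max_pool_size=20)  # wrapped in _Default(), lowest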
""" __slots__ = 'value', _notset = _symbol("NOTSET") def __init__(self, value=_notset): self.value = value @classmethod def resolve(cls, value): if isinstance(value, _Default): v = value.value if v is cls._notset: return None else: return v else: return value @classmethod def resolve_w_conf(cls, value, conf, key): if isinstance(value, _Default): v = getattr(conf.database, key, value.value) if v is cls._notset: return None else: return v else: return value @classmethod def is_set(cls, value): if not isinstance(value, _Default): return True return value.value is not cls._notset @classmethod def is_set_w_conf(cls, value, conf, key): if hasattr(conf.database, key): # If the option is set via configuration then we should always # respect it...unless that option is deprecated and hasn't been set # by user, in which case we should ignore it. # oslo.config doesn't provide a public API to retrieve the opt # itself, as opposed to the value of the opt :( opt = conf.database._group._opts[key]['opt'] # ditto for the group group = conf.database._group if ( opt.deprecated_for_removal and conf.get_location(key, group=group.name).location == cfg.Locations.opt_default ): return False return True return cls.is_set(value) class AlreadyStartedError(TypeError): """Raises when a factory is being asked to initialize a second time. Subclasses :class:`.TypeError` for legacy support. """ class _TransactionFactory: """A factory for :class:`._TransactionContext` objects. By default, there is just one of these, set up based on CONF, however instance-level :class:`._TransactionFactory` objects can be made, as is the case with the :class:`._TestTransactionFactory` subclass used by the oslo.db test suite. """ def __init__(self): self._url_cfg = { 'connection': _Default(), 'slave_connection': _Default(), } self._engine_cfg = { 'sqlite_fk': _Default(False), 'mysql_sql_mode': _Default('TRADITIONAL'), 'mysql_wsrep_sync_wait': _Default(), 'connection_recycle_time': _Default(3600), 'connection_debug': _Default(0), 'max_pool_size': _Default(), 'max_overflow': _Default(), 'pool_timeout': _Default(), 'sqlite_synchronous': _Default(True), 'connection_trace': _Default(False), 'max_retries': _Default(10), 'retry_interval': _Default(10), 'thread_checkin': _Default(True), 'json_serializer': _Default(None), 'json_deserializer': _Default(None), 'logging_name': _Default(None), 'connection_parameters': _Default(None) } self._maker_cfg = { 'expire_on_commit': _Default(False), } self._transaction_ctx_cfg = { 'rollback_reader_sessions': False, 'flush_on_subtransaction': False, } self._facade_cfg = { 'synchronous_reader': True, 'on_engine_create': [], } # other options that are defined in oslo_db.options.database_opts # but do not apply to the standard enginefacade arguments (most seem # to apply to api.DBAPI). self._ignored_cfg = dict( (k, _Default(None)) for k in [ 'db_max_retries', 'db_inc_retry_interval', 'use_db_reconnect', 'db_retry_interval', 'db_max_retry_interval', 'backend', ] ) self._started = False self._legacy_facade = None self._start_lock = threading.Lock() def configure_defaults(self, **kw): """Apply default configurational options. This method can only be called before any specific transaction-beginning methods have been called. Configurational options are within a fixed set of keys, and fall under three categories: URL configuration, engine configuration, and session configuration. 
Each key given will be tested against these three configuration sets to see which one is applicable; if it is not applicable to any set, an exception is raised. The configurational options given here act as **defaults** when the :class:`._TransactionFactory` is configured using a :class:`oslo_config.cfg.ConfigOpts` object; the options present within the :class:`oslo_config.cfg.ConfigOpts` **take precedence** versus the arguments passed here. By default, the :class:`._TransactionFactory` loads in the configuration from :data:`oslo_config.cfg.CONF`, after applying the :data:`oslo_db.options.database_opts` configurational defaults to it. :param connection: database URL :param slave_connection: database URL :param sqlite_fk: whether to enable SQLite foreign key pragma; default False :param mysql_sql_mode: MySQL SQL mode, defaults to TRADITIONAL :param mysql_wsrep_sync_wait: MySQL wsrep_sync_wait, defaults to None, which indicates no setting will be passed :param connection_recycle_time: connection pool recycle time, defaults to 3600. Note the connection does not actually have to be "idle" to be recycled. :param connection_debug: engine logging level, defaults to 0. set to 50 for INFO, 100 for DEBUG. :param connection_parameters: additional parameters to append onto the database URL query string, pass as "param1=value1¶m2=value2&..." :param max_pool_size: max size of connection pool, uses CONF for default :param max_overflow: max overflow for connection pool, uses CONF for default :param sqlite_synchronous: disable SQLite SYNCHRONOUS pragma if False; defaults to True :param connection_trace: enable tracing comments in logging :param max_retries: max retries to connect, defaults to !0 :param retry_interval: time in seconds between retries, defaults to 10 :param thread_checkin: add sleep(0) on connection checkin to allow greenlet yields, defaults to True :param json_serializer: JSON serializer for PostgreSQL connections :param json_deserializer: JSON deserializer for PostgreSQL connections :param logging_name: logging name for engine :param expire_on_commit: sets expire_on_commit for SQLAlchemy sessionmaker; defaults to False :param rollback_reader_sessions: if True, a :class:`.Session` object will have its :meth:`.Session.rollback` method invoked at the end of a ``@reader`` block, actively rolling back the transaction and expiring the objects within, before the :class:`.Session` moves on to be closed, which has the effect of releasing connection resources back to the connection pool and detaching all objects. If False, the :class:`.Session` is not affected at the end of a ``@reader`` block; the underlying connection referred to by this :class:`.Session` will still be released in the enclosing context via the :meth:`.Session.close` method, which still ensures that the DBAPI connection is rolled back, however the objects associated with the :class:`.Session` retain their database-persisted contents after they are detached. .. seealso:: http://docs.sqlalchemy.org/en/rel_0_9/glossary.html#term-released\ SQLAlchemy documentation on what "releasing resources" means. :param synchronous_reader: whether or not to assume a "reader" context needs to guarantee it can read data committed by a "writer" assuming replication lag is present; defaults to True. When False, a @reader context works the same as @async_reader and will select the "slave" database if present. 
:param flush_on_subtransaction: if True, a :class:`.Session` object will have its :meth:`.Session.flush` method invoked whenever a context manager or decorator that is not itself the originator of the top- level or savepoint :class:`.Session` transaction exits - in this way it behaves like a "subtransaction" from a :class:`.Session` perspective. .. seealso:: :meth:`._TransactionFactory.configure` """ self._configure(True, kw) def configure(self, **kw): """Apply configurational options. This method can only be called before any specific transaction-beginning methods have been called. Behavior here is the same as that of :meth:`._TransactionFactory.configure_defaults`, with the exception that values specified here will **supersede** those setup in the :class:`oslo_config.cfg.ConfigOpts` options. See that method for a listing of all keyword arguments. .. seealso:: :meth:`._TransactionFactory.configure_defaults` """ self._configure(False, kw) def _configure(self, as_defaults, kw): if self._started: raise AlreadyStartedError( "this TransactionFactory is already started" ) not_supported = [] for k, v in kw.items(): for dict_ in ( self._url_cfg, self._engine_cfg, self._maker_cfg, self._ignored_cfg, self._facade_cfg, self._transaction_ctx_cfg, ): if k in dict_: dict_[k] = _Default(v) if as_defaults else v break else: not_supported.append(k) if not_supported: # would like to raise ValueError here, but there are just # too many unrecognized (obsolete?) configuration options # coming in from projects warnings.warn( "Configuration option(s) %r not supported" % sorted(not_supported), warning.NotSupportedWarning ) def get_legacy_facade(self): """Return a :class:`.LegacyEngineFacade` for this factory. This facade will make use of the same engine and sessionmaker as this factory, however will not share the same transaction context; the legacy facade continues to work the old way of returning a new Session each time get_session() is called. """ if not self._legacy_facade: self._legacy_facade = LegacyEngineFacade(None, _factory=self) if not self._started: self._start() return self._legacy_facade def get_writer_engine(self): """Return the writer engine for this factory. Implies start. """ if not self._started: self._start() return self._writer_engine def get_reader_engine(self): """Return the reader engine for this factory. Implies start. """ if not self._started: self._start() return self._reader_engine def get_writer_maker(self): """Return the writer sessionmaker for this factory. Implies start. """ if not self._started: self._start() return self._writer_maker def get_reader_maker(self): """Return the reader sessionmaker for this factory. Implies start. """ if not self._started: self._start() return self._reader_maker def _create_connection(self, mode): if not self._started: self._start() if mode is _WRITER: return self._writer_engine.connect() elif mode is _ASYNC_READER or \ (mode is _READER and not self.synchronous_reader): return self._reader_engine.connect() else: return self._writer_engine.connect() def _create_session(self, mode, bind=None): if not self._started: self._start() kw = {} # don't pass 'bind' if bind is None; the sessionmaker # already has a bind to the engine. 
if bind: kw['bind'] = bind if mode is _WRITER: return self._writer_maker(**kw) elif mode is _ASYNC_READER or \ (mode is _READER and not self.synchronous_reader): return self._reader_maker(**kw) else: return self._writer_maker(**kw) def _create_factory_copy(self): factory = _TransactionFactory() factory._url_cfg.update(self._url_cfg) factory._engine_cfg.update(self._engine_cfg) factory._maker_cfg.update(self._maker_cfg) factory._transaction_ctx_cfg.update(self._transaction_ctx_cfg) factory._facade_cfg.update(self._facade_cfg) return factory def _args_for_conf(self, default_cfg, conf): if conf is None: return { key: _Default.resolve(value) for key, value in default_cfg.items() if _Default.is_set(value) } else: return { key: _Default.resolve_w_conf(value, conf, key) for key, value in default_cfg.items() if _Default.is_set_w_conf(value, conf, key) } def _url_args_for_conf(self, conf): return self._args_for_conf(self._url_cfg, conf) def _engine_args_for_conf(self, conf): return self._args_for_conf(self._engine_cfg, conf) def _maker_args_for_conf(self, conf): maker_args = self._args_for_conf(self._maker_cfg, conf) return maker_args def dispose_pool(self): """Call engine.pool.dispose() on underlying Engine objects.""" with self._start_lock: if not self._started: return self._writer_engine.pool.dispose() if self._reader_engine is not self._writer_engine: self._reader_engine.pool.dispose() @property def is_started(self): """True if this :class:`._TransactionFactory` is already started.""" return self._started def _start(self, conf=False, connection=None, slave_connection=None): with self._start_lock: # self._started has been checked on the outside # when _start() was called. Within the lock, # check the flag once more to detect the case where # the start process proceeded while this thread was waiting # for the lock. if self._started: return if conf is False: conf = cfg.CONF # perform register_opts() local to actually using # the cfg.CONF to maintain exact compatibility with # the EngineFacade design. This can be changed if needed. if conf is not None: conf.register_opts(options.database_opts, 'database') url_args = self._url_args_for_conf(conf) if connection: url_args['connection'] = connection if slave_connection: url_args['slave_connection'] = slave_connection engine_args = self._engine_args_for_conf(conf) maker_args = self._maker_args_for_conf(conf) self._writer_engine, self._writer_maker = \ self._setup_for_connection( url_args['connection'], engine_args, maker_args) if url_args.get('slave_connection'): self._reader_engine, self._reader_maker = \ self._setup_for_connection( url_args['slave_connection'], engine_args, maker_args) else: self._reader_engine, self._reader_maker = \ self._writer_engine, self._writer_maker self.synchronous_reader = self._facade_cfg['synchronous_reader'] # set up _started last, so that in case of exceptions # we try the whole thing again and report errors # correctly self._started = True def _setup_for_connection( self, sql_connection, engine_kwargs, maker_kwargs, ): if sql_connection is None: raise exception.CantStartEngineError( "No sql_connection parameter is established") engine = engines.create_engine( sql_connection=sql_connection, **engine_kwargs) for hook in self._facade_cfg['on_engine_create']: hook(engine) sessionmaker = orm.get_maker(engine=engine, **maker_kwargs) return engine, sessionmaker class _TestTransactionFactory(_TransactionFactory): """A :class:`._TransactionFactory` used by test suites. 
This is a :class:`._TransactionFactory` that can be directly injected with an existing engine and sessionmaker. Note that while this is used by oslo.db's own tests of the enginefacade system, it is also exported for use by the test suites of other projects, first as an element of the oslo_db.sqlalchemy.test_fixtures module, and secondly may be used by external test suites directly. Includes a feature to inject itself temporarily as the factory within the global :class:`._TransactionContextManager`. """ @debtcollector.removals.removed_kwarg( 'synchronous_reader', 'argument value is propagated from the parent _TransactionFactory') def __init__(self, engine, maker, apply_global, from_factory=None, **kw): # NOTE(zzzeek): **kw needed for backwards compability self._reader_engine = self._writer_engine = engine self._reader_maker = self._writer_maker = maker self._started = True self._legacy_facade = None if from_factory is None: from_factory = _context_manager._factory self._facade_cfg = from_factory._facade_cfg self._transaction_ctx_cfg = from_factory._transaction_ctx_cfg self.synchronous_reader = self._facade_cfg['synchronous_reader'] if apply_global: self.existing_factory = _context_manager._factory _context_manager._root_factory = self def dispose_global(self): _context_manager._root_factory = self.existing_factory class _TransactionContext(object): """Represent a single database transaction in progress.""" def __init__(self, factory, global_factory=None): """Construct a new :class:`.TransactionContext`. :param factory: the :class:`.TransactionFactory` which will serve as a source of connectivity. :param global_factory: the "global" factory which will be used by the global ``_context_manager`` for new ``_TransactionContext`` objects created under this one. When left as None the actual "global" factory is used. """ self.factory = factory self.global_factory = global_factory self.mode = None self.session = None self.connection = None self.transaction = None kw = self.factory._transaction_ctx_cfg self.rollback_reader_sessions = kw['rollback_reader_sessions'] self.flush_on_subtransaction = kw['flush_on_subtransaction'] @contextlib.contextmanager def _connection(self, savepoint=False, context=None): if self.connection is None: try: if self.session is not None: # use existing session, which is outer to us self.connection = self.session.connection() if savepoint: with self.connection.begin_nested(), \ self._add_context(self.connection, context): yield self.connection else: with self._add_context(self.connection, context): yield self.connection else: # is outermost self.connection = self.factory._create_connection( mode=self.mode) self.transaction = self.connection.begin() try: with self._add_context(self.connection, context): yield self.connection self._end_connection_transaction(self.transaction) except Exception: self.transaction.rollback() # TODO(zzzeek) do we need save_and_reraise() here, # or do newer eventlets not have issues? 
we are using # raw "raise" in many other places in oslo.db already raise finally: self.transaction = None self.connection.close() finally: self.connection = None else: # use existing connection, which is outer to us if savepoint: with self.connection.begin_nested(), \ self._add_context(self.connection, context): yield self.connection else: with self._add_context(self.connection, context): yield self.connection @contextlib.contextmanager def _session(self, savepoint=False, context=None): if self.session is None: self.session = self.factory._create_session( bind=self.connection, mode=self.mode) try: self.session.begin() with self._add_context(self.session, context): yield self.session self._end_session_transaction(self.session) except Exception: with excutils.save_and_reraise_exception(): self.session.rollback() finally: self.session.close() self.session = None else: # use existing session, which is outer to us if savepoint: with self.session.begin_nested(): with self._add_context(self.session, context): yield self.session else: with self._add_context(self.session, context): yield self.session if self.flush_on_subtransaction: self.session.flush() @contextlib.contextmanager def _add_context(self, connection, context): restore_context = connection.info.get('using_context') connection.info['using_context'] = context yield connection connection.info['using_context'] = restore_context def _end_session_transaction(self, session): if self.mode is _WRITER: session.commit() elif self.rollback_reader_sessions: session.rollback() # In the absence of calling session.rollback(), # the next call is session.close(). This releases all # objects from the session into the detached state, and # releases the connection as well; the connection when returned # to the pool is either rolled back in any case, or closed fully. def _end_connection_transaction(self, transaction): if self.mode is _WRITER: transaction.commit() else: transaction.rollback() def _produce_block(self, mode, connection, savepoint, allow_async=False, context=None): if mode is _WRITER: self._writer() elif mode is _ASYNC_READER: self._async_reader() else: self._reader(allow_async) if connection: return self._connection(savepoint, context=context) else: return self._session(savepoint, context=context) def _writer(self): if self.mode is None: self.mode = _WRITER elif self.mode is _READER: raise TypeError( "Can't upgrade a READER transaction " "to a WRITER mid-transaction") elif self.mode is _ASYNC_READER: raise TypeError( "Can't upgrade an ASYNC_READER transaction " "to a WRITER mid-transaction") def _reader(self, allow_async=False): if self.mode is None: self.mode = _READER elif self.mode is _ASYNC_READER and not allow_async: raise TypeError( "Can't upgrade an ASYNC_READER transaction " "to a READER mid-transaction") def _async_reader(self): if self.mode is None: self.mode = _ASYNC_READER class _TransactionContextTLocal(threading.local): def __deepcopy__(self, memo): return self def __reduce__(self): return _TransactionContextTLocal, () class _TransactionContextManager(object): """Provide context-management and decorator patterns for transactions. This object integrates user-defined "context" objects with the :class:`._TransactionContext` class, on behalf of a contained :class:`._TransactionFactory`. 
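    A minimal usage sketch of the module-level managers built from this
    class (``MyContext`` and ``Item`` are hypothetical application-side
    classes, shown only for illustration)::

        from oslo_db.sqlalchemy import enginefacade

        @enginefacade.transaction_context_provider
        class MyContext(object):
            pass

        @enginefacade.reader
        def get_items(context):
            return context.session.query(Item).all()

        @enginefacade.writer
        def create_item(context, values):
            item = Item(**values)
            context.session.add(item)
            return item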
""" def __init__( self, root=None, mode=None, independent=False, savepoint=False, connection=False, replace_global_factory=None, _is_global_manager=False, allow_async=False): if root is None: self._root = self self._root_factory = _TransactionFactory() else: self._root = root self._replace_global_factory = replace_global_factory self._is_global_manager = _is_global_manager self._mode = mode self._independent = independent self._savepoint = savepoint if self._savepoint and self._independent: raise TypeError( "setting savepoint and independent makes no sense.") self._connection = connection self._allow_async = allow_async @property def _factory(self): """The :class:`._TransactionFactory` associated with this context.""" return self._root._root_factory @property def is_started(self): """True if this manager is already started.""" return self._factory.is_started def configure(self, **kw): """Apply configurational options to the factory. This method can only be called before any specific transaction-beginning methods have been called. """ self._factory.configure(**kw) def append_on_engine_create(self, fn): """Append a listener function to _facade_cfg["on_engine_create"]""" self._factory._facade_cfg['on_engine_create'].append(fn) def get_legacy_facade(self): """Return a :class:`.LegacyEngineFacade` for factory from this context. This facade will make use of the same engine and sessionmaker as this factory, however will not share the same transaction context; the legacy facade continues to work the old way of returning a new Session each time get_session() is called. """ return self._factory.get_legacy_facade() def get_engine(self): """Return the Engine in use. This will be based on the state being WRITER or READER. This implies a start operation. """ if self._mode is _WRITER: return self._factory.get_writer_engine() elif self._mode is _READER: return self._factory.get_reader_engine() else: raise ValueError("mode should be WRITER or READER") def get_sessionmaker(self): """Return the sessionmaker in use. This will be based on the state being WRITER or READER. This implies a start operation. """ if self._mode is _WRITER: return self._factory.get_writer_maker() elif self._mode is _READER: return self._factory.get_reader_maker() else: raise ValueError("mode should be WRITER or READER") def dispose_pool(self): """Call engine.pool.dispose() on underlying Engine objects.""" self._factory.dispose_pool() def make_new_manager(self): """Create a new, independent _TransactionContextManager from this one. Copies the underlying _TransactionFactory to a new one, so that it can be further configured with new options. Used for test environments where the application-wide _TransactionContextManager may be used as a factory for test-local managers. """ new = self._clone() new._root = new new._root_factory = self._root_factory._create_factory_copy() if new._factory._started: raise AssertionError('TransactionFactory is already started') return new def patch_factory(self, factory_or_manager): """Patch a _TransactionFactory into this manager. Replaces this manager's factory with the given one, and returns a callable that will reset the factory back to what we started with. Only works for root factories. Is intended for test suites that need to patch in alternate database configurations. The given argument may be a _TransactionContextManager or a _TransactionFactory. 
""" if isinstance(factory_or_manager, _TransactionContextManager): factory = factory_or_manager._factory elif isinstance(factory_or_manager, _TransactionFactory): factory = factory_or_manager else: raise ValueError( "_TransactionContextManager or " "_TransactionFactory expected.") if self._root is not self: raise AssertionError('patch_factory only works for root factory.') existing_factory = self._root_factory self._root_factory = factory def reset(): self._root_factory = existing_factory return reset def patch_engine(self, engine): """Patch an Engine into this manager. Replaces this manager's factory with a _TestTransactionFactory that will use the given Engine, and returns a callable that will reset the factory back to what we started with. Only works for root factories. Is intended for test suites that need to patch in alternate database configurations. """ existing_factory = self._factory if not existing_factory._started: existing_factory._start() maker = existing_factory._writer_maker maker_kwargs = existing_factory._maker_args_for_conf(cfg.CONF) maker = orm.get_maker(engine=engine, **maker_kwargs) factory = _TestTransactionFactory( engine, maker, apply_global=False, from_factory=existing_factory ) return self.patch_factory(factory) @property def replace(self): """Modifier to replace the global transaction factory with this one.""" return self._clone(replace_global_factory=self._factory) @property def writer(self): """Modifier to set the transaction to WRITER.""" return self._clone(mode=_WRITER) @property def reader(self): """Modifier to set the transaction to READER.""" return self._clone(mode=_READER) @property def allow_async(self): """Modifier to allow async operations Allows async operations if asynchronous session is already started in this context. Marking DB API methods with READER would make it impossible to use them in ASYNC_READER transactions, and marking them with ASYNC_READER would require a modification of all the places these DB API methods are called to force READER mode, where the latest DB state is required. In Nova DB API methods should have a 'safe' default (i.e. READER), so that they can start sessions on their own, but it would also be useful for them to be able to participate in an existing ASYNC_READER session, if one was started up the stack. 
""" if self._mode is _WRITER: raise TypeError("Setting async on a WRITER makes no sense") return self._clone(allow_async=True) @property def independent(self): """Modifier to start a transaction independent from any enclosing.""" return self._clone(independent=True) @property def savepoint(self): """Modifier to start a SAVEPOINT if a transaction already exists.""" return self._clone(savepoint=True) @property def connection(self): """Modifier to return a core Connection object instead of Session.""" return self._clone(connection=True) @property def async_(self): """Modifier to set a READER operation to ASYNC_READER.""" if self._mode is _WRITER: raise TypeError("Setting async on a WRITER makes no sense") return self._clone(mode=_ASYNC_READER) def using(self, context): """Provide a context manager block that will use the given context.""" return self._transaction_scope(context) def __call__(self, fn): """Decorate a function.""" argspec = inspect.getfullargspec(fn) if argspec.args[0] == 'self' or argspec.args[0] == 'cls': context_index = 1 else: context_index = 0 context_kw = argspec.args[context_index] @functools.wraps(fn) def wrapper(*args, **kwargs): context = kwargs.get(context_kw, None) if not context: context = args[context_index] with self._transaction_scope(context): return fn(*args, **kwargs) return wrapper def _clone(self, **kw): default_kw = { "independent": self._independent, "mode": self._mode, "connection": self._connection } default_kw.update(kw) return _TransactionContextManager(root=self._root, **default_kw) @contextlib.contextmanager def _transaction_scope(self, context): new_transaction = self._independent transaction_contexts_by_thread = \ _transaction_contexts_by_thread(context) current = restore = getattr( transaction_contexts_by_thread, "current", None) use_factory = self._factory global_factory = None if self._replace_global_factory: use_factory = global_factory = self._replace_global_factory elif current is not None and current.global_factory: global_factory = current.global_factory if self._root._is_global_manager: use_factory = global_factory if current is not None and ( new_transaction or current.factory is not use_factory ): current = None if current is None: current = transaction_contexts_by_thread.current = \ _TransactionContext(use_factory, global_factory=global_factory) try: if self._mode is not None: with current._produce_block( mode=self._mode, connection=self._connection, savepoint=self._savepoint, allow_async=self._allow_async, context=context) as resource: yield resource else: yield finally: if restore is None: del transaction_contexts_by_thread.current elif current is not restore: transaction_contexts_by_thread.current = restore @property @debtcollector.moves.moved_property("async_") def async_compat(self): return self.async_ setattr( _TransactionContextManager, "async", async_compat ) def _context_descriptor(attr=None): getter = operator.attrgetter(attr) def _property_for_context(context): try: transaction_context = context.transaction_ctx except exception.NoEngineContextEstablished: raise exception.NoEngineContextEstablished( "No TransactionContext is established for " "this %s object within the current thread; " "the %r attribute is unavailable." % (context, attr) ) else: result = getter(transaction_context) if result is None: raise exception.ContextNotRequestedError( "The '%s' context attribute was requested but " "it has not been established for this context." 
% attr ) return result return property(_property_for_context) def _transaction_ctx_for_context(context): by_thread = _transaction_contexts_by_thread(context) try: return by_thread.current except AttributeError: raise exception.NoEngineContextEstablished( "No TransactionContext is established for " "this %s object within the current thread. " % context ) def _transaction_contexts_by_thread(context): transaction_contexts_by_thread = getattr( context, '_enginefacade_context', None) if transaction_contexts_by_thread is None: transaction_contexts_by_thread = \ context._enginefacade_context = _TransactionContextTLocal() return transaction_contexts_by_thread def transaction_context_provider(klass): """Decorate a class with ``session`` and ``connection`` attributes.""" setattr( klass, 'transaction_ctx', property(_transaction_ctx_for_context)) # Graft transaction context attributes as context properties for attr in ('session', 'connection', 'transaction'): setattr(klass, attr, _context_descriptor(attr)) return klass _context_manager = _TransactionContextManager(_is_global_manager=True) """default context manager.""" def transaction_context(): """Construct a local transaction context. """ return _TransactionContextManager() def configure(**kw): """Apply configurational options to the global factory. This method can only be called before any specific transaction-beginning methods have been called. .. seealso:: :meth:`._TransactionFactory.configure` """ _context_manager._factory.configure(**kw) def get_legacy_facade(): """Return a :class:`.LegacyEngineFacade` for the global factory. This facade will make use of the same engine and sessionmaker as this factory, however will not share the same transaction context; the legacy facade continues to work the old way of returning a new Session each time get_session() is called. """ return _context_manager.get_legacy_facade() reader = _context_manager.reader """The global 'reader' starting point.""" writer = _context_manager.writer """The global 'writer' starting point.""" class LegacyEngineFacade(object): """A helper class for removing of global engine instances from oslo.db. .. deprecated:: 1.12.0 Please use :mod:`oslo_db.sqlalchemy.enginefacade` for new development. As a library, oslo.db can't decide where to store/when to create engine and sessionmaker instances, so this must be left for a target application. On the other hand, in order to simplify the adoption of oslo.db changes, we'll provide a helper class, which creates engine and sessionmaker on its instantiation and provides get_engine()/get_session() methods that are compatible with corresponding utility functions that currently exist in target projects, e.g. in Nova. engine/sessionmaker instances will still be global (and they are meant to be global), but they will be stored in the app context, rather that in the oslo.db context. Two important things to remember: 1. An Engine instance is effectively a pool of DB connections, so it's meant to be shared (and it's thread-safe). 2. A Session instance is not meant to be shared and represents a DB transactional context (i.e. it's not thread-safe). sessionmaker is a factory of sessions. :param sql_connection: the connection string for the database to use :type sql_connection: string :param slave_connection: the connection string for the 'slave' database to use. If not provided, the master database will be used for all operations. 
Note: this is meant to be used for offloading of read operations to asynchronously replicated slaves to reduce the load on the master database. :type slave_connection: string :param sqlite_fk: enable foreign keys in SQLite :type sqlite_fk: bool :param expire_on_commit: expire session objects on commit :type expire_on_commit: bool Keyword arguments: :keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions. (defaults to TRADITIONAL) :keyword mysql_wsrep_sync_wait: value of wsrep_sync_wait for Galera (defaults to None, which indicates no setting will be passed) :keyword connection_recycle_time: Time period for connections to be recycled upon checkout (defaults to 3600) :keyword connection_debug: verbosity of SQL debugging information. -1=Off, 0=None, 100=Everything (defaults to 0) :keyword max_pool_size: maximum number of SQL connections to keep open in a pool (defaults to SQLAlchemy settings) :keyword max_overflow: if set, use this value for max_overflow with sqlalchemy (defaults to SQLAlchemy settings) :keyword pool_timeout: if set, use this value for pool_timeout with sqlalchemy (defaults to SQLAlchemy settings) :keyword sqlite_synchronous: if True, SQLite uses synchronous mode (defaults to True) :keyword connection_trace: add python stack traces to SQL as comment strings (defaults to False) :keyword max_retries: maximum db connection retries during startup. (setting -1 implies an infinite retry count) (defaults to 10) :keyword retry_interval: interval between retries of opening a sql connection (defaults to 10) :keyword thread_checkin: boolean that indicates that between each engine checkin event a sleep(0) will occur to allow other greenthreads to run (defaults to True) """ def __init__(self, sql_connection, slave_connection=None, sqlite_fk=False, expire_on_commit=False, _conf=None, _factory=None, **kwargs): warnings.warn( "EngineFacade is deprecated; please use " "oslo_db.sqlalchemy.enginefacade", warning.OsloDBDeprecationWarning, stacklevel=2) if _factory: self._factory = _factory else: self._factory = _TransactionFactory() self._factory.configure( sqlite_fk=sqlite_fk, expire_on_commit=expire_on_commit, **kwargs ) # make sure passed-in urls are favored over that # of config self._factory._start( _conf, connection=sql_connection, slave_connection=slave_connection) def _check_factory_started(self): if not self._factory._started: self._factory._start() def get_engine(self, use_slave=False): """Get the engine instance (note, that it's shared). :param use_slave: if possible, use 'slave' database for this engine. If the connection string for the slave database wasn't provided, 'master' engine will be returned. (defaults to False) :type use_slave: bool """ self._check_factory_started() if use_slave: return self._factory._reader_engine else: return self._factory._writer_engine def get_session(self, use_slave=False, **kwargs): """Get a Session instance. :param use_slave: if possible, use 'slave' database connection for this session. If the connection string for the slave database wasn't provided, a session bound to the 'master' engine will be returned. (defaults to False) :type use_slave: bool Keyword arguments will be passed to a sessionmaker instance as is (if passed, they will override the ones used when the sessionmaker instance was created). See SQLAlchemy Session docs for details. 
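        A short illustrative sketch (``facade`` and ``instance`` are assumed
        to exist in the calling application)::

            session = facade.get_session()
            with session.begin():
                session.add(instance)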
""" self._check_factory_started() if use_slave: return self._factory._reader_maker(**kwargs) else: return self._factory._writer_maker(**kwargs) def get_sessionmaker(self, use_slave=False): """Get the sessionmaker instance used to create a Session. This can be called for those cases where the sessionmaker() is to be temporarily injected with some state such as a specific connection. """ self._check_factory_started() if use_slave: return self._factory._reader_maker else: return self._factory._writer_maker @classmethod def from_config(cls, conf, sqlite_fk=False, expire_on_commit=False): """Initialize EngineFacade using oslo.config config instance options. :param conf: oslo.config config instance :type conf: oslo_config.cfg.ConfigOpts :param sqlite_fk: enable foreign keys in SQLite :type sqlite_fk: bool :param expire_on_commit: expire session objects on commit :type expire_on_commit: bool """ return cls( None, sqlite_fk=sqlite_fk, expire_on_commit=expire_on_commit, _conf=conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/engines.py0000664000175000017500000004377600000000000020767 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Core SQLAlchemy connectivity routines. """ import functools import itertools import logging import os import re import time import debtcollector.removals import debtcollector.renames import sqlalchemy from sqlalchemy import event from sqlalchemy import exc from sqlalchemy import pool from sqlalchemy import select from oslo_db import exception from oslo_db.sqlalchemy import compat from oslo_db.sqlalchemy import exc_filters from oslo_db.sqlalchemy import utils LOG = logging.getLogger(__name__) def _thread_yield(dbapi_con, con_record): """Ensure other greenthreads get a chance to be executed. If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will execute instead of time.sleep(0). Force a context switch. With common database backends (eg MySQLdb and sqlite), there is no implicit yield caused by network I/O since they are implemented by C libraries that eventlet cannot monkey patch. """ time.sleep(0) def _connect_ping_listener(connection, branch): """Ping the server at connection startup. Ping the server at transaction begin and transparently reconnect if a disconnect exception occurs. This listener is used up until SQLAlchemy 2.0.5. At 2.0.5, we use the ``pool_pre_ping`` parameter instead of this event handler. Note the current test suite in test_exc_filters still **tests** this handler using all SQLAlchemy versions including 2.0.5 and greater. """ if branch: return # turn off "close with result". 
This can also be accomplished # by branching the connection, however just setting the flag is # more performant and also doesn't get involved with some # connection-invalidation awkardness that occurs (see # https://bitbucket.org/zzzeek/sqlalchemy/issue/3215/) save_should_close_with_result = connection.should_close_with_result connection.should_close_with_result = False try: # run a SELECT 1. use a core select() so that # any details like that needed by the backend are handled. connection.scalar(select(1)) except exception.DBConnectionError: # catch DBConnectionError, which is raised by the filter # system. # disconnect detected. The connection is now # "invalid", but the pool should be ready to return # new connections assuming they are good now. # run the select again to re-validate the Connection. LOG.exception( 'Database connection was found disconnected; reconnecting') # TODO(ralonsoh): drop this attr check once SQLAlchemy minimum version # is 2.0. if hasattr(connection, 'rollback'): connection.rollback() connection.scalar(select(1)) finally: connection.should_close_with_result = save_should_close_with_result # TODO(ralonsoh): drop this attr check once SQLAlchemy minimum version # is 2.0. if hasattr(connection, 'rollback'): connection.rollback() # SQLAlchemy 2.0 is compatible here, however oslo.db's test suite # raises for all deprecation errors, so we have to check for 2.0 # and wrap out a parameter that is deprecated if compat.sqla_2: _connect_ping_listener = functools.partial( _connect_ping_listener, branch=False) def _setup_logging(connection_debug=0): """setup_logging function maps SQL debug level to Python log level. Connection_debug is a verbosity of SQL debugging information. 0=None(default value), 1=Processed only messages with WARNING level or higher 50=Processed only messages with INFO level or higher 100=Processed only messages with DEBUG level """ if connection_debug >= 0: logger = logging.getLogger('sqlalchemy.engine') if connection_debug == 100: logger.setLevel(logging.DEBUG) elif connection_debug >= 50: logger.setLevel(logging.INFO) else: logger.setLevel(logging.WARNING) def _vet_url(url): if "+" not in url.drivername and not url.drivername.startswith("sqlite"): if url.drivername.startswith("mysql"): LOG.warning( "URL %r does not contain a '+drivername' portion, " "and will make use of a default driver. " "A full dbname+drivername:// protocol is recommended. " "For MySQL, it is strongly recommended that mysql+pymysql:// " "be specified for maximum service compatibility", url ) else: LOG.warning( "URL %r does not contain a '+drivername' portion, " "and will make use of a default driver. 
" "A full dbname+drivername:// protocol is recommended.", url ) @debtcollector.renames.renamed_kwarg( 'idle_timeout', 'connection_recycle_time', replace=True, ) def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None, mysql_wsrep_sync_wait=None, connection_recycle_time=3600, connection_debug=0, max_pool_size=None, max_overflow=None, pool_timeout=None, sqlite_synchronous=True, connection_trace=False, max_retries=10, retry_interval=10, thread_checkin=True, logging_name=None, json_serializer=None, json_deserializer=None, connection_parameters=None): """Return a new SQLAlchemy engine.""" url = utils.make_url(sql_connection) if connection_parameters: url = url.update_query_string(connection_parameters, append=True) _vet_url(url) _native_pre_ping = compat.native_pre_ping_event_support engine_args = { 'pool_recycle': connection_recycle_time, 'pool_pre_ping': _native_pre_ping, 'connect_args': {}, 'logging_name': logging_name } _setup_logging(connection_debug) _init_connection_args( url, engine_args, dict( max_pool_size=max_pool_size, max_overflow=max_overflow, pool_timeout=pool_timeout, json_serializer=json_serializer, json_deserializer=json_deserializer, ) ) engine = sqlalchemy.create_engine(url, **engine_args) _init_events( engine, mysql_sql_mode=mysql_sql_mode, mysql_wsrep_sync_wait=mysql_wsrep_sync_wait, sqlite_synchronous=sqlite_synchronous, sqlite_fk=sqlite_fk, thread_checkin=thread_checkin, connection_trace=connection_trace ) # register alternate exception handler exc_filters.register_engine(engine) if not _native_pre_ping: # register engine connect handler. event.listen(engine, "engine_connect", _connect_ping_listener) # initial connect + test # NOTE(viktors): the current implementation of _test_connection() # does nothing, if max_retries == 0, so we can skip it if max_retries: test_conn = _test_connection(engine, max_retries, retry_interval) test_conn.close() return engine @utils.dispatch_for_dialect('*', multiple=True) def _init_connection_args(url, engine_args, kw): # (zzzeek) kw is passed by reference rather than as **kw so that the # init_connection_args routines can modify the contents of what # will be passed to create_engine, including removing arguments that # don't apply. This allows things such as replacing QueuePool with # NUllPool, for example, as the latter pool would reject these parameters. max_pool_size = kw.get("max_pool_size", None) max_overflow = kw.get("max_overflow", None) pool_timeout = kw.get("pool_timeout", None) pool_class = url.get_dialect().get_pool_class(url) if issubclass(pool_class, pool.QueuePool): if max_pool_size is not None: engine_args['pool_size'] = max_pool_size if max_overflow is not None: engine_args['max_overflow'] = max_overflow if pool_timeout is not None: engine_args['pool_timeout'] = pool_timeout @_init_connection_args.dispatch_for("sqlite") def _init_connection_args(url, engine_args, kw): pool_class = url.get_dialect().get_pool_class(url) if issubclass(pool_class, pool.SingletonThreadPool): # singletonthreadpool is used for :memory: connections; # replace it with StaticPool. 
engine_args["poolclass"] = pool.StaticPool engine_args['connect_args']['check_same_thread'] = False elif issubclass(pool_class, pool.QueuePool): # SQLAlchemy 2.0 uses QueuePool for sqlite file DBs; put NullPool # back to avoid compatibility issues kw.pop("max_pool_size", None) kw.pop("max_overflow", None) engine_args.pop("max_pool_size", None) engine_args.pop("max_overflow", None) engine_args["poolclass"] = pool.NullPool @_init_connection_args.dispatch_for("postgresql") def _init_connection_args(url, engine_args, kw): if 'client_encoding' not in url.query: # Set encoding using engine_args instead of connect_args since # it's supported for PostgreSQL 8.*. More details at: # http://docs.sqlalchemy.org/en/rel_0_9/dialects/postgresql.html engine_args['client_encoding'] = 'utf8' engine_args['json_serializer'] = kw.get('json_serializer') engine_args['json_deserializer'] = kw.get('json_deserializer') @_init_connection_args.dispatch_for("mysql") def _init_connection_args(url, engine_args, kw): if 'charset' not in url.query: engine_args['connect_args']['charset'] = 'utf8' @_init_connection_args.dispatch_for("mysql+mysqlconnector") def _init_connection_args(url, engine_args, kw): # mysqlconnector engine (<1.0) incorrectly defaults to # raise_on_warnings=True # https://bitbucket.org/zzzeek/sqlalchemy/issue/2515 if 'raise_on_warnings' not in url.query: engine_args['connect_args']['raise_on_warnings'] = False @_init_connection_args.dispatch_for("mysql+mysqldb") def _init_connection_args(url, engine_args, kw): # Those drivers require use_unicode=0 to avoid performance drop due # to internal usage of Python unicode objects in the driver # http://docs.sqlalchemy.org/en/rel_0_9/dialects/mysql.html if 'use_unicode' not in url.query: engine_args['connect_args']['use_unicode'] = 1 @utils.dispatch_for_dialect('*', multiple=True) def _init_events(engine, thread_checkin=True, connection_trace=False, **kw): """Set up event listeners for all database backends.""" _add_process_guards(engine) if connection_trace: _add_trace_comments(engine) if thread_checkin: sqlalchemy.event.listen(engine, 'checkin', _thread_yield) @_init_events.dispatch_for("mysql") def _init_events( engine, mysql_sql_mode=None, mysql_wsrep_sync_wait=None, **kw): """Set up event listeners for MySQL.""" if mysql_sql_mode is not None or mysql_wsrep_sync_wait is not None: @sqlalchemy.event.listens_for(engine, "connect") def _set_session_variables(dbapi_con, connection_rec): cursor = dbapi_con.cursor() if mysql_sql_mode is not None: cursor.execute("SET SESSION sql_mode = %s", [mysql_sql_mode]) if mysql_wsrep_sync_wait is not None: cursor.execute( "SET SESSION wsrep_sync_wait = %s", [mysql_wsrep_sync_wait] ) @sqlalchemy.event.listens_for(engine, "first_connect") def _check_effective_sql_mode(dbapi_con, connection_rec): if mysql_sql_mode is not None or mysql_wsrep_sync_wait is not None: _set_session_variables(dbapi_con, connection_rec) cursor = dbapi_con.cursor() cursor.execute("SHOW VARIABLES LIKE 'sql_mode'") realmode = cursor.fetchone() if realmode is None: LOG.warning('Unable to detect effective SQL mode') else: realmode = realmode[1] LOG.debug('MySQL server mode set to %s', realmode) if 'TRADITIONAL' not in realmode.upper() and \ 'STRICT_ALL_TABLES' not in realmode.upper(): LOG.warning( "MySQL SQL mode is '%s', " "consider enabling TRADITIONAL or STRICT_ALL_TABLES", realmode) @_init_events.dispatch_for("sqlite") def _init_events(engine, sqlite_synchronous=True, sqlite_fk=False, **kw): """Set up event listeners for SQLite. 
This includes several settings made on connections as they are created, as well as transactional control extensions. """ def regexp(expr, item): reg = re.compile(expr) return reg.search(str(item)) is not None @sqlalchemy.event.listens_for(engine, "connect") def _sqlite_connect_events(dbapi_con, con_record): # Add REGEXP functionality on SQLite connections dbapi_con.create_function('regexp', 2, regexp) if not sqlite_synchronous: # Switch sqlite connections to non-synchronous mode dbapi_con.execute("PRAGMA synchronous = OFF") # Disable pysqlite's emitting of the BEGIN statement entirely. # Also stops it from emitting COMMIT before any DDL. # below, we emit BEGIN ourselves. # see http://docs.sqlalchemy.org/en/rel_0_9/dialects/\ # sqlite.html#serializable-isolation-savepoints-transactional-ddl dbapi_con.isolation_level = None if sqlite_fk: # Ensures that the foreign key constraints are enforced in SQLite. dbapi_con.execute('pragma foreign_keys=ON') @sqlalchemy.event.listens_for(engine, "begin") def _sqlite_emit_begin(conn): # emit our own BEGIN, checking for existing # transactional state if 'in_transaction' not in conn.info: conn.execute(sqlalchemy.text("BEGIN")) conn.info['in_transaction'] = True @sqlalchemy.event.listens_for(engine, "rollback") @sqlalchemy.event.listens_for(engine, "commit") def _sqlite_end_transaction(conn): # remove transactional marker conn.info.pop('in_transaction', None) def _test_connection(engine, max_retries, retry_interval): if max_retries == -1: attempts = itertools.count() else: attempts = range(max_retries) # See: http://legacy.python.org/dev/peps/pep-3110/#semantic-changes for # why we are not using 'de' directly (it can be removed from the local # scope). de_ref = None for attempt in attempts: try: return engine.connect() except exception.DBConnectionError as de: msg = 'SQL connection failed. %s attempts left.' LOG.warning(msg, max_retries - attempt) time.sleep(retry_interval) de_ref = de else: if de_ref is not None: raise de_ref def _add_process_guards(engine): """Add multiprocessing guards. Forces a connection to be reconnected if it is detected as having been shared to a sub-process. """ @sqlalchemy.event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): connection_record.info['pid'] = os.getpid() @sqlalchemy.event.listens_for(engine, "checkout") def checkout(dbapi_connection, connection_record, connection_proxy): pid = os.getpid() if connection_record.info['pid'] != pid: LOG.debug( "Parent process %(orig)s forked (%(newproc)s) with an open " "database connection, " "which is being discarded and recreated.", {"newproc": pid, "orig": connection_record.info['pid']}) raise exc.DisconnectionError( "Connection record belongs to pid %s, " "attempting to check out in pid %s" % (connection_record.info['pid'], pid) ) def _add_trace_comments(engine): """Add trace comments. Augment statements with a trace of the immediate calling code for a given statement. """ import os import sys import traceback target_paths = set([ os.path.dirname(sys.modules['oslo_db'].__file__), os.path.dirname(sys.modules['sqlalchemy'].__file__) ]) try: skip_paths = set([ os.path.dirname(sys.modules['oslo_db.tests'].__file__), ]) except KeyError: skip_paths = set() @sqlalchemy.event.listens_for(engine, "before_cursor_execute", retval=True) def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): # NOTE(zzzeek) - if different steps per DB dialect are desirable # here, switch out on engine.name for now. 
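# Illustrative sketch, not part of the module: passing connection_trace=True
# to create_engine() activates the _add_trace_comments() hook defined here,
# which appends the immediate calling location to each emitted statement.
# The URL is an assumption; the trailing comment shows roughly what a traced
# statement looks like.
from oslo_db.sqlalchemy import engines

traced_engine = engines.create_engine('sqlite://', connection_trace=True)
# Emitted SQL then looks roughly like:
#   SELECT foo.id FROM foo -- File: /opt/stack/myservice/db/api.py (42) get_foo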
stack = traceback.extract_stack() our_line = None for idx, (filename, line, method, function) in enumerate(stack): for tgt in skip_paths: if filename.startswith(tgt): break else: for tgt in target_paths: if filename.startswith(tgt): our_line = idx break if our_line: break if our_line: trace = "; ".join( "File: %s (%s) %s" % ( line[0], line[1], line[2] ) # include three lines of context. for line in stack[our_line - 3:our_line] ) statement = "%s -- %s" % (statement, trace) return statement, parameters ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/exc_filters.py0000664000175000017500000004740200000000000021634 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Define exception redefinitions for SQLAlchemy DBAPI exceptions.""" import collections import logging import re import sys from sqlalchemy import event from sqlalchemy import exc as sqla_exc from oslo_db import exception from oslo_db.sqlalchemy import compat LOG = logging.getLogger(__name__) _registry = collections.defaultdict( lambda: collections.defaultdict( list ) ) def filters(dbname, exception_type, regex): """Mark a function as receiving a filtered exception. :param dbname: string database name, e.g. 'mysql' :param exception_type: a SQLAlchemy database exception class, which extends from :class:`sqlalchemy.exc.DBAPIError`. :param regex: a string, or a tuple of strings, that will be processed as matching regular expressions. """ def _receive(fn): _registry[dbname][exception_type].extend( (fn, re.compile(reg, re.DOTALL)) for reg in ((regex,) if not isinstance(regex, tuple) else regex) ) return fn return _receive # NOTE(zzzeek) - for Postgresql, catch both OperationalError, as the # actual error is # psycopg2.extensions.TransactionRollbackError(OperationalError), # as well as sqlalchemy.exc.DBAPIError, as SQLAlchemy will reraise it # as this until issue #3075 is fixed. @filters("mysql", sqla_exc.OperationalError, r"^.*\b1213\b.*Deadlock found.*") @filters("mysql", sqla_exc.DatabaseError, r"^.*\b1205\b.*Lock wait timeout exceeded.*") @filters("mysql", sqla_exc.InternalError, r"^.*\b1213\b.*Deadlock found.*") @filters("mysql", sqla_exc.InternalError, r"^.*\b1213\b.*detected deadlock/conflict.*") @filters("mysql", sqla_exc.InternalError, r"^.*\b1213\b.*Deadlock: wsrep aborted.*") @filters("mysql", sqla_exc.OperationalError, r"^.*\b1213\b.*Deadlock: wsrep aborted.*") @filters("postgresql", sqla_exc.OperationalError, r"^.*deadlock detected.*") @filters("postgresql", sqla_exc.DBAPIError, r"^.*deadlock detected.*") def _deadlock_error(operational_error, match, engine_name, is_disconnect): """Filter for MySQL or Postgresql deadlock error. 
NOTE(comstud): In current versions of DB backends, Deadlock violation messages follow the structure: mysql+mysqldb:: (OperationalError) (1213, 'Deadlock found when trying to get lock; ' 'try restarting transaction') mysql+mysqlconnector:: (InternalError) 1213 (40001): Deadlock found when trying to get lock; try restarting transaction postgresql:: (TransactionRollbackError) deadlock detected """ raise exception.DBDeadlock(operational_error)
@filters("mysql", sqla_exc.IntegrityError, r"^.*\b1062\b.*Duplicate entry '(?P<value>.*)'" r" for key '(?P<columns>[^']+)'.*$")
# NOTE(jd) For binary types
@filters("mysql", sqla_exc.IntegrityError, r"^.*\b1062\b.*Duplicate entry \\'(?P<value>.*)\\'" r" for key \\'(?P<columns>.+)\\'.*$")
# NOTE(pkholkin): the first regex is suitable only for PostgreSQL 9.x versions # the second regex is suitable for PostgreSQL 8.x versions
@filters("postgresql", sqla_exc.IntegrityError, (r'^.*duplicate\s+key.*"(?P<columns>[^"]+)"\s*\n.*' r'Key\s+\((?P<key>.*)\)=\((?P<value>.*)\)\s+already\s+exists.*$', r"^.*duplicate\s+key.*\"(?P<columns>[^\"]+)\"\s*\n.*$"))
def _default_dupe_key_error(integrity_error, match, engine_name, is_disconnect): """Filter for MySQL or Postgresql duplicate key error. note(boris-42): In current versions of DB backends unique constraint violation messages follow the structure: postgres: 1 column - (IntegrityError) duplicate key value violates unique constraint "users_c1_key" N columns - (IntegrityError) duplicate key value violates unique constraint "name_of_our_constraint" mysql since 8.0.19: 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key 'table_name.c1'") N columns - (IntegrityError) (1062, "Duplicate entry 'values joined with -' for key 'table_name.name_of_our_constraint'") mysql+mysqldb: 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key 'c1'") N columns - (IntegrityError) (1062, "Duplicate entry 'values joined with -' for key 'name_of_our_constraint'") mysql+mysqlconnector: 1 column - (IntegrityError) 1062 (23000): Duplicate entry 'value_of_c1' for key 'c1' N columns - (IntegrityError) 1062 (23000): Duplicate entry 'values joined with -' for key 'name_of_our_constraint' """ columns = match.group('columns') # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2" # where `t` is the table name and columns `c1`, `c2` # are in the UniqueConstraint. uniqbase = "uniq_" if not columns.startswith(uniqbase): if engine_name == "postgresql": columns = [columns[columns.index("_") + 1:columns.rindex("_")]] elif (engine_name == "mysql") and \ (uniqbase in str(columns.split("0")[:1])): columns = columns.split("0")[1:] else: columns = [columns] else: columns = columns[len(uniqbase):].split("0")[1:] value = match.groupdict().get('value') raise exception.DBDuplicateEntry(columns, integrity_error, value)
@filters("sqlite", sqla_exc.IntegrityError, (r"^.*columns?(?P<columns>[^)]+)(is|are)\s+not\s+unique$", r"^.*UNIQUE\s+constraint\s+failed:\s+(?P<columns>.+)$", r"^.*PRIMARY\s+KEY\s+must\s+be\s+unique.*$"))
def _sqlite_dupe_key_error(integrity_error, match, engine_name, is_disconnect): """Filter for SQLite duplicate key error.
note(boris-42): In current versions of DB backends unique constraint violation messages follow the structure: sqlite: 1 column - (IntegrityError) column c1 is not unique N columns - (IntegrityError) column c1, c2, ..., N are not unique sqlite since 3.7.16: 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1 N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2 sqlite since 3.8.2: (IntegrityError) PRIMARY KEY must be unique """ columns = [] # NOTE(ochuprykov): We can get here by last filter in which there are no # groups. Trying to access the substring that matched by # the group will lead to IndexError. In this case just # pass empty list to exception.DBDuplicateEntry try: columns = match.group('columns') columns = [c.split('.')[-1] for c in columns.strip().split(", ")] except IndexError: pass raise exception.DBDuplicateEntry(columns, integrity_error)
@filters("sqlite", sqla_exc.IntegrityError, r"(?i).*foreign key constraint failed")
@filters("postgresql", sqla_exc.IntegrityError, r".*on table \"(?P<table>[^\"]+)\" violates " r"foreign key constraint \"(?P<constraint>[^\"]+)\".*\n" r"DETAIL: Key \((?P<key>.+)\)=\(.+\) " r"is (not present in|still referenced from) table " r"\"(?P<key_table>[^\"]+)\".")
@filters( "mysql", sqla_exc.IntegrityError, r".*Cannot (add|delete) or update a (child|parent) row: " r'a foreign key constraint fails \([`"].+[`"]\.[`"](?P<table>.+)[`"], ' r'CONSTRAINT [`"](?P<constraint>.+)[`"] FOREIGN KEY ' r'\([`"](?P<key>.+)[`"]\) REFERENCES [`"](?P<key_table>.+)[`"] ')
def _foreign_key_error(integrity_error, match, engine_name, is_disconnect): """Filter for foreign key errors.""" try: table = match.group("table") except IndexError: table = None try: constraint = match.group("constraint") except IndexError: constraint = None try: key = match.group("key") except IndexError: key = None try: key_table = match.group("key_table") except IndexError: key_table = None raise exception.DBReferenceError(table, constraint, key, key_table, integrity_error)
.+)\" " "violates check constraint " "\"(?P.+)\"") def _check_constraint_error( integrity_error, match, engine_name, is_disconnect): """Filter for check constraint errors.""" try: table = match.group("table") except IndexError: table = None try: check_name = match.group("check_name") except IndexError: check_name = None raise exception.DBConstraintError(table, check_name, integrity_error) @filters("postgresql", sqla_exc.ProgrammingError, r".* constraint \"(?P.+)\" " "of relation " "\"(?P.+)\" does not exist") @filters("mysql", sqla_exc.InternalError, r".*1091,.*Can't DROP (?:FOREIGN KEY )?['`](?P.+)['`]; " "check that .* exists") @filters("mysql", sqla_exc.OperationalError, r".*1091,.*Can't DROP (?:FOREIGN KEY )?['`](?P.+)['`]; " "check that .* exists") @filters("mysql", sqla_exc.InternalError, r".*1025,.*Error on rename of '.+/(?P.+)' to ") def _check_constraint_non_existing( programming_error, match, engine_name, is_disconnect): """Filter for constraint non existing errors.""" try: relation = match.group("relation") except IndexError: relation = None try: constraint = match.group("constraint") except IndexError: constraint = None raise exception.DBNonExistentConstraint(relation, constraint, programming_error) @filters("sqlite", sqla_exc.OperationalError, r".* no such table: (?P
.+)") @filters("mysql", sqla_exc.InternalError, r".*1051,.*Unknown table '(.+\.)?(?P
.+)'\"") @filters("mysql", sqla_exc.OperationalError, r".*1051,.*Unknown table '(.+\.)?(?P
.+)'\"") @filters("postgresql", sqla_exc.ProgrammingError, r".* table \"(?P
.+)\" does not exist") def _check_table_non_existing( programming_error, match, engine_name, is_disconnect): """Filter for table non existing errors.""" raise exception.DBNonExistentTable(match.group("table"), programming_error) @filters("mysql", sqla_exc.InternalError, r".*1049,.*Unknown database '(?P.+)'\"") @filters("mysql", sqla_exc.OperationalError, r".*1049,.*Unknown database '(?P.+)'\"") @filters("postgresql", sqla_exc.OperationalError, r".*database \"(?P.+)\" does not exist") @filters("sqlite", sqla_exc.OperationalError, ".*unable to open database file.*") def _check_database_non_existing( error, match, engine_name, is_disconnect): try: database = match.group("database") except IndexError: database = None raise exception.DBNonExistentDatabase(database, error) @filters("mysql", sqla_exc.DBAPIError, r".*\b1146\b") def _raise_mysql_table_doesnt_exist_asis( error, match, engine_name, is_disconnect): """Raise MySQL error 1146 as is. Raise MySQL error 1146 as is, so that it does not conflict with the MySQL dialect's checking a table not existing. """ raise error @filters("mysql", sqla_exc.OperationalError, r".*(1292|1366).*Incorrect \w+ value.*") @filters("mysql", sqla_exc.DataError, r".*1265.*Data truncated for column.*") @filters("mysql", sqla_exc.DataError, r".*1264.*Out of range value for column.*") @filters("mysql", sqla_exc.InternalError, r"^.*1366.*Incorrect string value:*") @filters("sqlite", sqla_exc.ProgrammingError, r"(?i).*You must not use 8-bit bytestrings*") @filters("mysql", sqla_exc.DataError, r".*1406.*Data too long for column.*") def _raise_data_error(error, match, engine_name, is_disconnect): """Raise DBDataError exception for different data errors.""" raise exception.DBDataError(error) @filters("mysql", sqla_exc.OperationalError, r".*\(1305,\s+\'SAVEPOINT\s+(.+)\s+does not exist\'\)") def _raise_savepoints_as_dberrors(error, match, engine_name, is_disconnect): # NOTE(rpodolyaka): this is a special case of an OperationalError that used # to be an InternalError. It's expected to be wrapped into oslo.db error. raise exception.DBError(error) @filters("*", sqla_exc.OperationalError, r".*") def _raise_operational_errors_directly_filter(operational_error, match, engine_name, is_disconnect): """Filter for all remaining OperationalError classes and apply. Filter for all remaining OperationalError classes and apply special rules. """ if is_disconnect: # operational errors that represent disconnect # should be wrapped raise exception.DBConnectionError(operational_error) else: # NOTE(comstud): A lot of code is checking for OperationalError # so let's not wrap it for now. raise operational_error @filters("mysql", sqla_exc.OperationalError, r".*\(.*(?:2002|2003|2006|2013|1047)") # noqa @filters("mysql", sqla_exc.InternalError, r".*\(.*(?:1927)") # noqa @filters("mysql", sqla_exc.InternalError, r".*Packet sequence number wrong") # noqa @filters("postgresql", sqla_exc.OperationalError, r".*could not connect to server") # noqa def _is_db_connection_error(operational_error, match, engine_name, is_disconnect): """Detect the exception as indicating a recoverable error on connect.""" raise exception.DBConnectionError(operational_error) @filters("*", sqla_exc.NotSupportedError, r".*") def _raise_for_NotSupportedError(error, match, engine_name, is_disconnect): raise exception.DBNotSupportedError(error) @filters("*", sqla_exc.DBAPIError, r".*") def _raise_for_remaining_DBAPIError(error, match, engine_name, is_disconnect): """Filter for remaining DBAPIErrors. 
Filter for remaining DBAPIErrors and wrap if they represent a disconnect error. """ if is_disconnect: raise exception.DBConnectionError(error) else: LOG.warning('DBAPIError exception wrapped.', exc_info=True) raise exception.DBError(error) @filters('*', UnicodeEncodeError, r".*") def _raise_for_unicode_encode(error, match, engine_name, is_disconnect): raise exception.DBInvalidUnicodeParameter() @filters("*", Exception, r".*") def _raise_for_all_others(error, match, engine_name, is_disconnect): LOG.warning('DB exception wrapped.', exc_info=True) raise exception.DBError(error) ROLLBACK_CAUSE_KEY = 'oslo.db.sp_rollback_cause' def handler(context): """Iterate through available filters and invoke those which match. The first one which raises wins. The order in which the filters are attempted is sorted by specificity - dialect name or "*", exception class per method resolution order (``__mro__``). Method resolution order is used so that filter rules indicating a more specific exception class are attempted first. """ def _dialect_registries(dialect): if dialect.name in _registry: yield _registry[dialect.name] if '*' in _registry: yield _registry['*'] # do not reraise for our own exceptions # https://github.com/sqlalchemy/sqlalchemy/issues/10116 if isinstance(context.original_exception, exception.DBError): return dialect = compat.dialect_from_exception_context(context) for per_dialect in _dialect_registries(dialect): for exc in (context.sqlalchemy_exception, context.original_exception): for super_ in exc.__class__.__mro__: if super_ not in per_dialect: continue regexp_reg = per_dialect[super_] for fn, regexp in regexp_reg: match = regexp.match(exc.args[0]) if not match: continue try: fn( exc, match, dialect.name, context.is_disconnect, ) except exception.DBError as dbe: if ( context.connection is not None and not context.connection.closed and not context.connection.invalidated and ROLLBACK_CAUSE_KEY in context.connection.info ): dbe.cause = context.connection.info.pop( ROLLBACK_CAUSE_KEY, ) if isinstance(dbe, exception.DBConnectionError): context.is_disconnect = True # new in 2.0.5 if ( hasattr(context, "is_pre_ping") and context.is_pre_ping ): # if this is a pre-ping, need to # integrate with the built # in pre-ping handler that doesnt know # about DBConnectionError, just needs # the updated status return None return dbe def register_engine(engine): event.listen(engine, "handle_error", handler, retval=True) @event.listens_for(engine, "rollback_savepoint") def rollback_savepoint(conn, name, context): exc_info = sys.exc_info() if exc_info[1]: # NOTE(zzzeek) accessing conn.info on an invalidated # connection causes it to reconnect, which we don't # want to do inside a rollback handler if not conn.invalidated: conn.info[ROLLBACK_CAUSE_KEY] = exc_info[1] # NOTE(zzzeek) this eliminates a reference cycle between tracebacks # that would occur in Python 3 only, which has been shown to occur if # this function were in fact part of the traceback. That's not the # case here however this is left as a defensive measure. del exc_info # try to clear the "cause" ASAP outside of savepoints, # by grabbing the end of transaction events... @event.listens_for(engine, "rollback") @event.listens_for(engine, "commit") def pop_exc_tx(conn): # NOTE(zzzeek) accessing conn.info on an invalidated # connection causes it to reconnect, which we don't # want to do inside a rollback handler if not conn.invalidated: conn.info.pop(ROLLBACK_CAUSE_KEY, None) # .. as well as connection pool checkin (just in case). 
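# Illustrative sketch, not part of the module: once register_engine() has been
# called for an engine (engines.create_engine() does this automatically), DBAPI
# integrity errors surface as oslo.db exceptions. The table and data below are
# assumptions for demonstration only.
import sqlalchemy

from oslo_db import exception
from oslo_db.sqlalchemy import engines

demo_engine = engines.create_engine('sqlite://')
with demo_engine.connect() as conn:
    conn.execute(sqlalchemy.text(
        'CREATE TABLE widget (id INTEGER PRIMARY KEY, name TEXT UNIQUE)'))
    conn.execute(sqlalchemy.text("INSERT INTO widget VALUES (1, 'a')"))
    try:
        # violates the UNIQUE constraint; the sqlite duplicate-key filter
        # re-raises it as DBDuplicateEntry
        conn.execute(sqlalchemy.text("INSERT INTO widget VALUES (2, 'a')"))
    except exception.DBDuplicateEntry as err:
        print(err.columns)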
# the .info dictionary lasts as long as the DBAPI connection itself # and is cleared out when the connection is recycled or closed # due to invalidate etc. @event.listens_for(engine, "checkin") def pop_exc_checkin(dbapi_conn, connection_record): connection_record.info.pop(ROLLBACK_CAUSE_KEY, None) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5326004 oslo.db-16.0.0/oslo_db/sqlalchemy/migration_cli/0000775000175000017500000000000000000000000021564 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/migration_cli/README.rst0000664000175000017500000000045000000000000023252 0ustar00zuulzuul00000000000000This module could be used either for: 1. Smooth transition from migrate tool to alembic 2. As standalone alembic tool Core points: 1. Upgrade/downgrade database with usage of alembic/migrate migrations or both 2. Compatibility with oslo.config 3. The way to autogenerate new revisions or stamps ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/migration_cli/__init__.py0000664000175000017500000000000000000000000023663 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/migration_cli/ext_alembic.py0000664000175000017500000001016100000000000024411 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import alembic from alembic import config as alembic_config import alembic.migration as alembic_migration from alembic import script as alembic_script from oslo_db.sqlalchemy.migration_cli import ext_base class AlembicExtension(ext_base.MigrationExtensionBase): """Extension to provide alembic features. 
:param engine: SQLAlchemy engine instance for a given database :type engine: sqlalchemy.engine.Engine :param migration_config: Stores specific configuration for migrations :type migration_config: dict """ order = 2 @property def enabled(self): return os.path.exists(self.alembic_ini_path) def __init__(self, engine, migration_config): self.alembic_ini_path = migration_config.get('alembic_ini_path', '') self.config = alembic_config.Config(self.alembic_ini_path) # option should be used if script is not in default directory repo_path = migration_config.get('alembic_repo_path') if repo_path: self.config.set_main_option('script_location', repo_path) self.engine = engine def upgrade(self, version): with self.engine.begin() as connection: self.config.attributes['connection'] = connection return alembic.command.upgrade(self.config, version or 'head') def downgrade(self, version): if isinstance(version, int) or version is None or version.isdigit(): version = 'base' with self.engine.begin() as connection: self.config.attributes['connection'] = connection return alembic.command.downgrade(self.config, version) def version(self): with self.engine.connect() as conn: context = alembic_migration.MigrationContext.configure(conn) return context.get_current_revision() def revision(self, message='', autogenerate=False): """Creates template for migration. :param message: Text that will be used for migration title :type message: string :param autogenerate: If True - generates diff based on current database state :type autogenerate: bool """ with self.engine.begin() as connection: self.config.attributes['connection'] = connection return alembic.command.revision(self.config, message=message, autogenerate=autogenerate) def stamp(self, revision): """Stamps database with provided revision. :param revision: Should match one from repository or head - to stamp database with most recent revision :type revision: string """ with self.engine.begin() as connection: self.config.attributes['connection'] = connection return alembic.command.stamp(self.config, revision=revision) def has_revision(self, rev_id): if rev_id in ['base', 'head']: return True # Although alembic supports relative upgrades and downgrades, # get_revision always returns False for relative revisions. # Since only alembic supports relative revisions, assume the # revision belongs to this plugin. if rev_id: # rev_id can be None, so the check is required if '-' in rev_id or '+' in rev_id: return True script = alembic_script.ScriptDirectory( self.config.get_main_option('script_location')) try: script.get_revision(rev_id) return True except alembic.util.CommandError: return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/migration_cli/ext_base.py0000664000175000017500000000443300000000000023734 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
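# Illustrative sketch, not part of the module: how the AlembicExtension defined
# above in ext_alembic.py is typically driven. The engine URL and paths are
# assumptions for demonstration only.
import sqlalchemy

from oslo_db.sqlalchemy.migration_cli import ext_alembic

engine = sqlalchemy.create_engine('sqlite:///example.db')
migration_config = {
    'alembic_ini_path': '/opt/myservice/alembic.ini',
    'alembic_repo_path': '/opt/myservice/alembic',
}
alembic_ext = ext_alembic.AlembicExtension(engine, migration_config)
if alembic_ext.enabled:  # True only when alembic_ini_path exists
    alembic_ext.upgrade('head')
    print(alembic_ext.version())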
import abc class MigrationExtensionBase(object, metaclass=abc.ABCMeta): # used to sort migration in logical order order = 0 @property def enabled(self): """Used for availability verification of a plugin. :rtype: bool """ return False @abc.abstractmethod def upgrade(self, version): """Used for upgrading database. :param version: Desired database version :type version: string """ @abc.abstractmethod def downgrade(self, version): """Used for downgrading database. :param version: Desired database version :type version: string """ @abc.abstractmethod def version(self): """Current database version. :returns: Databse version :rtype: string """ def revision(self, *args, **kwargs): """Used to generate migration script. In migration engines that support this feature, it should generate new migration script. Accept arbitrary set of arguments. """ raise NotImplementedError() def stamp(self, *args, **kwargs): """Stamps database based on plugin features. Accept arbitrary set of arguments. """ raise NotImplementedError() def has_revision(self, rev_id): """Checks whether the repo contains a revision :param rev_id: Revision to check :returns: Whether the revision is in the repo :rtype: bool """ raise NotImplementedError() def __cmp__(self, other): """Used for definition of plugin order. :param other: MigrationExtensionBase instance :rtype: bool """ return self.order > other.order ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/migration_cli/manager.py0000664000175000017500000001033300000000000023550 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from debtcollector import removals import sqlalchemy from stevedore import enabled from oslo_db import exception MIGRATION_NAMESPACE = 'oslo.db.migration' def check_plugin_enabled(ext): """Used for EnabledExtensionManager.""" return ext.obj.enabled @removals.remove( message='Support for sqlalchemy-migrate and with it the migration manager ' 'is deprecated for removal; consider migrating to and using alembic ' 'directly', version='8.3.0' ) class MigrationManager(object): def __init__(self, migration_config, engine=None): if engine is None: if migration_config.get('db_url'): engine = sqlalchemy.create_engine( migration_config['db_url'], poolclass=sqlalchemy.pool.NullPool, ) else: raise ValueError('Either database url or engine' ' must be provided.') self._manager = enabled.EnabledExtensionManager( MIGRATION_NAMESPACE, check_plugin_enabled, invoke_args=(engine, migration_config), invoke_on_load=True ) if not self._plugins: raise ValueError('There must be at least one plugin active.') @property def _plugins(self): return sorted(ext.obj for ext in self._manager.extensions) def upgrade(self, revision): """Upgrade database with all available backends.""" # a revision exists only in a single plugin. Until we reached it, we # should upgrade to the plugins' heads. # revision=None is a special case meaning latest revision. 
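# Illustrative sketch, not part of the module (and note the class is marked
# deprecated above): driving MigrationManager with an alembic-backed
# configuration. The URL and path are assumptions for demonstration only.
from oslo_db.sqlalchemy.migration_cli import manager

migration_config = {
    'alembic_ini_path': '/opt/myservice/alembic.ini',
    'db_url': 'sqlite:///example.db',
}
migration_manager = manager.MigrationManager(migration_config)
migration_manager.upgrade(None)  # None means: upgrade every plugin to its head
print(migration_manager.version())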
rev_in_plugins = [p.has_revision(revision) for p in self._plugins] if not any(rev_in_plugins) and revision is not None: raise exception.DBMigrationError('Revision does not exist') results = [] for plugin, has_revision in zip(self._plugins, rev_in_plugins): if not has_revision or revision is None: results.append(plugin.upgrade(None)) else: results.append(plugin.upgrade(revision)) break return results def downgrade(self, revision): """Downgrade database with available backends.""" # a revision exists only in a single plugin. Until we reached it, we # should upgrade to the plugins' first revision. # revision=None is a special case meaning initial revision. rev_in_plugins = [p.has_revision(revision) for p in self._plugins] if not any(rev_in_plugins) and revision is not None: raise exception.DBMigrationError('Revision does not exist') # downgrading should be performed in reversed order results = [] for plugin, has_revision in zip(reversed(self._plugins), reversed(rev_in_plugins)): if not has_revision or revision is None: results.append(plugin.downgrade(None)) else: results.append(plugin.downgrade(revision)) break return results def version(self): """Return last version of db.""" last = None for plugin in self._plugins: version = plugin.version() if version is not None: last = version return last def revision(self, message, autogenerate): """Generate template or autogenerated revision.""" # revision should be done only by last plugin return self._plugins[-1].revision(message, autogenerate) def stamp(self, revision): """Create stamp for a given revision.""" return self._plugins[-1].stamp(revision) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/models.py0000664000175000017500000001016100000000000020600 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # Copyright 2012 Cloudscaling Group, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models. """ from oslo_utils import timeutils from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy.orm import object_mapper from oslo_db.sqlalchemy import types class ModelBase(object): """Base class for models.""" __table_initialized__ = False def save(self, session): """Save this object.""" session.add(self) session.flush() def __setitem__(self, key, value): setattr(self, key, value) def __getitem__(self, key): return getattr(self, key) def __contains__(self, key): # Don't use hasattr() because hasattr() catches any exception, not only # AttributeError. We want to passthrough SQLAlchemy exceptions # (ex: sqlalchemy.orm.exc.DetachedInstanceError). 
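# Illustrative sketch, not part of the module: ModelBase gives mapped classes
# dict-like helpers. The Foo model below is an assumption for demonstration.
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

from oslo_db.sqlalchemy import models

Base = declarative_base()


class Foo(models.ModelBase, Base):
    __tablename__ = 'foo'
    id = Column(Integer, primary_key=True)
    name = Column(String(255))


foo = Foo()
foo.update({'id': 1, 'name': 'example'})  # dict-style bulk assignment
print(foo['name'], foo.get('missing', 'default'))
print(dict(foo))  # __iter__ yields (column, value) pairs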
try: getattr(self, key) except AttributeError: return False else: return True def get(self, key, default=None): return getattr(self, key, default) @property def _extra_keys(self): """Specifies custom fields Subclasses can override this property to return a list of custom fields that should be included in their dict representation. For reference check tests/db/sqlalchemy/test_models.py """ return [] def __iter__(self): columns = list(dict(object_mapper(self).columns).keys()) # NOTE(russellb): Allow models to specify other keys that can be looked # up, beyond the actual db columns. An example would be the 'name' # property for an Instance. columns.extend(self._extra_keys) return ModelIterator(self, iter(columns)) def update(self, values): """Make the model object behave like a dict.""" for k, v in values.items(): setattr(self, k, v) def _as_dict(self): """Make the model object behave like a dict. Includes attributes from joins. """ local = dict((key, value) for key, value in self) joined = dict([(k, v) for k, v in self.__dict__.items() if not k[0] == '_']) local.update(joined) return local def iteritems(self): """Make the model object behave like a dict.""" return self._as_dict().items() def items(self): """Make the model object behave like a dict.""" return self._as_dict().items() def keys(self): """Make the model object behave like a dict.""" return [key for key, value in self.iteritems()] class ModelIterator(object): def __init__(self, model, columns): self.model = model self.i = columns def __iter__(self): return self def __next__(self): n = next(self.i) return n, getattr(self.model, n) class TimestampMixin(object): created_at = Column(DateTime, default=lambda: timeutils.utcnow()) updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) class SoftDeleteMixin(object): deleted_at = Column(DateTime) deleted = Column(types.SoftDeleteInteger, default=0) def soft_delete(self, session): """Mark this object as deleted.""" self.deleted = self.id self.deleted_at = timeutils.utcnow() self.save(session=session) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/orm.py0000664000175000017500000000512300000000000020114 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQLAlchemy ORM connectivity and query structures. 
""" from oslo_utils import timeutils import sqlalchemy.orm from oslo_db.sqlalchemy import update_match class Query(sqlalchemy.orm.query.Query): """Subclass of sqlalchemy.query with soft_delete() method.""" def soft_delete(self, synchronize_session='evaluate'): entity = self.column_descriptions[0]['entity'] return self.update({'deleted': entity.id, 'updated_at': entity.updated_at, 'deleted_at': timeutils.utcnow()}, synchronize_session=synchronize_session) def update_returning_pk(self, values, surrogate_key): """Perform an UPDATE, returning the primary key of the matched row. This is a method-version of oslo_db.sqlalchemy.update_match.update_returning_pk(); see that function for usage details. """ return update_match.update_returning_pk(self, values, surrogate_key) def update_on_match(self, specimen, surrogate_key, values, **kw): """Emit an UPDATE statement matching the given specimen. This is a method-version of oslo_db.sqlalchemy.update_match.update_on_match(); see that function for usage details. """ return update_match.update_on_match( self, specimen, surrogate_key, values, **kw) class Session(sqlalchemy.orm.session.Session): """oslo.db-specific Session subclass.""" def get_maker(engine, autocommit=False, expire_on_commit=False): """Return a SQLAlchemy sessionmaker using the given engine.""" return sqlalchemy.orm.sessionmaker(bind=engine, class_=Session, autocommit=autocommit, expire_on_commit=expire_on_commit, query_cls=Query) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/provision.py0000664000175000017500000005244000000000000021353 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat # Copyright 2013 Mirantis.inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provision test environment for specific DB backends""" import abc import logging import os import random import re import string import sqlalchemy from sqlalchemy import schema from sqlalchemy import sql import testresources from oslo_db import exception from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import utils LOG = logging.getLogger(__name__) class ProvisionedDatabase(object): """Represents a database engine pointing to a DB ready to run tests. backend: an instance of :class:`.Backend` enginefacade: an instance of :class:`._TransactionFactory` engine: a SQLAlchemy :class:`.Engine` db_token: if provision_new_database were used, this is the randomly generated name of the database. Note that with SQLite memory connections, this token is ignored. For a database that wasn't actually created, will be None. """ __slots__ = 'backend', 'enginefacade', 'engine', 'db_token' def __init__(self, backend, enginefacade, engine, db_token): self.backend = backend self.enginefacade = enginefacade self.engine = engine self.db_token = db_token class Schema(object): """"Represents a database schema that has or will be populated. This is a marker object as required by testresources but otherwise serves no purpose. 
""" __slots__ = 'database', class BackendResource(testresources.TestResourceManager): def __init__(self, database_type, ad_hoc_url=None): super(BackendResource, self).__init__() self.database_type = database_type self.backend = Backend.backend_for_database_type(self.database_type) self.ad_hoc_url = ad_hoc_url if ad_hoc_url is None: self.backend = Backend.backend_for_database_type( self.database_type) else: self.backend = Backend(self.database_type, ad_hoc_url) self.backend._verify() def make(self, dependency_resources): return self.backend def clean(self, resource): self.backend._dispose() def isDirty(self): return False class DatabaseResource(testresources.TestResourceManager): """Database resource which connects and disconnects to a URL. For SQLite, this means the database is created implicitly, as a result of SQLite's usual behavior. If the database is a file-based URL, it will remain after the resource has been torn down. For all other kinds of databases, the resource indicates to connect and disconnect from that database. """ def __init__(self, database_type, _enginefacade=None, provision_new_database=True, ad_hoc_url=None): super(DatabaseResource, self).__init__() self.database_type = database_type self.provision_new_database = provision_new_database # NOTE(zzzeek) the _enginefacade is an optional argument # here in order to accomodate Neutron's current direct use # of the DatabaseResource object. Within oslo_db's use, # the "enginefacade" will always be passed in from the # test and/or fixture. if _enginefacade: self._enginefacade = _enginefacade else: self._enginefacade = enginefacade._context_manager self.resources = [ ('backend', BackendResource(database_type, ad_hoc_url)) ] def make(self, dependency_resources): backend = dependency_resources['backend'] _enginefacade = self._enginefacade.make_new_manager() if self.provision_new_database: db_token = _random_ident() url = backend.provisioned_database_url(db_token) LOG.info( "CREATE BACKEND %s TOKEN %s", backend.engine.url, db_token) backend.create_named_database(db_token, conditional=True) else: db_token = None url = backend.url _enginefacade.configure( logging_name="%s@%s" % (self.database_type, db_token)) _enginefacade._factory._start(connection=url) engine = _enginefacade._factory._writer_engine return ProvisionedDatabase(backend, _enginefacade, engine, db_token) def clean(self, resource): if self.provision_new_database: LOG.info( "DROP BACKEND %s TOKEN %s", resource.backend.engine, resource.db_token) resource.backend.drop_named_database(resource.db_token) def isDirty(self): return False class SchemaResource(testresources.TestResourceManager): def __init__(self, database_resource, generate_schema, teardown=False): super(SchemaResource, self).__init__() self.generate_schema = generate_schema self.teardown = teardown self.resources = [ ('database', database_resource) ] def clean(self, resource): LOG.info( "DROP ALL OBJECTS, BACKEND %s", resource.database.engine.url) resource.database.backend.drop_all_objects( resource.database.engine) def make(self, dependency_resources): if self.generate_schema: self.generate_schema(dependency_resources['database'].engine) return Schema() def isDirty(self): if self.teardown: return True else: return False class Backend(object): """Represent a particular database backend that may be provisionable. The ``Backend`` object maintains a database type (e.g. 
database without specific driver type, such as "sqlite", "postgresql", etc.), a target URL, a base ``Engine`` for that URL object that can be used to provision databases and a ``BackendImpl`` which knows how to perform operations against this type of ``Engine``. """ backends_by_database_type = {} def __init__(self, database_type, url): self.database_type = database_type self.url = url self.verified = False self.engine = None self.impl = BackendImpl.impl(database_type) self.current_dbs = set() @classmethod def backend_for_database_type(cls, database_type): """Return the ``Backend`` for the given database type. """ try: backend = cls.backends_by_database_type[database_type] except KeyError: raise exception.BackendNotAvailable( "Backend '%s' is unavailable: No such backend" % database_type) else: return backend._verify() @classmethod def all_viable_backends(cls): """Return an iterator of all ``Backend`` objects that are present and provisionable. """ for backend in cls.backends_by_database_type.values(): try: yield backend._verify() except exception.BackendNotAvailable: pass def _verify(self): """Verify that this ``Backend`` is available and provisionable. :return: this ``Backend`` :raises: ``BackendNotAvailable`` if the backend is not available. """ if not self.verified: try: eng = self._ensure_backend_available(self.url) except exception.BackendNotAvailable as bne: self._no_engine_reason = str(bne) raise else: self.engine = eng finally: self.verified = True if self.engine is None: raise exception.BackendNotAvailable(self._no_engine_reason) return self @classmethod def _ensure_backend_available(cls, url): url = utils.make_url(url) try: eng = sqlalchemy.create_engine(url) except ImportError as i_e: # SQLAlchemy performs an "import" of the DBAPI module # within create_engine(). So if mysql etc. # isn't installed, we get an ImportError here. LOG.info( "The %(dbapi)s backend is unavailable: %(err)s", dict(dbapi=url.drivername, err=i_e)) raise exception.BackendNotAvailable( "Backend '%s' is unavailable: No DBAPI installed" % url.drivername) else: try: conn = eng.connect() except sqlalchemy.exc.DBAPIError as d_e: # upon connect, SQLAlchemy calls dbapi.connect(). This # usually raises OperationalError and should always at # least raise a SQLAlchemy-wrapped DBAPI Error. LOG.info( "The %(dbapi)s backend is unavailable: %(err)s", dict(dbapi=url.drivername, err=d_e) ) raise exception.BackendNotAvailable( "Backend '%s' is unavailable: Could not connect" % url.drivername) else: conn.close() return eng def _dispose(self): """Dispose main resources of this backend.""" self.impl.dispose(self.engine) def create_named_database(self, ident, conditional=False): """Create a database with the given name.""" if not conditional or ident not in self.current_dbs: self.current_dbs.add(ident) self.impl.create_named_database( self.engine, ident, conditional=conditional) def drop_named_database(self, ident, conditional=False): """Drop a database with the given name.""" self.impl.drop_named_database( self.engine, ident, conditional=conditional) self.current_dbs.discard(ident) def drop_all_objects(self, engine): """Drop all database objects. Drops all database objects remaining on the default schema of the given engine. """ self.impl.drop_all_objects(engine) def database_exists(self, ident): """Return True if a database of the given name exists.""" return self.impl.database_exists(self.engine, ident) def provisioned_database_url(self, ident): """Given the identifier of an anoymous database, return a URL. 
For hostname-based URLs, this typically involves switching just the 'database' portion of the URL with the given name and creating a URL. For SQLite URLs, the identifier may be used to create a filename or may be ignored in the case of a memory database. """ return self.impl.provisioned_database_url(self.url, ident) @classmethod def _setup(cls): """Initial startup feature will scan the environment for configured URLs and place them into the list of URLs we will use for provisioning. This searches through OS_TEST_DBAPI_ADMIN_CONNECTION for URLs. If not present, we set up URLs based on the "opportunstic" convention, e.g. username+password = "openstack_citest". The provisioning system will then use or discard these URLs as they are requested, based on whether or not the target database is actually found to be available. """ configured_urls = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', None) if configured_urls: configured_urls = configured_urls.split(";") else: configured_urls = [ impl.create_opportunistic_driver_url() for impl in BackendImpl.all_impls() ] for url_str in configured_urls: url = utils.make_url(url_str) m = re.match(r'([^+]+?)(?:\+(.+))?$', url.drivername) database_type = m.group(1) Backend.backends_by_database_type[database_type] = \ Backend(database_type, url) class BackendImpl(object, metaclass=abc.ABCMeta): """Provide database-specific implementations of key provisioning functions. ``BackendImpl`` is owned by a ``Backend`` instance which delegates to it for all database-specific features. """ default_engine_kwargs = {} supports_drop_fk = True def dispose(self, engine): LOG.info("DISPOSE ENGINE %s", engine) engine.dispose() @classmethod def all_impls(cls): """Return an iterator of all possible BackendImpl objects. These are BackendImpls that are implemented, but not necessarily provisionable. """ for database_type in cls.impl.reg: if database_type == '*': continue yield BackendImpl.impl(database_type) @utils.dispatch_for_dialect("*") def impl(drivername): """Return a ``BackendImpl`` instance corresponding to the given driver name. This is a dispatched method which will refer to the constructor of implementing subclasses. """ raise NotImplementedError( "No provision impl available for driver: %s" % drivername) def __init__(self, drivername): self.drivername = drivername @abc.abstractmethod def create_opportunistic_driver_url(self): """Produce a string url known as the 'opportunistic' URL. This URL is one that corresponds to an established OpenStack convention for a pre-established database login, which, when detected as available in the local environment, is automatically used as a test platform for a specific type of driver. """ @abc.abstractmethod def create_named_database(self, engine, ident, conditional=False): """Create a database with the given name.""" @abc.abstractmethod def drop_named_database(self, engine, ident, conditional=False): """Drop a database with the given name.""" def drop_all_objects(self, engine): """Drop all database objects. Drops all database objects remaining on the default schema of the given engine. Per-db implementations will also need to drop items specific to those systems, such as sequences, custom types (e.g. pg ENUM), etc. 
""" with engine.begin() as conn: inspector = sqlalchemy.inspect(engine) metadata = schema.MetaData() tbs = [] all_fks = [] for table_name in inspector.get_table_names(): fks = [] for fk in inspector.get_foreign_keys(table_name): # note that SQLite reflection does not have names # for foreign keys until SQLAlchemy 1.0 if not fk['name']: continue fks.append( schema.ForeignKeyConstraint((), (), name=fk['name']) ) table = schema.Table(table_name, metadata, *fks) tbs.append(table) all_fks.extend(fks) if self.supports_drop_fk: for fkc in all_fks: conn.execute(schema.DropConstraint(fkc)) for table in tbs: conn.execute(schema.DropTable(table)) self.drop_additional_objects(conn) def drop_additional_objects(self, conn): pass def provisioned_database_url(self, base_url, ident): """Return a provisioned database URL. Given the URL of a particular database backend and the string name of a particular 'database' within that backend, return an URL which refers directly to the named database. For hostname-based URLs, this typically involves switching just the 'database' portion of the URL with the given name and creating an engine. For URLs that instead deal with DSNs, the rules may be more custom; for example, the engine may need to connect to the root URL and then emit a command to switch to the named database. """ url = utils.make_url(base_url) url = url.set(database=ident) return url @BackendImpl.impl.dispatch_for("mysql") class MySQLBackendImpl(BackendImpl): def create_opportunistic_driver_url(self): return "mysql+pymysql://openstack_citest:openstack_citest@localhost/" def create_named_database(self, engine, ident, conditional=False): with engine.begin() as conn: if not conditional or not self.database_exists(conn, ident): conn.exec_driver_sql("CREATE DATABASE %s" % ident) def drop_named_database(self, engine, ident, conditional=False): with engine.begin() as conn: if not conditional or self.database_exists(conn, ident): conn.exec_driver_sql("DROP DATABASE %s" % ident) def database_exists(self, engine, ident): s = sql.text("SHOW DATABASES LIKE :ident") return bool(engine.scalar(s, {'ident': ident})) @BackendImpl.impl.dispatch_for("sqlite") class SQLiteBackendImpl(BackendImpl): supports_drop_fk = False def dispose(self, engine): LOG.info("DISPOSE ENGINE %s", engine) engine.dispose() url = engine.url self._drop_url_file(url, True) def _drop_url_file(self, url, conditional): filename = url.database if filename and (not conditional or os.access(filename, os.F_OK)): os.remove(filename) def create_opportunistic_driver_url(self): return "sqlite://" def create_named_database(self, engine, ident, conditional=False): url = self.provisioned_database_url(engine.url, ident) filename = url.database if filename and (not conditional or not os.access(filename, os.F_OK)): eng = sqlalchemy.create_engine(url) eng.connect().close() def drop_named_database(self, engine, ident, conditional=False): url = self.provisioned_database_url(engine.url, ident) filename = url.database if filename and (not conditional or os.access(filename, os.F_OK)): os.remove(filename) def database_exists(self, engine, ident): url = self._provisioned_database_url(engine.url, ident) filename = url.database return not filename or os.access(filename, os.F_OK) def provisioned_database_url(self, base_url, ident): if base_url.database: return utils.make_url("sqlite:////tmp/%s.db" % ident) else: return base_url @BackendImpl.impl.dispatch_for("postgresql") class PostgresqlBackendImpl(BackendImpl): def create_opportunistic_driver_url(self): return 
"postgresql+psycopg2://openstack_citest:openstack_citest@localhost/postgres" # noqa: E501 def create_named_database(self, engine, ident, conditional=False): with engine.connect().execution_options( isolation_level="AUTOCOMMIT", ) as conn: if not conditional or not self.database_exists(conn, ident): conn.exec_driver_sql("CREATE DATABASE %s" % ident) def drop_named_database(self, engine, ident, conditional=False): with engine.connect().execution_options( isolation_level="AUTOCOMMIT", ) as conn: self._close_out_database_users(conn, ident) if conditional: conn.exec_driver_sql("DROP DATABASE IF EXISTS %s" % ident) else: conn.exec_driver_sql("DROP DATABASE %s" % ident) def drop_additional_objects(self, conn): enums = [e['name'] for e in sqlalchemy.inspect(conn).get_enums()] for e in enums: conn.exec_driver_sql("DROP TYPE %s" % e) def database_exists(self, engine, ident): return bool( engine.scalar( sqlalchemy.text( "SELECT datname FROM pg_database WHERE datname=:name" ), {'name': ident}, ) ) def _close_out_database_users(self, conn, ident): """Attempt to guarantee a database can be dropped. Optional feature which guarantees no connections with our username are attached to the DB we're going to drop. This method has caveats; for one, the 'pid' column was named 'procpid' prior to Postgresql 9.2. But more critically, prior to 9.2 this operation required superuser permissions, even if the connections we're closing are under the same username as us. In more recent versions this restriction has been lifted for same-user connections. """ if conn.dialect.server_version_info >= (9, 2): conn.execute( sqlalchemy.text( "SELECT pg_terminate_backend(pid) " "FROM pg_stat_activity " "WHERE usename=current_user AND " "pid != pg_backend_pid() AND " "datname=:dname" ), {'dname': ident}, ) def _random_ident(): return ''.join(random.choice(string.ascii_lowercase) for i in range(10)) Backend._setup() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/session.py0000664000175000017500000001517400000000000021011 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Session Handling for SQLAlchemy backend. Recommended ways to use sessions within this framework: * Use the ``enginefacade`` system for connectivity, session and transaction management: .. code-block:: python from oslo_db.sqlalchemy import enginefacade @enginefacade.reader def get_foo(context, foo): return (model_query(models.Foo, context.session). filter_by(foo=foo). first()) @enginefacade.writer def update_foo(context, id, newfoo): (model_query(models.Foo, context.session). filter_by(id=id). 
update({'foo': newfoo})) @enginefacade.writer def create_foo(context, values): foo_ref = models.Foo() foo_ref.update(values) foo_ref.save(context.session) return foo_ref In the above system, transactions are committed automatically, and are shared among all dependent database methods. Ensure that methods which "write" data are enclosed within @writer blocks. .. note:: Statements in the session scope will not be automatically retried. * If you create models within the session, they need to be added, but you do not need to call `model.save()`: .. code-block:: python @enginefacade.writer def create_many_foo(context, foos): for foo in foos: foo_ref = models.Foo() foo_ref.update(foo) context.session.add(foo_ref) @enginefacade.writer def update_bar(context, foo_id, newbar): foo_ref = (model_query(models.Foo, context.session). filter_by(id=foo_id). first()) (model_query(models.Bar, context.session). filter_by(id=foo_ref['bar_id']). update({'bar': newbar})) The two queries in `update_bar` can alternatively be expressed using a single query, which may be more efficient depending on the scenario: .. code-block:: python @enginefacade.writer def update_bar(context, foo_id, newbar): subq = (model_query(models.Foo.id, context.session). filter_by(id=foo_id). limit(1). subquery()) (model_query(models.Bar, context.session). filter_by(id=subq.as_scalar()). update({'bar': newbar})) For reference, this emits approximately the following SQL statement: .. code-block:: sql UPDATE bar SET bar = '${newbar}' WHERE id=(SELECT bar_id FROM foo WHERE id = '${foo_id}' LIMIT 1); .. note:: `create_duplicate_foo` is a trivially simple example of catching an exception while using a savepoint. Here we create two duplicate instances with the same primary key; the exception must be caught outside of the savepoint context (the ``begin_nested()`` block), while still within the scope of a single session: .. code-block:: python @enginefacade.writer def create_duplicate_foo(context): foo1 = models.Foo() foo2 = models.Foo() foo1.id = foo2.id = 1 try: with context.session.begin_nested(): context.session.add(foo1) context.session.add(foo2) except exception.DBDuplicateEntry as e: handle_error(e) * The enginefacade system eliminates the need to decide when sessions need to be passed between methods. All methods should instead share a common context object; the enginefacade system will maintain the transaction across method calls. .. code-block:: python @enginefacade.writer def myfunc(context, foo): # do some database things bar = _private_func(context, foo) return bar def _private_func(context, foo): with enginefacade.using_writer(context) as session: # do some other database things session.add(SomeObject()) return bar * Avoid ``with_lockmode('UPDATE')`` when possible. FOR UPDATE is not compatible with MySQL/Galera. Instead, an "opportunistic" approach should be used, such that if an UPDATE fails, the entire transaction should be retried. The @wrap_db_retry decorator is one such system that can be used to achieve this. Enabling soft deletes: * To use/enable soft-deletes, `SoftDeleteMixin` may be used. For example: .. code-block:: python class NovaBase(models.SoftDeleteMixin, models.ModelBase): pass Efficient use of soft deletes: * While there is a ``model.soft_delete()`` method, prefer ``query.soft_delete()``. Some examples: .. code-block:: python @enginefacade.writer def soft_delete_bar(context): # synchronize_session=False will prevent the ORM from attempting # to search the Session for instances matching the DELETE; # this is typically not necessary for small operations.
count = model_query(BarModel, context.session).\\ find(some_condition).soft_delete(synchronize_session=False) if count == 0: raise Exception("0 entries were soft deleted") @enginefacade.writer def complex_soft_delete_with_synchronization_bar(context): # use synchronize_session='evaluate' when you'd like to attempt # to update the state of the Session to match that of the DELETE. # This is potentially helpful if the operation is complex and # continues to work with instances that were loaded, though # not usually needed. count = (model_query(BarModel, context.session). find(some_condition). soft_delete(synchronize_session='evaulate')) if count == 0: raise Exception("0 entries were soft deleted") """ from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import engines from oslo_db.sqlalchemy import orm EngineFacade = enginefacade.LegacyEngineFacade create_engine = engines.create_engine get_maker = orm.get_maker Query = orm.Query Session = orm.Session __all__ = ["EngineFacade", "create_engine", "get_maker", "Query", "Session"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/test_base.py0000664000175000017500000000304100000000000021265 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from oslo_utils import reflection ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql'] def backend_specific(*dialects): """Decorator to skip backend specific tests on inappropriate engines. ::dialects: list of dialects names under which the test will be launched. """ def wrap(f): @functools.wraps(f) def ins_wrap(self): if not set(dialects).issubset(ALLOWED_DIALECTS): raise ValueError( "Please use allowed dialects: %s" % ALLOWED_DIALECTS) if self.engine.name not in dialects: msg = ('The test "%s" can be run ' 'only on %s. Current engine is %s.') args = (reflection.get_callable_name(f), ', '.join(dialects), self.engine.name) self.skipTest(msg % args) else: return f(self) return ins_wrap return wrap ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/test_fixtures.py0000664000175000017500000005274100000000000022237 0ustar00zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
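# NOTE: a minimal, illustrative sketch of how a consuming project might wire
# the fixtures defined in this module into its own tests; ``Base`` (the
# project's declarative base) and the ``My*`` names are hypothetical and not
# part of oslo.db::
#
#     import unittest
#
#     from oslo_db.sqlalchemy import test_fixtures
#
#     class MyFixture(test_fixtures.GeneratesSchema,
#                     test_fixtures.OpportunisticDbFixture):
#         def generate_schema_create_all(self, engine):
#             # build the project's schema in the provisioned database
#             Base.metadata.create_all(engine)
#
#     class MyDBTestCase(test_fixtures.OpportunisticDBTestMixin,
#                        unittest.TestCase):
#         FIXTURE = MyFixture
#
#         def test_something(self):
#             # enginefacade-based code now runs against the provisioned DB
#             pass
#
# Subclassing MyFixture together with MySQLOpportunisticFixture (or
# PostgresqlOpportunisticFixture) and pointing FIXTURE at it runs the same
# tests against a real MySQL / PostgreSQL server, skipping them when that
# backend is not available.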
import fixtures import os import testresources import testscenarios from oslo_db import exception from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import provision from oslo_db.sqlalchemy import utils class ReplaceEngineFacadeFixture(fixtures.Fixture): """A fixture that will plug the engine of one enginefacade into another. This fixture can be used by test suites that already have their own non-oslo_db database setup / teardown schemes, to plug any URL or test-oriented enginefacade as-is into an enginefacade-oriented API. For applications that use oslo.db's testing fixtures, the ReplaceEngineFacade fixture is used internally. E.g.:: class MyDBTest(TestCase): def setUp(self): from myapplication.api import main_enginefacade my_test_enginefacade = enginefacade.transaction_context() my_test_enginefacade.configure(connection=my_test_url) self.useFixture( ReplaceEngineFacadeFixture( main_enginefacade, my_test_enginefacade)) Above, the main_enginefacade object is the normal application level one, and my_test_enginefacade is a local one that we've created to refer to some testing database. Throughout the fixture's setup, the application level enginefacade will use the engine factory and engines of the testing enginefacade, and at fixture teardown will be replaced back. """ def __init__(self, enginefacade, replace_with_enginefacade): super(ReplaceEngineFacadeFixture, self).__init__() self.enginefacade = enginefacade self.replace_with_enginefacade = replace_with_enginefacade def _setUp(self): _reset_facade = self.enginefacade.patch_factory( self.replace_with_enginefacade._factory ) self.addCleanup(_reset_facade) class BaseDbFixture(fixtures.Fixture): """Base database provisioning fixture. This serves as the base class for the other fixtures, but by itself does not implement _setUp(). It provides the basis for the flags implemented by the various capability mixins (GeneratesSchema, DeletesFromSchema, etc.) as well as providing an abstraction over the provisioning objects, which are specific to testresources. Overall, consumers of this fixture just need to use the right classes and the testresources mechanics are taken care of. """ DRIVER = "sqlite" _DROP_SCHEMA_PER_TEST = True _BUILD_SCHEMA = False _BUILD_WITH_MIGRATIONS = False _database_resources = {} _db_not_available = {} _schema_resources = {} def __init__(self, driver=None, ident=None): super(BaseDbFixture, self).__init__() self.driver = driver or self.DRIVER self.ident = ident or "default" self.resource_key = (self.driver, self.__class__, self.ident) def get_enginefacade(self): """Return an enginefacade._TransactionContextManager. This is typically a global variable like "context_manager" declared in the db/api.py module and is the object returned by enginefacade.transaction_context(). If left unimplemented, the global enginefacade manager is used. For the case where a project uses per-object or per-test enginefacades like Gnocchi, the get_per_test_enginefacade() method should also be implemented. """ return enginefacade._context_manager def get_per_test_enginefacade(self): """Return an enginefacade._TransactionContextManager per test. This facade should be the one that the test expects the code to use. Usually this is the same one returned by get_enginefacade(), which is the default. For special applications like Gnocchi, this can be overridden to provide an instance-level facade.
""" return self.get_enginefacade() def _get_db_resource_not_available_reason(self): return self._db_not_available.get(self.resource_key, None) def _has_db_resource(self): return self._database_resources.get( self.resource_key, None) is not None def _generate_schema_resource(self, database_resource): return provision.SchemaResource( database_resource, None if not self._BUILD_SCHEMA else self.generate_schema_create_all if not self._BUILD_WITH_MIGRATIONS else self.generate_schema_migrations, self._DROP_SCHEMA_PER_TEST ) def _get_resources(self): key = self.resource_key # the DatabaseResource and SchemaResource provision objects # can be used by testresources as a marker outside of an individual # test to indicate that this database / schema can be used across # multiple tests. To make this work, many instances of this # fixture have to return the *same* resource object given the same # inputs. so we cache these in class-level dictionaries. if key not in self._database_resources: _enginefacade = self.get_enginefacade() try: self._database_resources[key] = \ self._generate_database_resource(_enginefacade) except exception.BackendNotAvailable as bne: self._database_resources[key] = None self._db_not_available[key] = str(bne) database_resource = self._database_resources[key] if database_resource is None: return [] else: if key in self._schema_resources: schema_resource = self._schema_resources[key] else: schema_resource = self._schema_resources[key] = \ self._generate_schema_resource(database_resource) return [ ('_schema_%s' % self.ident, schema_resource), ('_db_%s' % self.ident, database_resource) ] class GeneratesSchema(object): """Mixin defining a fixture as generating a schema using create_all(). This is a "capability" mixin that works in conjunction with classes that include BaseDbFixture as a base. """ _BUILD_SCHEMA = True _BUILD_WITH_MIGRATIONS = False def generate_schema_create_all(self, engine): """A hook which should generate the model schema using create_all(). This hook is called within the scope of creating the database assuming BUILD_WITH_MIGRATIONS is False. """ class GeneratesSchemaFromMigrations(GeneratesSchema): """Mixin defining a fixture as generating a schema using migrations. This is a "capability" mixin that works in conjunction with classes that include BaseDbFixture as a base. """ _BUILD_WITH_MIGRATIONS = True def generate_schema_migrations(self, engine): """A hook which should generate the model schema using migrations. This hook is called within the scope of creating the database assuming BUILD_WITH_MIGRATIONS is True. """ class ResetsData(object): """Mixin defining a fixture that resets schema data without dropping.""" _DROP_SCHEMA_PER_TEST = False def setup_for_reset(self, engine, enginefacade): """"Perform setup that may be needed before the test runs.""" def reset_schema_data(self, engine, enginefacade): """Reset the data in the schema.""" class DeletesFromSchema(ResetsData): """Mixin defining a fixture that can delete from all tables in place. When DeletesFromSchema is present in a fixture, _DROP_SCHEMA_PER_TEST is now False; this means that the "teardown" flag of provision.SchemaResource will be False, which prevents SchemaResource from dropping all objects within the schema after each test. This is a "capability" mixin that works in conjunction with classes that include BaseDbFixture as a base. 
""" def reset_schema_data(self, engine, facade): self.delete_from_schema(engine) def delete_from_schema(self, engine): """A hook which should delete all data from an existing schema. Should *not* drop any objects, just remove data from tables that needs to be reset between tests. """ class SimpleDbFixture(BaseDbFixture): """Fixture which provides an engine from a fixed URL. The SimpleDbFixture is generally appropriate only for a SQLite memory database, as this database is naturally isolated from other processes and does not require management of schemas. For tests that need to run specifically against MySQL or Postgresql, the OpportunisticDbFixture is more appropriate. The database connection information itself comes from the provisoning system, matching the desired driver (typically sqlite) to the default URL that provisioning provides for this driver (in the case of sqlite, it's the SQLite memory URL, e.g. sqlite://. For MySQL and Postgresql, it's the familiar "openstack_citest" URL on localhost). There are a variety of create/drop schemes that can take place: * The default is to procure a database connection on setup, and at teardown, an instruction is issued to "drop" all objects in the schema (e.g. tables, indexes). The SQLAlchemy engine itself remains referenced at the class level for subsequent re-use. * When the GeneratesSchema or GeneratesSchemaFromMigrations mixins are implemented, the appropriate generate_schema method is also called when the fixture is set up, by default this is per test. * When the DeletesFromSchema mixin is implemented, the generate_schema method is now only called **once**, and the "drop all objects" system is replaced with the delete_from_schema method. This allows the same database to remain set up with all schema objects intact, so that expensive migrations need not be run on every test. * The fixture does **not** dispose the engine at the end of a test. It is assumed the same engine will be re-used many times across many tests. The AdHocDbFixture extends this one to provide engine.dispose() at the end of a test. This fixture is intended to work without needing a reference to the test itself, and therefore cannot take advantage of the OptimisingTestSuite. """ _dependency_resources = {} def _get_provisioned_db(self): return self._dependency_resources["_db_%s" % self.ident] def _generate_database_resource(self, _enginefacade): return provision.DatabaseResource(self.driver, _enginefacade, provision_new_database=False) def _setUp(self): super(SimpleDbFixture, self)._setUp() cls = self.__class__ if "_db_%s" % self.ident not in cls._dependency_resources: resources = self._get_resources() # initialize resources the same way that testresources does. for name, resource in resources: cls._dependency_resources[name] = resource.getResource() provisioned_db = self._get_provisioned_db() if not self._DROP_SCHEMA_PER_TEST: self.setup_for_reset( provisioned_db.engine, provisioned_db.enginefacade) self.useFixture(ReplaceEngineFacadeFixture( self.get_per_test_enginefacade(), provisioned_db.enginefacade )) if not self._DROP_SCHEMA_PER_TEST: self.addCleanup( self.reset_schema_data, provisioned_db.engine, provisioned_db.enginefacade) self.addCleanup(self._cleanup) def _teardown_resources(self): for name, resource in self._get_resources(): dep = self._dependency_resources.pop(name) resource.finishedWith(dep) def _cleanup(self): pass class AdHocDbFixture(SimpleDbFixture): """"Fixture which creates and disposes a database engine per test. 
Also allows a specific URL to be passed, meaning the fixture can be hardcoded to a specific SQLite file. For a SQLite, this fixture will create the named database upon setup and tear it down upon teardown. For other databases, the database is assumed to exist already and will remain after teardown. """ def __init__(self, url=None): if url: self.url = utils.make_url(url) driver = self.url.get_backend_name() else: driver = None self.url = None BaseDbFixture.__init__( self, driver=driver, ident=provision._random_ident()) self.url = url def _generate_database_resource(self, _enginefacade): return provision.DatabaseResource( self.driver, _enginefacade, ad_hoc_url=self.url, provision_new_database=False) def _cleanup(self): self._teardown_resources() class OpportunisticDbFixture(BaseDbFixture): """Fixture which uses testresources fully for optimised runs. This fixture relies upon the use of the OpportunisticDBTestMixin to supply a test.resources attribute, and also works much more effectively when combined the testresources.OptimisingTestSuite. The optimize_package_test_loader() function should be used at the module and package levels to optimize database provisioning across many tests. """ def __init__(self, test, driver=None, ident=None): super(OpportunisticDbFixture, self).__init__( driver=driver, ident=ident) self.test = test def _get_provisioned_db(self): return getattr(self.test, "_db_%s" % self.ident) def _generate_database_resource(self, _enginefacade): return provision.DatabaseResource( self.driver, _enginefacade, provision_new_database=True) def _setUp(self): super(OpportunisticDbFixture, self)._setUp() if not self._has_db_resource(): return provisioned_db = self._get_provisioned_db() if not self._DROP_SCHEMA_PER_TEST: self.setup_for_reset( provisioned_db.engine, provisioned_db.enginefacade) self.useFixture(ReplaceEngineFacadeFixture( self.get_per_test_enginefacade(), provisioned_db.enginefacade )) if not self._DROP_SCHEMA_PER_TEST: self.addCleanup( self.reset_schema_data, provisioned_db.engine, provisioned_db.enginefacade) class OpportunisticDBTestMixin(object): """Test mixin that integrates the test suite with testresources. There are three goals to this system: 1. Allow creation of "stub" test suites that will run all the tests in a parent suite against a specific kind of database (e.g. Mysql, Postgresql), where the entire suite will be skipped if that target kind of database is not available to the suite. 2. provide a test with a process-local, anonymously named schema within a target database, so that the test can run concurrently with other tests without conflicting data 3. provide compatibility with the testresources.OptimisingTestSuite, which organizes TestCase instances ahead of time into groups that all make use of the same type of database, setting up and tearing down a database schema once for the scope of any number of tests within. This technique is essential when testing against a non-SQLite database because building of a schema is expensive, and also is most ideally accomplished using the applications schema migration which are even more vastly slow than a straight create_all(). This mixin provides the .resources attribute required by testresources when using the OptimisingTestSuite.The .resources attribute then provides a collection of testresources.TestResourceManager objects, which are defined here in oslo_db.sqlalchemy.provision. 
These objects know how to find available database backends, build up temporary databases, and invoke schema generation and teardown instructions. The actual "build the schema objects" part of the equation, and optionally a "delete from all the tables" step, is provided by the implementing application itself. """ SKIP_ON_UNAVAILABLE_DB = True FIXTURE = OpportunisticDbFixture _collected_resources = None _instantiated_fixtures = None @property def resources(self): """Provide a collection of TestResourceManager objects. The collection here is memoized, both at the level of the test case itself, as well as in the fixture object(s) which provide those resources. """ if self._collected_resources is not None: return self._collected_resources fixtures = self._instantiate_fixtures() self._collected_resources = [] for fixture in fixtures: self._collected_resources.extend(fixture._get_resources()) return self._collected_resources def setUp(self): self._setup_fixtures() super(OpportunisticDBTestMixin, self).setUp() def _get_default_provisioned_db(self): return self._db_default def _instantiate_fixtures(self): if self._instantiated_fixtures: return self._instantiated_fixtures self._instantiated_fixtures = utils.to_list(self.generate_fixtures()) return self._instantiated_fixtures def generate_fixtures(self): return self.FIXTURE(test=self) def _setup_fixtures(self): testresources.setUpResources( self, self.resources, testresources._get_result()) self.addCleanup( testresources.tearDownResources, self, self.resources, testresources._get_result() ) fixtures = self._instantiate_fixtures() for fixture in fixtures: self.useFixture(fixture) if not fixture._has_db_resource(): msg = fixture._get_db_resource_not_available_reason() if self.SKIP_ON_UNAVAILABLE_DB: self.skipTest(msg) else: self.fail(msg) class MySQLOpportunisticFixture(OpportunisticDbFixture): DRIVER = 'mysql' class PostgresqlOpportunisticFixture(OpportunisticDbFixture): DRIVER = 'postgresql' def optimize_package_test_loader(file_): """Organize package-level tests into a testresources.OptimizingTestSuite. This function provides a unittest-compatible load_tests hook for a given package; for per-module, use the :func:`.optimize_module_test_loader` function. When a unitest or subunit style test runner is used, the function will be called in order to return a TestSuite containing the tests to run; this function ensures that this suite is an OptimisingTestSuite, which will organize the production of test resources across groups of tests at once. The function is invoked as:: from oslo_db.sqlalchemy import test_fixtures load_tests = test_fixtures.optimize_package_test_loader(__file__) The loader *must* be present in the package level __init__.py. The function also applies testscenarios expansion to all test collections. This so that an existing test suite that already needs to build TestScenarios from a load_tests call can still have this take place when replaced with this function. """ this_dir = os.path.dirname(file_) def load_tests(loader, found_tests, pattern): result = testresources.OptimisingTestSuite() result.addTests(found_tests) pkg_tests = loader.discover(start_dir=this_dir, pattern=pattern) result.addTests(testscenarios.generate_scenarios(pkg_tests)) return result return load_tests def optimize_module_test_loader(): """Organize module-level tests into a testresources.OptimizingTestSuite. This function provides a unittest-compatible load_tests hook for a given module; for per-package, use the :func:`.optimize_package_test_loader` function. 
When a unitest or subunit style test runner is used, the function will be called in order to return a TestSuite containing the tests to run; this function ensures that this suite is an OptimisingTestSuite, which will organize the production of test resources across groups of tests at once. The function is invoked as:: from oslo_db.sqlalchemy import test_fixtures load_tests = test_fixtures.optimize_module_test_loader() The loader *must* be present in an individual module, and *not* the package level __init__.py. The function also applies testscenarios expansion to all test collections. This so that an existing test suite that already needs to build TestScenarios from a load_tests call can still have this take place when replaced with this function. """ def load_tests(loader, found_tests, pattern): result = testresources.OptimisingTestSuite() result.addTests(testscenarios.generate_scenarios(found_tests)) return result return load_tests ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/test_migrations.py0000664000175000017500000002353300000000000022537 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2012-2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import functools import logging import pprint import re import alembic import alembic.autogenerate import alembic.migration import sqlalchemy import sqlalchemy.exc import sqlalchemy.sql.expression as expr import sqlalchemy.types as types from oslo_db.sqlalchemy import provision from oslo_db.sqlalchemy import utils LOG = logging.getLogger(__name__) class ModelsMigrationsSync(object, metaclass=abc.ABCMeta): """A helper class for comparison of DB migration scripts and models. It's intended to be inherited by test cases in target projects. They have to provide implementations for methods used internally in the test (as we have no way to implement them here). test_model_sync() will run migration scripts for the engine provided and then compare the given metadata to the one reflected from the database. The difference between MODELS and MIGRATION scripts will be printed and the test will fail, if the difference is not empty. The return value is really a list of actions, that should be performed in order to make the current database schema state (i.e. migration scripts) consistent with models definitions. It's left up to developers to analyze the output and decide whether the models definitions or the migration scripts should be modified to make them consistent. 
Output:: [( 'add_table', description of the table from models ), ( 'remove_table', description of the table from database ), ( 'add_column', schema, table name, column description from models ), ( 'remove_column', schema, table name, column description from database ), ( 'add_index', description of the index from models ), ( 'remove_index', description of the index from database ), ( 'add_constraint', description of constraint from models ), ( 'remove_constraint, description of constraint from database ), ( 'modify_nullable', schema, table name, column name, { 'existing_type': type of the column from database, 'existing_server_default': default value from database }, nullable from database, nullable from models ), ( 'modify_type', schema, table name, column name, { 'existing_nullable': database nullable, 'existing_server_default': default value from database }, database column type, type of the column from models ), ( 'modify_default', schema, table name, column name, { 'existing_nullable': database nullable, 'existing_type': type of the column from database }, connection column default value, default from models )] Method include_object() can be overridden to exclude some tables from comparison (e.g. migrate_repo). """ @abc.abstractmethod def db_sync(self, engine): """Run migration scripts with the given engine instance. This method must be implemented in subclasses and run migration scripts for a DB the given engine is connected to. """ @abc.abstractmethod def get_engine(self): """Return the engine instance to be used when running tests. This method must be implemented in subclasses and return an engine instance to be used when running tests. """ @abc.abstractmethod def get_metadata(self): """Return the metadata instance to be used for schema comparison. This method must be implemented in subclasses and return the metadata instance attached to the BASE model. """ def include_object(self, object_, name, type_, reflected, compare_to): """Return True for objects that should be compared. :param object_: a SchemaItem object such as a Table or Column object :param name: the name of the object :param type_: a string describing the type of object (e.g. "table") :param reflected: True if the given object was produced based on table reflection, False if it's from a local MetaData object :param compare_to: the object being compared against, if available, else None """ return True def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type): """Return True if types are different, False if not. Return None to allow the default implementation to compare these types. :param ctxt: alembic MigrationContext instance :param insp_col: reflected column :param meta_col: column from model :param insp_type: reflected column type :param meta_type: column type from model """ # some backends (e.g. mysql) don't provide native boolean type BOOLEAN_METADATA = (types.BOOLEAN, types.Boolean) BOOLEAN_SQL = BOOLEAN_METADATA + (types.INTEGER, types.Integer) if issubclass(type(meta_type), BOOLEAN_METADATA): return not issubclass(type(insp_type), BOOLEAN_SQL) # Alembic <=0.8.4 do not contain logic of comparing Variant type with # others. 
if isinstance(meta_type, types.Variant): orig_type = meta_col.type impl_type = meta_type.load_dialect_impl(ctxt.dialect) meta_col.type = impl_type try: return self.compare_type(ctxt, insp_col, meta_col, insp_type, impl_type) finally: meta_col.type = orig_type return ctxt.impl.compare_type(insp_col, meta_col) def compare_server_default(self, ctxt, ins_col, meta_col, insp_def, meta_def, rendered_meta_def): """Compare default values between model and db table. Return True if the defaults are different, False if not, or None to allow the default implementation to compare these defaults. :param ctxt: alembic MigrationContext instance :param insp_col: reflected column :param meta_col: column from model :param insp_def: reflected column default value :param meta_def: column default value from model :param rendered_meta_def: rendered column default value (from model) """ return self._compare_server_default(ctxt.bind, meta_col, insp_def, meta_def) @utils.DialectFunctionDispatcher.dispatch_for_dialect("*") def _compare_server_default(bind, meta_col, insp_def, meta_def): pass @_compare_server_default.dispatch_for('mysql') def _compare_server_default(bind, meta_col, insp_def, meta_def): if isinstance(meta_col.type, sqlalchemy.Boolean): if meta_def is None or insp_def is None: return meta_def != insp_def insp_def = insp_def.strip("'") return not ( isinstance(meta_def.arg, expr.True_) and insp_def == "1" or isinstance(meta_def.arg, expr.False_) and insp_def == "0" ) if isinstance(meta_col.type, sqlalchemy.String): if meta_def is None or insp_def is None: return meta_def != insp_def insp_def = re.sub(r"^'|'$", "", insp_def) return meta_def.arg != insp_def def filter_metadata_diff(self, diff): """Filter changes before assert in test_models_sync(). Allow subclasses to whitelist/blacklist changes. By default, no filtering is performed, changes are returned as is. :param diff: a list of differences (see `compare_metadata()` docs for details on format) :returns: a list of differences """ return diff def test_models_sync(self): # drop all objects after a test run engine = self.get_engine() backend = provision.Backend(engine.name, engine.url) self.addCleanup(functools.partial(backend.drop_all_objects, engine)) # run migration scripts self.db_sync(self.get_engine()) with self.get_engine().connect() as conn: opts = { 'include_object': self.include_object, 'compare_type': self.compare_type, 'compare_server_default': self.compare_server_default, } mc = alembic.migration.MigrationContext.configure(conn, opts=opts) # compare schemas and fail with diff, if it's not empty diff = self.filter_metadata_diff( alembic.autogenerate.compare_metadata(mc, self.get_metadata())) if diff: msg = pprint.pformat(diff, indent=2, width=20) self.fail( "Models and migration scripts aren't in sync:\n%s" % msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/types.py0000664000175000017500000001057100000000000020466 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import json from sqlalchemy.dialects import mysql from sqlalchemy.types import Integer, Text, TypeDecorator class JsonEncodedType(TypeDecorator): """Base column type for data serialized as JSON-encoded string in db.""" type = None impl = Text cache_ok = True """This type is safe to cache.""" def __init__(self, mysql_as_long=False, mysql_as_medium=False): """Initialize JSON-encoding type.""" super(JsonEncodedType, self).__init__() if mysql_as_long and mysql_as_medium: raise TypeError("mysql_as_long and mysql_as_medium are mutually " "exclusive") if mysql_as_long: self.impl = Text().with_variant(mysql.LONGTEXT(), 'mysql') elif mysql_as_medium: self.impl = Text().with_variant(mysql.MEDIUMTEXT(), 'mysql') def process_bind_param(self, value, dialect): """Bind parameters to the process.""" if value is None: if self.type is not None: # Save default value according to current type to keep the # interface consistent. value = self.type() elif self.type is not None and not isinstance(value, self.type): raise TypeError("%s supposes to store %s objects, but %s given" % (self.__class__.__name__, self.type.__name__, type(value).__name__)) serialized_value = json.dumps(value) return serialized_value def process_result_value(self, value, dialect): """Process result value.""" if value is not None: value = json.loads(value) return value class JsonEncodedDict(JsonEncodedType): """Represents dict serialized as json-encoded string in db. Note that this type does NOT track mutations. If you want to update it, you have to assign existing value to a temporary variable, update, then assign back. See this page for more robust work around: http://docs.sqlalchemy.org/en/rel_1_0/orm/extensions/mutable.html """ type = dict cache_ok = True """This type is safe to cache.""" class JsonEncodedList(JsonEncodedType): """Represents list serialized as json-encoded string in db. Note that this type does NOT track mutations. If you want to update it, you have to assign existing value to a temporary variable, update, then assign back. See this page for more robust work around: http://docs.sqlalchemy.org/en/rel_1_0/orm/extensions/mutable.html """ type = list cache_ok = True """This type is safe to cache.""" class SoftDeleteInteger(TypeDecorator): """Coerce a bound param to be a proper integer before passing it to DBAPI. Some backends like PostgreSQL are very strict about types and do not perform automatic type casts, e.g. when trying to INSERT a boolean value like ``false`` into an integer column. Coercing of the bound param in DB layer by the means of a custom SQLAlchemy type decorator makes sure we always pass a proper integer value to a DBAPI implementation. This is not a general purpose boolean integer type as it specifically allows for arbitrary positive integers outside of the boolean int range (0, 1, False, True), so that it's possible to have compound unique constraints over multiple columns including ``deleted`` (e.g. to soft-delete flavors with the same name in Nova without triggering a constraint violation): ``deleted`` is set to be equal to a PK int value on deletion, 0 denotes a non-deleted row. 
""" impl = Integer cache_ok = True """This type is safe to cache.""" def process_bind_param(self, value, dialect): """Return the binding parameter.""" if value is None: return None return int(value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/update_match.py0000664000175000017500000004271000000000000021760 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from sqlalchemy import inspect from sqlalchemy import orm from sqlalchemy import sql from sqlalchemy import types as sqltypes from oslo_db.sqlalchemy import utils def update_on_match( query, specimen, surrogate_key, values=None, attempts=3, include_only=None, process_query=None, handle_failure=None ): """Emit an UPDATE statement matching the given specimen. E.g.:: with enginefacade.writer() as session: specimen = MyInstance( uuid='ccea54f', interface_id='ad33fea', vm_state='SOME_VM_STATE', ) values = { 'vm_state': 'SOME_NEW_VM_STATE' } base_query = model_query( context, models.Instance, project_only=True, session=session) hostname_query = model_query( context, models.Instance, session=session, read_deleted='no'). filter(func.lower(models.Instance.hostname) == 'SOMEHOSTNAME') surrogate_key = ('uuid', ) def process_query(query): return query.where(~exists(hostname_query)) def handle_failure(query): try: instance = base_query.one() except NoResultFound: raise exception.InstanceNotFound(instance_id=instance_uuid) if session.query(hostname_query.exists()).scalar(): raise exception.InstanceExists( name=values['hostname'].lower()) # try again return False persistent_instance = base_query.update_on_match( specimen, surrogate_key, values=values, process_query=process_query, handle_failure=handle_failure ) The UPDATE statement is constructed against the given specimen using those values which are present to construct a WHERE clause. If the specimen contains additional values to be ignored, the ``include_only`` parameter may be passed which indicates a sequence of attributes to use when constructing the WHERE. The UPDATE is performed against an ORM Query, which is created from the given ``Session``, or alternatively by passing the ```query`` parameter referring to an existing query. Before the query is invoked, it is also passed through the callable sent as ``process_query``, if present. This hook allows additional criteria to be added to the query after it is created but before invocation. The function will then invoke the UPDATE statement and check for "success" one or more times, up to a maximum of that passed as ``attempts``. The initial check for "success" from the UPDATE statement is that the number of rows returned matches 1. If zero rows are matched, then the UPDATE statement is assumed to have "failed", and the failure handling phase begins. The failure handling phase involves invoking the given ``handle_failure`` function, if any. 
This handler can perform additional queries to attempt to figure out why the UPDATE didn't match any rows. The handler, upon detection of the exact failure condition, should throw an exception to exit; if it doesn't, it has the option of returning True or False, where False means the error was not handled, and True means that there was not in fact an error, and the function should return successfully. If the failure handler is not present, or returns False after ``attempts`` number of attempts, then the function overall raises CantUpdateException. If the handler returns True, then the function returns with no error. The return value of the function is a persistent version of the given specimen; this may be the specimen itself, if no matching object were already present in the session; otherwise, the existing object is returned, with the state of the specimen merged into it. The returned persistent object will have the given values populated into the object. The object is is returned as "persistent", meaning that it is associated with the given Session and has an identity key (that is, a real primary key value). In order to produce this identity key, a strategy must be used to determine it as efficiently and safely as possible: 1. If the given specimen already contained its primary key attributes fully populated, then these attributes were used as criteria in the UPDATE, so we have the primary key value; it is populated directly. 2. If the target backend supports RETURNING, then when the update() query is performed with a RETURNING clause so that the matching primary key is returned atomically. This currently includes Postgresql, Oracle and others (notably not MySQL or SQLite). 3. If the target backend is MySQL, and the given model uses a single-column, AUTO_INCREMENT integer primary key value (as is the case for Nova), MySQL's recommended approach of making use of ``LAST_INSERT_ID(expr)`` is used to atomically acquire the matching primary key value within the scope of the UPDATE statement, then it fetched immediately following by using ``SELECT LAST_INSERT_ID()``. http://dev.mysql.com/doc/refman/5.0/en/information-\ functions.html#function_last-insert-id 4. Otherwise, for composite keys on MySQL or other backends such as SQLite, the row as UPDATED must be re-fetched in order to acquire the primary key value. The ``surrogate_key`` parameter is used for this in order to re-fetch the row; this is a column name with a known, unique value where the object can be fetched. """ if values is None: values = {} entity = inspect(specimen) mapper = entity.mapper if [desc['type'] for desc in query.column_descriptions] != \ [mapper.class_]: raise AssertionError("Query does not match given specimen") criteria = manufacture_entity_criteria( specimen, include_only=include_only, exclude=[surrogate_key]) query = query.filter(criteria) if process_query: query = process_query(query) surrogate_key_arg = ( surrogate_key, entity.attrs[surrogate_key].loaded_value) pk_value = None for attempt in range(attempts): try: pk_value = query.update_returning_pk(values, surrogate_key_arg) except MultiRowsMatched: raise except NoRowsMatched: if handle_failure and handle_failure(query): break else: break else: raise NoRowsMatched("Zero rows matched for %d attempts" % attempts) if pk_value is None: pk_value = entity.mapper.primary_key_from_instance(specimen) # NOTE(mdbooth): Can't pass the original specimen object here as it might # have lists of multiple potential values rather than actual values. 
values = copy.copy(values) values[surrogate_key] = surrogate_key_arg[1] persistent_obj = manufacture_persistent_object( query.session, specimen.__class__(), values, pk_value) return persistent_obj def manufacture_persistent_object( session, specimen, values=None, primary_key=None): """Make an ORM-mapped object persistent in a Session without SQL. The persistent object is returned. If a matching object is already present in the given session, the specimen is merged into it and the persistent object returned. Otherwise, the specimen itself is made persistent and is returned. The object must contain a full primary key, or provide it via the values or primary_key parameters. The object is peristed to the Session in a "clean" state with no pending changes. :param session: A Session object. :param specimen: a mapped object which is typically transient. :param values: a dictionary of values to be applied to the specimen, in addition to the state that's already on it. The attributes will be set such that no history is created; the object remains clean. :param primary_key: optional tuple-based primary key. This will also be applied to the instance if present. """ state = inspect(specimen) mapper = state.mapper for k, v in values.items(): orm.attributes.set_committed_value(specimen, k, v) pk_attrs = [ mapper.get_property_by_column(col).key for col in mapper.primary_key ] if primary_key is not None: for key, value in zip(pk_attrs, primary_key): orm.attributes.set_committed_value( specimen, key, value ) for key in pk_attrs: if state.attrs[key].loaded_value is orm.attributes.NO_VALUE: raise ValueError("full primary key must be present") orm.make_transient_to_detached(specimen) if state.key not in session.identity_map: session.add(specimen) return specimen else: return session.merge(specimen, load=False) def manufacture_entity_criteria(entity, include_only=None, exclude=None): """Given a mapped instance, produce a WHERE clause. The attributes set upon the instance will be combined to produce a SQL expression using the mapped SQL expressions as the base of comparison. Values on the instance may be set as tuples in which case the criteria will produce an IN clause. None is also acceptable as a scalar or tuple entry, which will produce IS NULL that is properly joined with an OR against an IN expression if appropriate. :param entity: a mapped entity. :param include_only: optional sequence of keys to limit which keys are included. :param exclude: sequence of keys to exclude """ state = inspect(entity) exclude = set(exclude) if exclude is not None else set() existing = dict( (attr.key, attr.loaded_value) for attr in state.attrs if attr.loaded_value is not orm.attributes.NO_VALUE and attr.key not in exclude ) if include_only: existing = dict( (k, existing[k]) for k in set(existing).intersection(include_only) ) return manufacture_criteria(state.mapper, existing) def manufacture_criteria(mapped, values): """Given a mapper/class and a namespace of values, produce a WHERE clause. The class should be a mapped class and the entries in the dictionary correspond to mapped attribute names on the class. A value may also be a tuple in which case that particular attribute will be compared to a tuple using IN. The scalar value or tuple can also contain None which translates to an IS NULL, that is properly joined with OR against an IN expression if appropriate. :param cls: a mapped class, or actual :class:`.Mapper` object. :param values: dictionary of values. 
""" mapper = inspect(mapped) # organize keys using mapped attribute ordering, which is deterministic value_keys = set(values) keys = [k for k in mapper.column_attrs.keys() if k in value_keys] return sql.and_(*[ _sql_crit(mapper.column_attrs[key].expression, values[key]) for key in keys ]) def _sql_crit(expression, value): """Produce an equality expression against the given value. This takes into account a value that is actually a collection of values, as well as a value of None or collection that contains None. """ values = utils.to_list(value, default=(None, )) if len(values) == 1: if values[0] is None: return expression == sql.null() else: return expression == values[0] elif _none_set.intersection(values): return sql.or_( expression == sql.null(), _sql_crit(expression, set(values).difference(_none_set)) ) else: return expression.in_(values) def update_returning_pk(query, values, surrogate_key): """Perform an UPDATE, returning the primary key of the matched row. The primary key is returned using a selection of strategies: * if the database supports RETURNING, RETURNING is used to retrieve the primary key values inline. * If the database is MySQL and the entity is mapped to a single integer primary key column, MySQL's last_insert_id() function is used inline within the UPDATE and then upon a second SELECT to get the value. * Otherwise, a "refetch" strategy is used, where a given "surrogate" key value (typically a UUID column on the entity) is used to run a new SELECT against that UUID. This UUID is also placed into the UPDATE query to ensure the row matches. :param query: a Query object with existing criterion, against a single entity. :param values: a dictionary of values to be updated on the row. :param surrogate_key: a tuple of (attrname, value), referring to a UNIQUE attribute that will also match the row. This attribute is used to retrieve the row via a SELECT when no optimized strategy exists. :return: the primary key, returned as a tuple. Is only returned if rows matched is one. Otherwise, CantUpdateException is raised. """ entity = query.column_descriptions[0]['type'] mapper = inspect(entity).mapper session = query.session bind = session.connection(bind_arguments=dict(mapper=mapper)) if bind.dialect.name == "postgresql": pk_strategy = _pk_strategy_returning elif bind.dialect.name == 'mysql' and \ len(mapper.primary_key) == 1 and \ isinstance( mapper.primary_key[0].type, sqltypes.Integer): pk_strategy = _pk_strategy_mysql_last_insert_id else: pk_strategy = _pk_strategy_refetch return pk_strategy(query, mapper, values, surrogate_key) def _assert_single_row(rows_updated): if rows_updated == 1: return rows_updated elif rows_updated > 1: raise MultiRowsMatched("%d rows matched; expected one" % rows_updated) else: raise NoRowsMatched("No rows matched the UPDATE") def _pk_strategy_refetch(query, mapper, values, surrogate_key): surrogate_key_name, surrogate_key_value = surrogate_key surrogate_key_col = mapper.attrs[surrogate_key_name].expression rowcount = query.\ filter(surrogate_key_col == surrogate_key_value).\ update(values, synchronize_session=False) _assert_single_row(rowcount) # SELECT my_table.id AS my_table_id FROM my_table # WHERE my_table.y = ? AND my_table.z = ? # LIMIT ? OFFSET ? 
fetch_query = query.session.query( *mapper.primary_key).filter( surrogate_key_col == surrogate_key_value) primary_key = fetch_query.one() return primary_key def _pk_strategy_returning(query, mapper, values, surrogate_key): surrogate_key_name, surrogate_key_value = surrogate_key surrogate_key_col = mapper.attrs[surrogate_key_name].expression update_stmt = _update_stmt_from_query(mapper, query, values) update_stmt = update_stmt.where(surrogate_key_col == surrogate_key_value) update_stmt = update_stmt.returning(*mapper.primary_key) # UPDATE my_table SET x=%(x)s, z=%(z)s WHERE my_table.y = %(y_1)s # AND my_table.z = %(z_1)s RETURNING my_table.id result = query.session.execute(update_stmt) rowcount = result.rowcount _assert_single_row(rowcount) primary_key = tuple(result.first()) return primary_key def _pk_strategy_mysql_last_insert_id(query, mapper, values, surrogate_key): surrogate_key_name, surrogate_key_value = surrogate_key surrogate_key_col = mapper.attrs[surrogate_key_name].expression surrogate_pk_col = mapper.primary_key[0] update_stmt = _update_stmt_from_query(mapper, query, values) update_stmt = update_stmt.where(surrogate_key_col == surrogate_key_value) update_stmt = update_stmt.values( {surrogate_pk_col: sql.func.last_insert_id(surrogate_pk_col)}) # UPDATE my_table SET id=last_insert_id(my_table.id), # x=%s, z=%s WHERE my_table.y = %s AND my_table.z = %s result = query.session.execute(update_stmt) rowcount = result.rowcount _assert_single_row(rowcount) # SELECT last_insert_id() AS last_insert_id_1 primary_key = query.session.scalar(sql.func.last_insert_id()), return primary_key def _update_stmt_from_query(mapper, query, values): upd_values = dict( ( mapper.column_attrs[key], value ) for key, value in values.items() ) primary_table = inspect(query.column_descriptions[0]['entity']).local_table where_criteria = query.whereclause update_stmt = sql.update( primary_table, ).where( where_criteria, ).values(upd_values) return update_stmt _none_set = frozenset([None]) class CantUpdateException(Exception): pass class NoRowsMatched(CantUpdateException): pass class MultiRowsMatched(CantUpdateException): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/sqlalchemy/utils.py0000664000175000017500000007515000000000000020466 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010-2011 OpenStack Foundation. # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
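# NOTE: a minimal, illustrative sketch of keyset pagination with the
# ``paginate_query`` helper defined below; ``Instance`` (a mapped model with
# a unique ``uuid`` column), ``session`` and ``prev_page_marker`` are
# hypothetical names, not part of oslo.db::
#
#     from oslo_db.sqlalchemy import utils
#
#     query = session.query(Instance)
#     page = utils.paginate_query(
#         query, Instance, limit=50,
#         # include a unique key so the ordering (and thus the paging) is
#         # stable across requests
#         sort_keys=['created_at', 'uuid'],
#         sort_dirs=['desc', 'asc'],
#         marker=prev_page_marker,  # the last row of the previous page
#     ).all()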
import collections from collections import abc import itertools import logging import re from oslo_utils import timeutils import sqlalchemy from sqlalchemy import Boolean from sqlalchemy.engine import Connectable from sqlalchemy.engine import url as sa_url from sqlalchemy import exc from sqlalchemy import func from sqlalchemy import Index from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy.sql.expression import cast from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql import text from sqlalchemy import Table from oslo_db._i18n import _ from oslo_db import exception from oslo_db.sqlalchemy import models # NOTE(ochuprykov): Add references for backwards compatibility InvalidSortKey = exception.InvalidSortKey ColumnError = exception.ColumnError LOG = logging.getLogger(__name__) _DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+") _VALID_SORT_DIR = [ "-".join(x) for x in itertools.product(["asc", "desc"], ["nullsfirst", "nullslast"])] def sanitize_db_url(url): match = _DBURL_REGEX.match(url) if match: return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):]) return url def get_unique_keys(model): """Get a list of sets of unique model keys. :param model: the ORM model class :rtype: list of sets of strings :return: unique model keys or None if unable to find them """ try: mapper = inspect(model) except exc.NoInspectionAvailable: return None else: local_table = mapper.local_table base_table = mapper.base_mapper.local_table if local_table is None: return None # extract result from cache if present has_info = hasattr(local_table, 'info') if has_info: info = local_table.info if 'oslodb_unique_keys' in info: return info['oslodb_unique_keys'] res = [] try: constraints = base_table.constraints except AttributeError: constraints = [] for constraint in constraints: # filter out any CheckConstraints if isinstance(constraint, (sqlalchemy.UniqueConstraint, sqlalchemy.PrimaryKeyConstraint)): res.append({c.name for c in constraint.columns}) try: indexes = base_table.indexes except AttributeError: indexes = [] for index in indexes: if index.unique: res.append({c.name for c in index.columns}) # cache result for next calls with the same model if has_info: info['oslodb_unique_keys'] = res return res def _stable_sorting_order(model, sort_keys): """Check whether the sorting order is stable. :return: True if it is stable, False if it's not, None if it's impossible to determine. """ keys = get_unique_keys(model) if keys is None: return None sort_keys_set = set(sort_keys) for unique_keys in keys: if unique_keys.issubset(sort_keys_set): return True return False # copy from glance/db/sqlalchemy/api.py def paginate_query(query, model, limit, sort_keys, marker=None, sort_dir=None, sort_dirs=None): """Returns a query with sorting / pagination criteria added. Pagination works by requiring a unique sort_key, specified by sort_keys. (If sort_keys is not unique, then we risk looping through values.) We use the last row in the previous page as the 'marker' for pagination. So we must return values that follow the passed marker in the order. With a single-valued sort_key, this would be easy: sort_key > X. With a compound-values sort_key, (k1, k2, k3) we must do this to repeat the lexicographical ordering: (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) We also have to cope with different sort_directions and cases where k2, k3, ... are nullable. 
Typically, the id of the last row is used as the client-facing pagination marker, then the actual marker object must be fetched from the db and passed in to us as marker. The "offset" parameter is intentionally avoided. As offset requires a full scan through the preceding results each time, criteria-based pagination is preferred. See http://use-the-index-luke.com/no-offset for further background. :param query: the query object to which we should add paging/sorting :param model: the ORM model class :param limit: maximum number of items to return :param sort_keys: array of attributes by which results should be sorted :param marker: the last item of the previous page; we return the next results after this value. :param sort_dir: direction in which results should be sorted (asc, desc); the suffix -nullsfirst or -nullslast can be added to define the ordering of null values :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys :rtype: sqlalchemy.orm.query.Query :return: The query with sorting/pagination added. """ if _stable_sorting_order(model, sort_keys) is False: LOG.warning('Unique keys not in sort_keys. ' 'The sorting order may be unstable.') if sort_dir and sort_dirs: raise AssertionError('Disallow set sort_dir and ' 'sort_dirs at the same time.') # Default the sort direction to ascending if sort_dirs is None and sort_dir is None: sort_dir = 'asc' # Ensure a per-column sort direction if sort_dirs is None: sort_dirs = [sort_dir for _sort_key in sort_keys] if len(sort_dirs) != len(sort_keys): raise AssertionError('sort_dirs and sort_keys must have same length.') # Add sorting for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): try: inspect(model).all_orm_descriptors[current_sort_key] except KeyError: raise exception.InvalidSortKey(current_sort_key) else: sort_key_attr = getattr(model, current_sort_key) try: main_sort_dir, __, null_sort_dir = current_sort_dir.partition("-") sort_dir_func = { 'asc': sqlalchemy.asc, 'desc': sqlalchemy.desc, }[main_sort_dir] null_order_by_stmt = { "": None, "nullsfirst": sort_key_attr.is_(None), "nullslast": sort_key_attr.is_not(None), }[null_sort_dir] except KeyError: raise ValueError(_("Unknown sort direction, " "must be one of: %s") % ", ".join(_VALID_SORT_DIR)) if null_order_by_stmt is not None: query = query.order_by(sqlalchemy.desc(null_order_by_stmt)) query = query.order_by(sort_dir_func(sort_key_attr)) # Add pagination if marker is not None: marker_values = [] for sort_key in sort_keys: v = getattr(marker, sort_key) marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in range(len(sort_keys)): crit_attrs = [] # NOTE: We skip the marker value comparison if marker_values[i] is # None, for two reasons: 1) the comparison operators below # ('<', '>') are not applicable on None value; 2) this is # safe because we can assume the primary key is included in # sort_key, thus checked as (one of) marker values. if marker_values[i] is not None: for j in range(i): model_attr = getattr(model, sort_keys[j]) if marker_values[j] is not None: crit_attrs.append((model_attr == marker_values[j])) model_attr = getattr(model, sort_keys[i]) val = marker_values[i] # sqlalchemy doesn't like booleans in < >.
bug/1656947 if isinstance(model_attr.type, Boolean): val = int(val) model_attr = cast(model_attr, Integer) if sort_dirs[i].startswith('desc'): crit_attr = (model_attr < val) if sort_dirs[i].endswith('nullsfirst'): crit_attr = sqlalchemy.sql.or_(crit_attr, model_attr.is_(None)) else: crit_attr = (model_attr > val) if sort_dirs[i].endswith('nullslast'): crit_attr = sqlalchemy.sql.or_(crit_attr, model_attr.is_(None)) crit_attrs.append(crit_attr) criteria = sqlalchemy.sql.and_(*crit_attrs) criteria_list.append(criteria) f = sqlalchemy.sql.or_(*criteria_list) query = query.filter(f) if limit is not None: query = query.limit(limit) return query def to_list(x, default=None): if x is None: return default if not isinstance(x, abc.Iterable) or isinstance(x, str): return [x] elif isinstance(x, list): return x else: return list(x) def _read_deleted_filter(query, db_model, deleted): if 'deleted' not in db_model.__table__.columns: raise ValueError(_("There is no `deleted` column in `%s` table. " "Project doesn't use soft-deleted feature.") % db_model.__name__) default_deleted_value = db_model.__table__.c.deleted.default.arg if deleted: query = query.filter(db_model.deleted != default_deleted_value) else: query = query.filter(db_model.deleted == default_deleted_value) return query def _project_filter(query, db_model, project_id): if 'project_id' not in db_model.__table__.columns: raise ValueError(_("There is no `project_id` column in `%s` table.") % db_model.__name__) if isinstance(project_id, (list, tuple, set)): query = query.filter(db_model.project_id.in_(project_id)) else: query = query.filter(db_model.project_id == project_id) return query def model_query(model, session, args=None, **kwargs): """Query helper for db.sqlalchemy api methods. This accounts for `deleted` and `project_id` fields. :param model: Model to query. Must be a subclass of ModelBase. :type model: models.ModelBase :param session: The session to use. :type session: sqlalchemy.orm.session.Session :param args: Arguments to query. If None - model is used. :type args: tuple Keyword arguments: :keyword project_id: If present, allows filtering by project_id(s). Can be either a project_id value, or an iterable of project_id values, or None. If an iterable is passed, only rows whose project_id column value is on the `project_id` list will be returned. If None is passed, only rows which are not bound to any project, will be returned. :type project_id: iterable, model.__table__.columns.project_id.type, None type :keyword deleted: If present, allows filtering by deleted field. If True is passed, only deleted entries will be returned, if False - only existing entries. :type deleted: bool Usage: .. code-block:: python from oslo_db.sqlalchemy import utils def get_instance_by_uuid(uuid): session = get_session() with session.begin() return (utils.model_query(models.Instance, session=session) .filter(models.Instance.uuid == uuid) .first()) def get_nodes_stat(): data = (Node.id, Node.cpu, Node.ram, Node.hdd) session = get_session() with session.begin() return utils.model_query(Node, session=session, args=data).all() Also you can create your own helper, based on ``utils.model_query()``. For example, it can be useful if you plan to use ``project_id`` and ``deleted`` parameters from project's ``context`` .. 
code-block:: python from oslo_db.sqlalchemy import utils def _model_query(context, model, session=None, args=None, project_id=None, project_only=False, read_deleted=None): # We suppose, that functions ``_get_project_id()`` and # ``_get_deleted()`` should handle passed parameters and # context object (for example, decide, if we need to restrict a user # to query his own entries by project_id or only allow admin to read # deleted entries). For return values, we expect to get # ``project_id`` and ``deleted``, which are suitable for the # ``model_query()`` signature. kwargs = {} if project_id is not None: kwargs['project_id'] = _get_project_id(context, project_id, project_only) if read_deleted is not None: kwargs['deleted'] = _get_deleted_dict(context, read_deleted) session = session or get_session() with session.begin(): return utils.model_query(model, session=session, args=args, **kwargs) def get_instance_by_uuid(context, uuid): return (_model_query(context, models.Instance, read_deleted='yes') .filter(models.Instance.uuid == uuid) .first()) def get_nodes_data(context, project_id, project_only='allow_none'): data = (Node.id, Node.cpu, Node.ram, Node.hdd) return (_model_query(context, Node, args=data, project_id=project_id, project_only=project_only) .all()) """ if not issubclass(model, models.ModelBase): raise TypeError(_("model should be a subclass of ModelBase")) query = session.query(model) if not args else session.query(*args) if 'deleted' in kwargs: query = _read_deleted_filter(query, model, kwargs['deleted']) if 'project_id' in kwargs: query = _project_filter(query, model, kwargs['project_id']) return query def get_table(engine, name): """Returns an sqlalchemy table dynamically from db. Needed because the models don't work for us in migrations as models will be far out of sync with the current data. .. warning:: Do not use this method when creating ForeignKeys in database migrations because sqlalchemy needs the same MetaData object to hold information about the parent table and the reference table in the ForeignKey. This method uses a unique MetaData object per table object so it won't work with ForeignKey creation. """ metadata = MetaData() return Table(name, metadata, autoload_with=engine) def drop_old_duplicate_entries_from_table(engine, table_name, use_soft_delete, *uc_column_names): """Drop all old rows having the same values for columns in uc_columns. This method drop (or mark ad `deleted` if use_soft_delete is True) old duplicate rows form table with name `table_name`. :param engine: Sqlalchemy engine :param table_name: Table with duplicates :param use_soft_delete: If True - values will be marked as `deleted`, if False - values will be removed from table :param uc_column_names: Unique constraint columns """ meta = MetaData() table = Table(table_name, meta, autoload_with=engine) columns_for_group_by = [table.c[name] for name in uc_column_names] columns_for_select = [func.max(table.c.id)] columns_for_select.extend(columns_for_group_by) duplicated_rows_select = sqlalchemy.sql.select( *columns_for_select, ).group_by( *columns_for_group_by ).having( func.count(table.c.id) > 1 ) with engine.connect() as conn, conn.begin(): for row in conn.execute(duplicated_rows_select).fetchall(): # NOTE(boris-42): Do not remove row that has the biggest ID. 
delete_condition = table.c.id != row[0] is_none = None # workaround for pyflakes delete_condition &= table.c.deleted_at == is_none for name in uc_column_names: delete_condition &= table.c[name] == row._mapping[name] rows_to_delete_select = sqlalchemy.sql.select( table.c.id, ).where(delete_condition) for row in conn.execute(rows_to_delete_select).fetchall(): LOG.info( "Deleting duplicated row with id: %(id)s from table: " "%(table)s", dict(id=row[0], table=table_name)) if use_soft_delete: delete_statement = table.update().\ where(delete_condition).\ values({ 'deleted': literal_column('id'), 'updated_at': literal_column('updated_at'), 'deleted_at': timeutils.utcnow() }) else: delete_statement = table.delete().where(delete_condition) conn.execute(delete_statement) def get_db_connection_info(conn_pieces): database = conn_pieces.path.strip('/') loc_pieces = conn_pieces.netloc.split('@') host = loc_pieces[1] auth_pieces = loc_pieces[0].split(':') user = auth_pieces[0] password = "" if len(auth_pieces) > 1: password = auth_pieces[1].strip() return (user, password, database, host) def get_indexes(engine, table_name): """Get all index list from a given table. :param engine: sqlalchemy engine :param table_name: name of the table """ inspector = sqlalchemy.inspect(engine) indexes = inspector.get_indexes(table_name) return indexes def index_exists(engine, table_name, index_name): """Check if given index exists. :param engine: sqlalchemy engine :param table_name: name of the table :param index_name: name of the index """ indexes = get_indexes(engine, table_name) index_names = [index['name'] for index in indexes] return index_name in index_names def index_exists_on_columns(engine, table_name, columns): """Check if an index on given columns exists. :param engine: sqlalchemy engine :param table_name: name of the table :param columns: a list type of columns that will be checked """ if not isinstance(columns, list): columns = list(columns) for index in get_indexes(engine, table_name): if index['column_names'] == columns: return True return False def add_index(engine, table_name, index_name, idx_columns): """Create an index for given columns. :param engine: sqlalchemy engine :param table_name: name of the table :param index_name: name of the index :param idx_columns: tuple with names of columns that will be indexed """ table = get_table(engine, table_name) if not index_exists(engine, table_name, index_name): index = Index( index_name, *[getattr(table.c, col) for col in idx_columns] ) index.create(engine) else: raise ValueError("Index '%s' already exists!" % index_name) def drop_index(engine, table_name, index_name): """Drop index with given name. :param engine: sqlalchemy engine :param table_name: name of the table :param index_name: name of the index """ table = get_table(engine, table_name) for index in table.indexes: if index.name == index_name: index.drop(engine) break else: raise ValueError("Index '%s' not found!" % index_name) def change_index_columns(engine, table_name, index_name, new_columns): """Change set of columns that are indexed by given index. :param engine: sqlalchemy engine :param table_name: name of the table :param index_name: name of the index :param new_columns: tuple with names of columns that will be indexed """ drop_index(engine, table_name, index_name) add_index(engine, table_name, index_name, new_columns) def column_exists(engine, table_name, column): """Check if table has given column. 
:param engine: sqlalchemy engine :param table_name: name of the table :param column: name of the column """ t = get_table(engine, table_name) return column in t.c class DialectFunctionDispatcher(object): @classmethod def dispatch_for_dialect(cls, expr, multiple=False): """Provide dialect-specific functionality within distinct functions. e.g.:: @dispatch_for_dialect("*") def set_special_option(engine): pass @set_special_option.dispatch_for("sqlite") def set_sqlite_special_option(engine): return engine.execute("sqlite thing") @set_special_option.dispatch_for("mysql+mysqldb") def set_mysqldb_special_option(engine): return engine.execute("mysqldb thing") After the above registration, the ``set_special_option()`` function is now a dispatcher, given a SQLAlchemy ``Engine``, ``Connection``, URL string, or ``sqlalchemy.engine.URL`` object:: eng = create_engine('...') result = set_special_option(eng) The filter system supports two modes, "multiple" and "single". The default is "single", and requires that one and only one function match for a given backend. In this mode, the function may also have a return value, which will be returned by the top level call. "multiple" mode, on the other hand, does not support return arguments, but allows for any number of matching functions, where each function will be called:: # the initial call sets this up as a "multiple" dispatcher @dispatch_for_dialect("*", multiple=True) def set_options(engine): # set options that apply to *all* engines @set_options.dispatch_for("postgresql") def set_postgresql_options(engine): # set options that apply to all Postgresql engines @set_options.dispatch_for("postgresql+psycopg2") def set_postgresql_psycopg2_options(engine): # set options that apply only to "postgresql+psycopg2" @set_options.dispatch_for("*+pyodbc") def set_pyodbc_options(engine): # set options that apply to all pyodbc backends Note that in both modes, any number of additional arguments can be accepted by member functions. For example, to populate a dictionary of options, it may be passed in:: @dispatch_for_dialect("*", multiple=True) def set_engine_options(url, opts): pass @set_engine_options.dispatch_for("mysql+mysqldb") def _mysql_set_default_charset_to_utf8(url, opts): opts.setdefault('charset', 'utf-8') @set_engine_options.dispatch_for("sqlite") def _set_sqlite_in_memory_check_same_thread(url, opts): if url.database in (None, 'memory'): opts['check_same_thread'] = False opts = {} set_engine_options(url, opts) The driver specifiers are of the form: ``<database|*>[+<driver|*>]``. That is, database name or "*", followed by an optional ``+`` sign with driver or "*". Omitting the driver name implies all drivers for that database.
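As an illustrative sketch (the URL below is hypothetical), dispatching the "multiple" example above on a plain URL string selects the matching functions based on the URL's dialect name::

    opts = {}
    set_engine_options("mysql+mysqldb://scott:tiger@localhost/test", opts)
    # opts is now {'charset': 'utf-8'}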
""" if multiple: cls = DialectMultiFunctionDispatcher else: cls = DialectSingleFunctionDispatcher return cls().dispatch_for(expr) _db_plus_driver_reg = re.compile(r'([^+]+?)(?:\+(.+))?$') def dispatch_for(self, expr): def decorate(fn): dbname, driver = self._parse_dispatch(expr) if fn is self: fn = fn._last self._last = fn self._register(expr, dbname, driver, fn) return self return decorate def _parse_dispatch(self, text): m = self._db_plus_driver_reg.match(text) if not m: raise ValueError("Couldn't parse database[+driver]: %r" % text) return m.group(1) or '*', m.group(2) or '*' def __call__(self, *arg, **kw): target = arg[0] return self._dispatch_on( self._url_from_target(target), target, arg, kw) def _url_from_target(self, target): if isinstance(target, Connectable): return target.engine.url elif isinstance(target, str): if "://" not in target: target_url = sa_url.make_url("%s://" % target) else: target_url = sa_url.make_url(target) return target_url elif isinstance(target, sa_url.URL): return target else: raise ValueError("Invalid target type: %r" % target) def dispatch_on_drivername(self, drivername): """Return a sub-dispatcher for the given drivername. This provides a means of calling a different function, such as the "*" function, for a given target object that normally refers to a sub-function. """ dbname, driver = self._db_plus_driver_reg.match(drivername).group(1, 2) def go(*arg, **kw): return self._dispatch_on_db_driver(dbname, "*", arg, kw) return go def _dispatch_on(self, url, target, arg, kw): dbname, driver = self._db_plus_driver_reg.match( url.drivername).group(1, 2) if not driver: driver = url.get_dialect().driver return self._dispatch_on_db_driver(dbname, driver, arg, kw) def _invoke_fn(self, fn, arg, kw): return fn(*arg, **kw) class DialectSingleFunctionDispatcher(DialectFunctionDispatcher): def __init__(self): self.reg = collections.defaultdict(dict) def _register(self, expr, dbname, driver, fn): fn_dict = self.reg[dbname] if driver in fn_dict: raise TypeError("Multiple functions for expression %r" % expr) fn_dict[driver] = fn def _matches(self, dbname, driver): for db in (dbname, '*'): subdict = self.reg[db] for drv in (driver, '*'): if drv in subdict: return subdict[drv] else: raise ValueError( "No default function found for driver: %r" % ("%s+%s" % (dbname, driver))) def _dispatch_on_db_driver(self, dbname, driver, arg, kw): fn = self._matches(dbname, driver) return self._invoke_fn(fn, arg, kw) class DialectMultiFunctionDispatcher(DialectFunctionDispatcher): def __init__(self): self.reg = collections.defaultdict( lambda: collections.defaultdict(list)) def _register(self, expr, dbname, driver, fn): self.reg[dbname][driver].append(fn) def _matches(self, dbname, driver): if driver != '*': drivers = (driver, '*') else: drivers = ('*', ) for db in (dbname, '*'): subdict = self.reg[db] for drv in drivers: for fn in subdict[drv]: yield fn def _dispatch_on_db_driver(self, dbname, driver, arg, kw): for fn in self._matches(dbname, driver): if self._invoke_fn(fn, arg, kw) is not None: raise TypeError( "Return value not allowed for " "multiple filtered function") dispatch_for_dialect = DialectFunctionDispatcher.dispatch_for_dialect def get_non_innodb_tables(connectable, skip_tables=('migrate_version', 'alembic_version')): """Get a list of tables which don't use InnoDB storage engine. 
:param connectable: a SQLAlchemy Engine or a Connection instance :param skip_tables: a list of tables which might have a different storage engine """ query_str = """ SELECT table_name FROM information_schema.tables WHERE table_schema = :database AND engine != 'InnoDB' """ params = {} if skip_tables: params = dict( ('skip_%s' % i, table_name) for i, table_name in enumerate(skip_tables) ) placeholders = ', '.join(':' + p for p in params) query_str += ' AND table_name NOT IN (%s)' % placeholders params['database'] = connectable.engine.url.database query = text(query_str) # TODO(stephenfin): What about if this is already a Connection? with connectable.connect() as conn, conn.begin(): noninnodb = conn.execute(query, params) return [i[0] for i in noninnodb] def get_foreign_key_constraint_name(engine, table_name, column_name): """Find the name of foreign key in a table, given constrained column name. :param engine: a SQLAlchemy engine (or connection) :param table_name: name of table which contains the constraint :param column_name: name of column that is constrained by the foreign key. :return: the name of the first foreign key constraint which constrains the given column in the given table. """ insp = inspect(engine) for fk in insp.get_foreign_keys(table_name): if column_name in fk['constrained_columns']: return fk['name'] def make_url(target): """Return a ``url.URL`` object""" if isinstance(target, (str, sa_url.URL)): return sa_url.make_url(target) else: return sa_url.make_url(str(target)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5326004 oslo.db-16.0.0/oslo_db/tests/0000775000175000017500000000000000000000000015744 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/__init__.py0000664000175000017500000000145500000000000020062 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os def should_run_eventlet_tests(): return bool(int(os.environ.get('TEST_EVENTLET') or '0')) if should_run_eventlet_tests(): import eventlet eventlet.monkey_patch() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/base.py0000664000175000017500000000160700000000000017234 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslotest import base from oslo_db.tests import fixtures class BaseTestCase(base.BaseTestCase): """Test case base class for all oslo.db unit tests.""" def setUp(self): """Run before each test method to initialize test environment.""" super().setUp() self.warning_fixture = self.useFixture(fixtures.WarningsFixture()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/fixtures.py0000664000175000017500000000321100000000000020164 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import warnings import fixtures from sqlalchemy import exc as sqla_exc class WarningsFixture(fixtures.Fixture): """Filters out warnings during test runs.""" def setUp(self): super().setUp() self._original_warning_filters = warnings.filters[:] # Enable deprecation warnings warnings.simplefilter('once', DeprecationWarning) # Except for things we've deprecated but are still testing until # removal warnings.filterwarnings( 'ignore', category=DeprecationWarning, module='oslo_db', ) # Enable generic warnings to ensure we're not doing anything odd warnings.filterwarnings( 'error', category=sqla_exc.SAWarning, ) # Enable deprecation warnings to capture upcoming SQLAlchemy changes warnings.filterwarnings( 'error', category=sqla_exc.SADeprecationWarning, ) self.addCleanup(self._reset_warning_filters) def _reset_warning_filters(self): warnings.filters[:] = self._original_warning_filters ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5366023 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/0000775000175000017500000000000000000000000020106 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/__init__.py0000664000175000017500000000132500000000000022220 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db.sqlalchemy import test_fixtures load_tests = test_fixtures.optimize_package_test_loader(__file__) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/base.py0000664000175000017500000000304400000000000021373 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Openstack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy.test_base import backend_specific # noqa from oslo_db.sqlalchemy import test_fixtures as db_fixtures from oslo_db.tests import base as test_base @enginefacade.transaction_context_provider class Context(object): pass context = Context() # NOTE (zzzeek) These test classes are **private to oslo.db**. Please # make use of oslo_db.sqlalchemy.test_fixtures directly. class _DbTestCase( db_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, ): def setUp(self): super(_DbTestCase, self).setUp() self.engine = enginefacade.writer.get_engine() self.sessionmaker = enginefacade.writer.get_sessionmaker() class _MySQLOpportunisticTestCase(_DbTestCase): FIXTURE = db_fixtures.MySQLOpportunisticFixture class _PostgreSQLOpportunisticTestCase(_DbTestCase): FIXTURE = db_fixtures.PostgresqlOpportunisticFixture ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_async_eventlet.py0000664000175000017500000001121500000000000024542 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for SQLAlchemy and eventlet interaction.""" import logging import unittest from oslo_utils import importutils import sqlalchemy as sa from sqlalchemy import orm from oslo_db import exception as db_exc from oslo_db.sqlalchemy import models from oslo_db import tests from oslo_db.tests.sqlalchemy import base as test_base class EventletTestMixin(object): def setUp(self): super(EventletTestMixin, self).setUp() BASE = orm.declarative_base() class TmpTable(BASE, models.ModelBase): __tablename__ = 'test_async_eventlet' id = sa.Column('id', sa.Integer, primary_key=True, nullable=False) foo = sa.Column('foo', sa.Integer) __table_args__ = ( sa.UniqueConstraint('foo', name='uniq_foo'), ) self.test_table = TmpTable TmpTable.__table__.create(self.engine) self.addCleanup(lambda: TmpTable.__table__.drop(self.engine)) @unittest.skipIf(not tests.should_run_eventlet_tests(), 'eventlet tests disabled unless TEST_EVENTLET=1') def test_concurrent_transaction(self): # Cause sqlalchemy to log executed SQL statements. Useful to # determine exactly what and when was sent to DB. 
sqla_logger = logging.getLogger('sqlalchemy.engine') sqla_logger.setLevel(logging.INFO) self.addCleanup(sqla_logger.setLevel, logging.NOTSET) def operate_on_row(name, ready=None, proceed=None): logging.debug('%s starting', name) _session = self.sessionmaker() with _session.begin(): logging.debug('%s ready', name) # Modify the same row, inside transaction tbl = self.test_table() tbl.update({'foo': 10}) tbl.save(_session) if ready is not None: ready.send() if proceed is not None: logging.debug('%s waiting to proceed', name) proceed.wait() logging.debug('%s exiting transaction', name) logging.debug('%s terminating', name) return True eventlet = importutils.try_import('eventlet') if eventlet is None: return self.skipTest('eventlet is required for this test') a_ready = eventlet.event.Event() a_proceed = eventlet.event.Event() b_proceed = eventlet.event.Event() # thread A opens transaction logging.debug('spawning A') a = eventlet.spawn(operate_on_row, 'A', ready=a_ready, proceed=a_proceed) logging.debug('waiting for A to enter transaction') a_ready.wait() # thread B opens transaction on same row logging.debug('spawning B') b = eventlet.spawn(operate_on_row, 'B', proceed=b_proceed) logging.debug('waiting for B to (attempt to) enter transaction') eventlet.sleep(1) # should(?) advance B to blocking on transaction # While B is still blocked, A should be able to proceed a_proceed.send() # Will block forever(*) if DB library isn't reentrant. # (*) Until some form of timeout/deadlock detection kicks in. # This is the key test that async is working. If this hangs # (or raises a timeout/deadlock exception), then you have failed # this test. self.assertTrue(a.wait()) b_proceed.send() # If everything proceeded without blocking, B will throw a # "duplicate entry" exception when it tries to insert the same row self.assertRaises(db_exc.DBDuplicateEntry, b.wait) # Note that sqlite fails the above concurrency tests, and is not # mentioned below. # ie: This file performs no tests by default. class MySQLEventletTestCase(EventletTestMixin, test_base._MySQLOpportunisticTestCase): pass class PostgreSQLEventletTestCase(EventletTestMixin, test_base._PostgreSQLOpportunisticTestCase): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_enginefacade.py0000664000175000017500000022464700000000000024127 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import contextlib import copy import fixtures import pickle import sys from unittest import mock import warnings from oslo_config import cfg from oslo_context import context as oslo_context from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy.orm import registry from sqlalchemy.orm import Session from sqlalchemy import select from sqlalchemy import String from sqlalchemy import Table from oslo_db import exception from oslo_db import options from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import engines as oslo_engines from oslo_db.sqlalchemy import orm from oslo_db.tests import base as test_base from oslo_db.tests.sqlalchemy import base as db_test_base from oslo_db import warning enginefacade.transaction_context_provider(oslo_context.RequestContext) class SingletonOnName(mock.MagicMock): def __init__(self, the_name, **kw): super(SingletonOnName, self).__init__( __eq__=lambda self, other: other._assert_name == self._assert_name, _assert_name=the_name, **kw ) def __deepcopy__(self, memo): return self class SingletonConnection(SingletonOnName): def __init__(self, **kw): super(SingletonConnection, self).__init__( "connection", **kw) self.info = {} class SingletonEngine(SingletonOnName): def __init__(self, connection, **kw): super(SingletonEngine, self).__init__( "engine", connect=mock.Mock(return_value=connection), pool=mock.Mock(), url=connection, _assert_connection=connection, **kw ) class NonDecoratedContext(object): """a Context object that's not run through transaction_context_provider.""" class AssertDataSource(collections.namedtuple( "AssertDataSource", ["writer", "reader", "async_reader"])): def element_for_writer(self, const): if const is enginefacade._WRITER: return self.writer elif const is enginefacade._READER: return self.reader elif const is enginefacade._ASYNC_READER: return self.async_reader else: assert False, "Unknown constant: %s" % const class MockFacadeTest(test_base.BaseTestCase): """test by applying mocks to internal call-points. This applies mocks to oslo.db.sqlalchemy.engines.create_engine() and oslo.db.sqlalchemy.orm.get_maker(), then mocking a _TransactionFactory into oslo.db.sqlalchemy.enginefacade._context_manager._root_factory. Various scenarios are run against the enginefacade functions, and the exact calls made against the mock create_engine(), get_maker(), and associated objects are tested exactly against expected calls. 
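A typical scenario (an abridged sketch of ``test_session_reader_decorator`` below) decorates a function with ``enginefacade.reader``, invokes it, and then replays the expected calls against the mocks::

    context = oslo_context.RequestContext()

    @enginefacade.reader
    def go(context):
        context.session.execute("test")

    go(context)

    with self._assert_engines() as engines:
        with self._assert_makers(engines) as makers:
            with self._assert_reader_session(makers) as session:
                session.execute("test")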
""" synchronous_reader = True engine_uri = 'some_connection' slave_uri = None def setUp(self): super(MockFacadeTest, self).setUp() writer_conn = SingletonConnection() writer_engine = SingletonEngine(writer_conn) writer_session = mock.Mock( connection=mock.Mock(return_value=writer_conn), info={}) writer_maker = mock.Mock(return_value=writer_session) if self.slave_uri: async_reader_conn = SingletonConnection() async_reader_engine = SingletonEngine(async_reader_conn) async_reader_session = mock.Mock( connection=mock.Mock(return_value=async_reader_conn), info={}) async_reader_maker = mock.Mock(return_value=async_reader_session) else: async_reader_conn = writer_conn async_reader_engine = writer_engine async_reader_session = writer_session async_reader_maker = writer_maker if self.synchronous_reader: reader_conn = writer_conn reader_engine = writer_engine reader_session = writer_session reader_maker = writer_maker else: reader_conn = async_reader_conn reader_engine = async_reader_engine reader_session = async_reader_session reader_maker = async_reader_maker self.connections = AssertDataSource( writer_conn, reader_conn, async_reader_conn ) self.engines = AssertDataSource( writer_engine, reader_engine, async_reader_engine ) self.sessions = AssertDataSource( writer_session, reader_session, async_reader_session ) self.makers = AssertDataSource( writer_maker, reader_maker, async_reader_maker ) def get_maker(engine, **kw): if engine is writer_engine: return self.makers.writer elif engine is reader_engine: return self.makers.reader elif engine is async_reader_engine: return self.makers.async_reader else: assert False session_patch = mock.patch.object( orm, "get_maker", side_effect=get_maker) self.get_maker = session_patch.start() self.addCleanup(session_patch.stop) def create_engine(sql_connection, **kw): if sql_connection == self.engine_uri: return self.engines.writer elif sql_connection == self.slave_uri: return self.engines.async_reader else: assert False engine_patch = mock.patch.object( oslo_engines, "create_engine", side_effect=create_engine) self.create_engine = engine_patch.start() self.addCleanup(engine_patch.stop) self.factory = enginefacade._TransactionFactory() self.factory.configure( synchronous_reader=self.synchronous_reader ) self.factory.configure( connection=self.engine_uri, slave_connection=self.slave_uri ) facade_patcher = mock.patch.object( enginefacade._context_manager, "_root_factory", self.factory) facade_patcher.start() self.addCleanup(facade_patcher.stop) def _assert_ctx_connection(self, context, connection): self.assertIs(context.connection, connection) def _assert_ctx_session(self, context, session): self.assertIs(context.session, session) def _assert_non_decorated_ctx_connection(self, context, connection): transaction_ctx = enginefacade._transaction_ctx_for_context(context) self.assertIs(transaction_ctx.connection, connection) def _assert_non_decorated_ctx_session(self, context, session): transaction_ctx = enginefacade._transaction_ctx_for_context(context) self.assertIs(transaction_ctx.session, session) @contextlib.contextmanager def _assert_engines(self): """produce a mock series of engine calls. These are expected to match engine-related calls established by the test subject. 
""" writer_conn = SingletonConnection() writer_engine = SingletonEngine(writer_conn) if self.slave_uri: async_reader_conn = SingletonConnection() async_reader_engine = SingletonEngine(async_reader_conn) else: async_reader_conn = writer_conn async_reader_engine = writer_engine if self.synchronous_reader: reader_engine = writer_engine else: reader_engine = async_reader_engine engines = AssertDataSource( writer_engine, reader_engine, async_reader_engine) def create_engine(sql_connection, **kw): if sql_connection == self.engine_uri: return engines.writer elif sql_connection == self.slave_uri: return engines.async_reader else: assert False engine_factory = mock.Mock(side_effect=create_engine) engine_factory( sql_connection=self.engine_uri, **{ k: mock.ANY for k in self.factory._engine_cfg.keys() }, ) if self.slave_uri: engine_factory( sql_connection=self.slave_uri, **{ k: mock.ANY for k in self.factory._engine_cfg.keys() }, ) yield AssertDataSource( writer_engine, reader_engine, async_reader_engine ) self.assertEqual( engine_factory.mock_calls, self.create_engine.mock_calls ) for sym in [ enginefacade._WRITER, enginefacade._READER, enginefacade._ASYNC_READER ]: self.assertEqual( engines.element_for_writer(sym).mock_calls, self.engines.element_for_writer(sym).mock_calls ) def _assert_async_reader_connection(self, engines, session=None): return self._assert_connection( engines, enginefacade._ASYNC_READER, session) def _assert_reader_connection(self, engines, session=None): return self._assert_connection(engines, enginefacade._READER, session) def _assert_writer_connection(self, engines, session=None): return self._assert_connection(engines, enginefacade._WRITER, session) @contextlib.contextmanager def _assert_connection(self, engines, writer, session=None): """produce a mock series of connection calls. These are expected to match connection-related calls established by the test subject. 
""" if session: connection = session.connection() yield connection else: connection = engines.element_for_writer(writer).connect() trans = connection.begin() yield connection if writer is enginefacade._WRITER: trans.commit() else: trans.rollback() connection.close() self.assertEqual( connection.mock_calls, self.connections.element_for_writer(writer).mock_calls) @contextlib.contextmanager def _assert_makers(self, engines): writer_session = mock.Mock(connection=mock.Mock( return_value=engines.writer._assert_connection) ) writer_maker = mock.Mock(return_value=writer_session) if self.slave_uri: async_reader_session = mock.Mock(connection=mock.Mock( return_value=engines.async_reader._assert_connection) ) async_reader_maker = mock.Mock(return_value=async_reader_session) else: async_reader_session = writer_session async_reader_maker = writer_maker if self.synchronous_reader: reader_maker = writer_maker else: reader_maker = async_reader_maker makers = AssertDataSource( writer_maker, reader_maker, async_reader_maker, ) def get_maker(engine, **kw): if engine is engines.writer: return makers.writer elif engine is engines.reader: return makers.reader elif engine is engines.async_reader: return makers.async_reader else: assert False maker_factories = mock.Mock(side_effect=get_maker) maker_factories( engine=engines.writer, expire_on_commit=False) if self.slave_uri: maker_factories( engine=engines.async_reader, expire_on_commit=False) yield makers self.assertEqual( maker_factories.mock_calls, self.get_maker.mock_calls) for sym in [ enginefacade._WRITER, enginefacade._READER, enginefacade._ASYNC_READER ]: self.assertEqual( makers.element_for_writer(sym).mock_calls, self.makers.element_for_writer(sym).mock_calls) def _assert_async_reader_session( self, makers, connection=None, assert_calls=True): return self._assert_session( makers, enginefacade._ASYNC_READER, connection, assert_calls) def _assert_reader_session( self, makers, connection=None, assert_calls=True): return self._assert_session( makers, enginefacade._READER, connection, assert_calls) def _assert_writer_session( self, makers, connection=None, assert_calls=True): return self._assert_session( makers, enginefacade._WRITER, connection, assert_calls) def _emit_sub_writer_session(self, session): return self._emit_sub_session(enginefacade._WRITER, session) def _emit_sub_reader_session(self, session): return self._emit_sub_session(enginefacade._READER, session) @contextlib.contextmanager def _assert_session( self, makers, writer, connection=None, assert_calls=True): """produce a mock series of session calls. These are expected to match session-related calls established by the test subject. 
""" if connection: session = makers.element_for_writer(writer)(bind=connection) else: session = makers.element_for_writer(writer)() session.begin() yield session if writer is enginefacade._WRITER: session.commit() elif enginefacade.\ _context_manager._factory._transaction_ctx_cfg[ 'rollback_reader_sessions']: session.rollback() session.close() if assert_calls: self.assertEqual( session.mock_calls, self.sessions.element_for_writer(writer).mock_calls) @contextlib.contextmanager def _emit_sub_session(self, writer, session): yield session if enginefacade._context_manager.\ _factory._transaction_ctx_cfg['flush_on_subtransaction']: session.flush() def test_dispose_pool(self): facade = enginefacade.transaction_context() facade.configure( connection=self.engine_uri, ) facade.dispose_pool() self.assertFalse(hasattr(facade._factory, '_writer_engine')) facade._factory._start() facade.dispose_pool() self.assertEqual( facade._factory._writer_engine.pool.mock_calls, [mock.call.dispose()] ) def test_dispose_pool_w_reader(self): facade = enginefacade.transaction_context() facade.configure( connection=self.engine_uri, slave_connection=self.slave_uri ) facade.dispose_pool() self.assertFalse(hasattr(facade._factory, '_writer_engine')) self.assertFalse(hasattr(facade._factory, '_reader_engine')) facade._factory._start() facade.dispose_pool() self.assertEqual( facade._factory._writer_engine.pool.mock_calls, [mock.call.dispose()] ) self.assertEqual( facade._factory._reader_engine.pool.mock_calls, [mock.call.dispose()] ) def test_started_flag(self): facade = enginefacade.transaction_context() self.assertFalse(facade.is_started) facade.configure(connection=self.engine_uri) facade.writer.get_engine() self.assertTrue(facade.is_started) def test_started_exception(self): facade = enginefacade.transaction_context() self.assertFalse(facade.is_started) facade.configure(connection=self.engine_uri) facade.writer.get_engine() exc = self.assertRaises( enginefacade.AlreadyStartedError, facade.configure, connection=self.engine_uri ) self.assertEqual( "this TransactionFactory is already started", exc.args[0] ) def test_session_reader_decorator(self): context = oslo_context.RequestContext() @enginefacade.reader def go(context): context.session.execute("test") go(context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_reader_session(makers) as session: session.execute("test") def test_session_reader_decorator_kwarg_call(self): context = oslo_context.RequestContext() @enginefacade.reader def go(context): context.session.execute("test") go(context=context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_reader_session(makers) as session: session.execute("test") def test_connection_reader_decorator(self): context = oslo_context.RequestContext() @enginefacade.reader.connection def go(context): context.connection.execute("test") go(context) with self._assert_engines() as engines: with self._assert_reader_connection(engines) as connection: connection.execute("test") def test_session_reader_nested_in_connection_reader(self): context = oslo_context.RequestContext() @enginefacade.reader.connection def go1(context): context.connection.execute("test1") go2(context) @enginefacade.reader def go2(context): context.session.execute("test2") go1(context) with self._assert_engines() as engines: with self._assert_reader_connection(engines) as connection: connection.execute("test1") with self._assert_makers(engines) as makers: with 
self._assert_reader_session( makers, connection) as session: session.execute("test2") def test_connection_reader_nested_in_session_reader(self): context = oslo_context.RequestContext() @enginefacade.reader def go1(context): context.session.execute("test1") go2(context) @enginefacade.reader.connection def go2(context): context.connection.execute("test2") go1(context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_reader_session(makers) as session: session.execute("test1") with self._assert_reader_connection( engines, session) as connection: connection.execute("test2") def test_session_reader_decorator_nested(self): context = oslo_context.RequestContext() @enginefacade.reader def go1(context): context.session.execute("test1") go2(context) @enginefacade.reader def go2(context): context.session.execute("test2") go1(context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_reader_session(makers) as session: session.execute("test1") session.execute("test2") def test_reader_nested_in_writer_ok(self): context = oslo_context.RequestContext() @enginefacade.writer def go1(context): context.session.execute("test1") go2(context) @enginefacade.reader def go2(context): context.session.execute("test2") go1(context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_writer_session(makers) as session: session.execute("test1") session.execute("test2") def test_writer_nested_in_reader_raises(self): context = oslo_context.RequestContext() @enginefacade.reader def go1(context): context.session.execute("test1") go2(context) @enginefacade.writer def go2(context): context.session.execute("test2") exc = self.assertRaises( TypeError, go1, context ) self.assertEqual( "Can't upgrade a READER " "transaction to a WRITER mid-transaction", exc.args[0] ) def test_async_on_writer_raises(self): exc = self.assertRaises( TypeError, getattr, enginefacade.writer, "async_" ) self.assertEqual( "Setting async on a WRITER makes no sense", exc.args[0] ) def test_savepoint_and_independent_raises(self): exc = self.assertRaises( TypeError, getattr, enginefacade.writer.independent, "savepoint" ) self.assertEqual( "setting savepoint and independent makes no sense.", exc.args[0] ) def test_reader_nested_in_async_reader_raises(self): context = oslo_context.RequestContext() @enginefacade.reader.async_ def go1(context): context.session.execute("test1") go2(context) @enginefacade.reader def go2(context): context.session.execute("test2") exc = self.assertRaises( TypeError, go1, context ) self.assertEqual( "Can't upgrade an ASYNC_READER transaction " "to a READER mid-transaction", exc.args[0] ) def test_reader_allow_async_nested_in_async_reader(self): context = oslo_context.RequestContext() @enginefacade.reader.async_ def go1(context): context.session.execute("test1") go2(context) @enginefacade.reader.allow_async def go2(context): context.session.execute("test2") go1(context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_async_reader_session(makers) as session: session.execute("test1") session.execute("test2") def test_reader_allow_async_nested_in_reader(self): context = oslo_context.RequestContext() @enginefacade.reader.reader def go1(context): context.session.execute("test1") go2(context) @enginefacade.reader.allow_async def go2(context): context.session.execute("test2") go1(context) with self._assert_engines() as engines: with 
self._assert_makers(engines) as makers: with self._assert_reader_session(makers) as session: session.execute("test1") session.execute("test2") def test_reader_allow_async_is_reader_by_default(self): context = oslo_context.RequestContext() @enginefacade.reader.allow_async def go1(context): context.session.execute("test1") go1(context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_reader_session(makers) as session: session.execute("test1") def test_writer_nested_in_async_reader_raises(self): context = oslo_context.RequestContext() @enginefacade.reader.async_ def go1(context): context.session.execute("test1") go2(context) @enginefacade.writer def go2(context): context.session.execute("test2") exc = self.assertRaises( TypeError, go1, context ) self.assertEqual( "Can't upgrade an ASYNC_READER transaction to a " "WRITER mid-transaction", exc.args[0] ) def test_reader_then_writer_ok(self): context = oslo_context.RequestContext() @enginefacade.reader def go1(context): context.session.execute("test1") @enginefacade.writer def go2(context): context.session.execute("test2") go1(context) go2(context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_reader_session( makers, assert_calls=False) as session: session.execute("test1") with self._assert_writer_session(makers) as session: session.execute("test2") def test_deprecated_async_reader_name(self): if sys.version_info >= (3, 7): self.skipTest("Test only runs on Python < 3.7") context = oslo_context.RequestContext() old = getattr(enginefacade.reader, "async") @old def go1(context): context.session.execute("test1") go1(context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_async_reader_session( makers, assert_calls=False) as session: session.execute("test1") def test_async_reader_then_reader_ok(self): context = oslo_context.RequestContext() @enginefacade.reader.async_ def go1(context): context.session.execute("test1") @enginefacade.reader def go2(context): context.session.execute("test2") go1(context) go2(context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_async_reader_session( makers, assert_calls=False) as session: session.execute("test1") with self._assert_reader_session(makers) as session: session.execute("test2") def test_using_reader(self): context = oslo_context.RequestContext() with enginefacade.reader.using(context) as session: self._assert_ctx_session(context, session) session.execute("test1") with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_reader_session(makers) as session: session.execute("test1") def test_using_context_present_in_session_info(self): context = oslo_context.RequestContext() with enginefacade.reader.using(context) as session: self.assertEqual(context, session.info['using_context']) self.assertIsNone(session.info['using_context']) def test_using_context_present_in_connection_info(self): context = oslo_context.RequestContext() with enginefacade.writer.connection.using(context) as connection: self.assertEqual(context, connection.info['using_context']) self.assertIsNone(connection.info['using_context']) def test_using_reader_rollback_reader_session(self): enginefacade.configure(rollback_reader_sessions=True) context = oslo_context.RequestContext() with enginefacade.reader.using(context) as session: self._assert_ctx_session(context, session) 
session.execute("test1") with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_reader_session(makers) as session: session.execute("test1") def test_using_flush_on_nested(self): enginefacade.configure(flush_on_nested=True) context = oslo_context.RequestContext() with enginefacade.writer.using(context) as session: with enginefacade.writer.using(context) as session: self._assert_ctx_session(context, session) session.execute("test1") with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_writer_session(makers) as session: with self._emit_sub_writer_session( session) as session: session.execute("test1") def test_using_writer(self): context = oslo_context.RequestContext() with enginefacade.writer.using(context) as session: self._assert_ctx_session(context, session) session.execute("test1") with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_writer_session(makers) as session: session.execute("test1") def test_using_writer_no_descriptors(self): context = NonDecoratedContext() with enginefacade.writer.using(context) as session: self._assert_non_decorated_ctx_session(context, session) session.execute("test1") with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_writer_session(makers) as session: session.execute("test1") def test_using_writer_connection_no_descriptors(self): context = NonDecoratedContext() with enginefacade.writer.connection.using(context) as connection: self._assert_non_decorated_ctx_connection(context, connection) connection.execute("test1") with self._assert_engines() as engines: with self._assert_writer_connection(engines) as conn: conn.execute("test1") def test_using_reader_connection(self): context = oslo_context.RequestContext() with enginefacade.reader.connection.using(context) as connection: self._assert_ctx_connection(context, connection) connection.execute("test1") with self._assert_engines() as engines: with self._assert_reader_connection(engines) as conn: conn.execute("test1") def test_using_writer_connection(self): context = oslo_context.RequestContext() with enginefacade.writer.connection.using(context) as connection: self._assert_ctx_connection(context, connection) connection.execute("test1") with self._assert_engines() as engines: with self._assert_writer_connection(engines) as conn: conn.execute("test1") def test_context_copied_using_existing_writer_connection(self): context = oslo_context.RequestContext() with enginefacade.writer.connection.using(context) as connection: self._assert_ctx_connection(context, connection) connection.execute("test1") ctx2 = copy.deepcopy(context) with enginefacade.reader.connection.using(ctx2) as conn2: self.assertIs(conn2, connection) self._assert_ctx_connection(ctx2, conn2) conn2.execute("test2") with self._assert_engines() as engines: with self._assert_writer_connection(engines) as conn: conn.execute("test1") conn.execute("test2") def test_context_nodesc_copied_using_existing_writer_connection(self): context = NonDecoratedContext() with enginefacade.writer.connection.using(context) as connection: self._assert_non_decorated_ctx_connection(context, connection) connection.execute("test1") ctx2 = copy.deepcopy(context) with enginefacade.reader.connection.using(ctx2) as conn2: self.assertIs(conn2, connection) self._assert_non_decorated_ctx_connection(ctx2, conn2) conn2.execute("test2") with self._assert_engines() as engines: with 
self._assert_writer_connection(engines) as conn: conn.execute("test1") conn.execute("test2") def test_session_context_notrequested_exception(self): context = oslo_context.RequestContext() with enginefacade.reader.connection.using(context): exc = self.assertRaises( exception.ContextNotRequestedError, getattr, context, 'session' ) self.assertRegex( exc.args[0], "The 'session' context attribute was requested but it has " "not been established for this context." ) def test_connection_context_notrequested_exception(self): context = oslo_context.RequestContext() with enginefacade.reader.using(context): exc = self.assertRaises( exception.ContextNotRequestedError, getattr, context, 'connection' ) self.assertRegex( exc.args[0], "The 'connection' context attribute was requested but it has " "not been established for this context." ) def test_session_context_exception(self): context = oslo_context.RequestContext() exc = self.assertRaises( exception.NoEngineContextEstablished, getattr, context, 'session' ) self.assertRegex( exc.args[0], "No TransactionContext is established for " "this .*RequestContext.* object within the current " "thread; the 'session' attribute is unavailable." ) def test_session_context_getattr(self): context = oslo_context.RequestContext() self.assertIsNone(getattr(context, 'session', None)) def test_connection_context_exception(self): context = oslo_context.RequestContext() exc = self.assertRaises( exception.NoEngineContextEstablished, getattr, context, 'connection' ) self.assertRegex( exc.args[0], "No TransactionContext is established for " "this .*RequestContext.* object within the current " "thread; the 'connection' attribute is unavailable." ) def test_connection_context_getattr(self): context = oslo_context.RequestContext() self.assertIsNone(getattr(context, 'connection', None)) def test_transaction_context_exception(self): context = oslo_context.RequestContext() exc = self.assertRaises( exception.NoEngineContextEstablished, getattr, context, 'transaction' ) self.assertRegex( exc.args[0], "No TransactionContext is established for " "this .*RequestContext.* object within the current " "thread; the 'transaction' attribute is unavailable." ) def test_transaction_context_getattr(self): context = oslo_context.RequestContext() self.assertIsNone(getattr(context, 'transaction', None)) def test_trans_ctx_context_exception(self): context = oslo_context.RequestContext() exc = self.assertRaises( exception.NoEngineContextEstablished, getattr, context, 'transaction_ctx' ) self.assertRegex( exc.args[0], "No TransactionContext is established for " "this .*RequestContext.* object within the current " "thread." ) def test_trans_ctx_context_getattr(self): context = oslo_context.RequestContext() self.assertIsNone(getattr(context, 'transaction_ctx', None)) def test_multiple_factories(self): """Test that the instrumentation applied to a context class is independent of a specific _TransactionContextManager / _TransactionFactory. 
""" mgr1 = enginefacade.transaction_context() mgr1.configure( connection=self.engine_uri, slave_connection=self.slave_uri ) mgr2 = enginefacade.transaction_context() mgr2.configure( connection=self.engine_uri, slave_connection=self.slave_uri ) context = oslo_context.RequestContext() self.assertRaises( exception.NoEngineContextEstablished, getattr, context, 'session' ) with mgr1.writer.using(context): self.assertIs(context.transaction_ctx.factory, mgr1._factory) self.assertIsNot(context.transaction_ctx.factory, mgr2._factory) self.assertIsNotNone(context.session) self.assertRaises( exception.NoEngineContextEstablished, getattr, context, 'session' ) with mgr2.writer.using(context): self.assertIsNot(context.transaction_ctx.factory, mgr1._factory) self.assertIs(context.transaction_ctx.factory, mgr2._factory) self.assertIsNotNone(context.session) def test_multiple_factories_nested(self): """Test that the instrumentation applied to a context class supports nested calls among multiple _TransactionContextManager objects. """ mgr1 = enginefacade.transaction_context() mgr1.configure( connection=self.engine_uri, slave_connection=self.slave_uri ) mgr2 = enginefacade.transaction_context() mgr2.configure( connection=self.engine_uri, slave_connection=self.slave_uri ) context = oslo_context.RequestContext() with mgr1.writer.using(context): self.assertIs(context.transaction_ctx.factory, mgr1._factory) self.assertIsNot(context.transaction_ctx.factory, mgr2._factory) with mgr2.writer.using(context): self.assertIsNot( context.transaction_ctx.factory, mgr1._factory) self.assertIs(context.transaction_ctx.factory, mgr2._factory) self.assertIsNotNone(context.session) # mgr1 is restored self.assertIs(context.transaction_ctx.factory, mgr1._factory) self.assertIsNot(context.transaction_ctx.factory, mgr2._factory) self.assertIsNotNone(context.session) self.assertRaises( exception.NoEngineContextEstablished, getattr, context, 'transaction_ctx' ) def test_context_found_for_bound_method(self): context = oslo_context.RequestContext() @enginefacade.reader def go(self, context): context.session.execute("test") go(self, context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_reader_session(makers) as session: session.execute("test") def test_context_found_for_class_method(self): context = oslo_context.RequestContext() class Spam(object): @classmethod @enginefacade.reader def go(cls, context): context.session.execute("test") Spam.go(context) with self._assert_engines() as engines: with self._assert_makers(engines) as makers: with self._assert_reader_session(makers) as session: session.execute("test") class PatchFactoryTest(test_base.BaseTestCase): def test_patch_manager(self): normal_mgr = enginefacade.transaction_context() normal_mgr.configure(connection="sqlite:///foo.db") alt_mgr = enginefacade.transaction_context() alt_mgr.configure(connection="sqlite:///bar.db") @normal_mgr.writer def go1(context): s1 = context.session self.assertEqual( s1.bind.url, "sqlite:///foo.db") self.assertIs( s1.bind, normal_mgr._factory._writer_engine) @normal_mgr.writer def go2(context): s1 = context.session self.assertEqual( s1.bind.url, "sqlite:///bar.db") self.assertIs( normal_mgr._factory._writer_engine, alt_mgr._factory._writer_engine ) def create_engine(sql_connection, **kw): return mock.Mock(url=sql_connection) with mock.patch( "oslo_db.sqlalchemy.engines.create_engine", create_engine): context = oslo_context.RequestContext() go1(context) reset = normal_mgr.patch_factory(alt_mgr) 
go2(context) reset() go1(context) def test_patch_factory(self): normal_mgr = enginefacade.transaction_context() normal_mgr.configure(connection="sqlite:///foo.db") alt_mgr = enginefacade.transaction_context() alt_mgr.configure(connection="sqlite:///bar.db") @normal_mgr.writer def go1(context): s1 = context.session self.assertEqual( s1.bind.url, "sqlite:///foo.db") self.assertIs( s1.bind, normal_mgr._factory._writer_engine) @normal_mgr.writer def go2(context): s1 = context.session self.assertEqual( s1.bind.url, "sqlite:///bar.db") self.assertIs( normal_mgr._factory._writer_engine, alt_mgr._factory._writer_engine ) def create_engine(sql_connection, **kw): return mock.Mock(url=sql_connection) with mock.patch( "oslo_db.sqlalchemy.engines.create_engine", create_engine): context = oslo_context.RequestContext() go1(context) reset = normal_mgr.patch_factory(alt_mgr._factory) go2(context) reset() go1(context) def test_patch_engine(self): normal_mgr = enginefacade.transaction_context() normal_mgr.configure( connection="sqlite:///foo.db", rollback_reader_sessions=True ) @normal_mgr.writer def go1(context): s1 = context.session self.assertEqual( s1.bind.url, "sqlite:///foo.db") self.assertIs( s1.bind, normal_mgr._factory._writer_engine) @normal_mgr.writer def go2(context): s1 = context.session self.assertEqual( s1.bind.url, "sqlite:///bar.db") self.assertTrue( enginefacade._transaction_ctx_for_context( context).rollback_reader_sessions ) # ensure this defaults to True self.assertTrue( enginefacade._transaction_ctx_for_context( context).factory.synchronous_reader ) def create_engine(sql_connection, **kw): return mock.Mock(url=sql_connection) with mock.patch( "oslo_db.sqlalchemy.engines.create_engine", create_engine): mock_engine = create_engine("sqlite:///bar.db") context = oslo_context.RequestContext() go1(context) reset = normal_mgr.patch_engine(mock_engine) go2(context) self.assertIs( normal_mgr._factory._writer_engine, mock_engine) reset() go1(context) def test_patch_not_started(self): normal_mgr = enginefacade.transaction_context() normal_mgr.configure( connection="sqlite:///foo.db", rollback_reader_sessions=True ) @normal_mgr.writer def go1(context): s1 = context.session self.assertEqual( s1.bind.url, "sqlite:///bar.db") self.assertTrue( enginefacade._transaction_ctx_for_context( context).rollback_reader_sessions ) def create_engine(sql_connection, **kw): return mock.Mock(url=sql_connection) with mock.patch( "oslo_db.sqlalchemy.engines.create_engine", create_engine): mock_engine = create_engine("sqlite:///bar.db") context = oslo_context.RequestContext() reset = normal_mgr.patch_engine(mock_engine) go1(context) self.assertIs( normal_mgr._factory._writer_engine, mock_engine) reset() def test_new_manager_from_config(self): normal_mgr = enginefacade.transaction_context() normal_mgr.configure( connection="sqlite://", sqlite_fk=True, mysql_sql_mode="FOOBAR", max_overflow=38 ) normal_mgr._factory._start() copied_mgr = normal_mgr.make_new_manager() self.assertTrue(normal_mgr._factory._started) self.assertIsNotNone(normal_mgr._factory._writer_engine) self.assertIsNot(copied_mgr._factory, normal_mgr._factory) self.assertFalse(copied_mgr._factory._started) copied_mgr._factory._start() self.assertIsNot( normal_mgr._factory._writer_engine, copied_mgr._factory._writer_engine) engine_args = copied_mgr._factory._engine_args_for_conf(None) self.assertTrue(engine_args['sqlite_fk']) self.assertEqual("FOOBAR", engine_args["mysql_sql_mode"]) self.assertEqual(38, engine_args["max_overflow"]) 
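# NOTE: make_new_manager() copies the configured options (sqlite_fk,
# mysql_sql_mode, max_overflow above) into a fresh, not-yet-started factory;
# engines are only built when the copy is started, which is why the writer
# engines were asserted above to be distinct objects.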
self.assertNotIn("mysql_wsrep_sync_wait", engine_args) def test_new_manager_from_options(self): """test enginefacade's defaults given a default structure from opts""" factory = enginefacade._TransactionFactory() cfg.CONF.register_opts(options.database_opts, 'database') factory.configure(**dict(cfg.CONF.database.items())) engine_args = factory._engine_args_for_conf(None) self.assertEqual(None, engine_args["mysql_wsrep_sync_wait"]) self.assertEqual(True, engine_args["sqlite_synchronous"]) self.assertEqual("TRADITIONAL", engine_args["mysql_sql_mode"]) class SynchronousReaderWSlaveMockFacadeTest(MockFacadeTest): synchronous_reader = True engine_uri = 'some_connection' slave_uri = 'some_slave_connection' class AsyncReaderWSlaveMockFacadeTest(MockFacadeTest): synchronous_reader = False engine_uri = 'some_connection' slave_uri = 'some_slave_connection' class LegacyIntegrationtest(db_test_base._DbTestCase): def test_legacy_integration(self): legacy_facade = enginefacade.get_legacy_facade() self.assertTrue( legacy_facade.get_engine() is enginefacade._context_manager._factory._writer_engine ) self.assertTrue( enginefacade.get_legacy_facade() is legacy_facade ) def test_get_sessionmaker(self): legacy_facade = enginefacade.get_legacy_facade() self.assertTrue( legacy_facade.get_sessionmaker() is enginefacade._context_manager._factory._writer_maker ) def test_legacy_facades_from_different_context_managers(self): transaction_context1 = enginefacade.transaction_context() transaction_context2 = enginefacade.transaction_context() transaction_context1.configure(connection='sqlite:///?conn1') transaction_context2.configure(connection='sqlite:///?conn2') legacy1 = transaction_context1.get_legacy_facade() legacy2 = transaction_context2.get_legacy_facade() self.assertNotEqual(legacy1, legacy2) def test_legacy_not_started(self): factory = enginefacade._TransactionFactory() self.assertRaises( exception.CantStartEngineError, factory.get_legacy_facade ) legacy_facade = factory.get_legacy_facade() self.assertRaises( exception.CantStartEngineError, legacy_facade.get_session ) self.assertRaises( exception.CantStartEngineError, legacy_facade.get_session ) self.assertRaises( exception.CantStartEngineError, legacy_facade.get_engine ) class ThreadingTest(db_test_base._DbTestCase): """Test copy/pickle on new threads using real connections and sessions.""" def _assert_ctx_connection(self, context, connection): self.assertIs(context.connection, connection) def _assert_ctx_session(self, context, session): self.assertIs(context.session, session) def _patch_thread_ident(self): self.ident = 1 test_instance = self class MockThreadingLocal(object): def __init__(self): self.__dict__['state'] = collections.defaultdict(dict) def __deepcopy__(self, memo): return self def __getattr__(self, key): ns = self.state[test_instance.ident] try: return ns[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): ns = self.state[test_instance.ident] ns[key] = value def __delattr__(self, key): ns = self.state[test_instance.ident] try: del ns[key] except KeyError: raise AttributeError(key) return mock.patch.object( enginefacade, "_TransactionContextTLocal", MockThreadingLocal) def test_thread_ctxmanager_writer(self): context = oslo_context.RequestContext() with self._patch_thread_ident(): with enginefacade.writer.using(context) as session: self._assert_ctx_session(context, session) self.ident = 2 with enginefacade.reader.using(context) as sess2: # new session self.assertIsNot(sess2, session) # thread local shows the new 
session self._assert_ctx_session(context, sess2) self.ident = 1 with enginefacade.reader.using(context) as sess3: self.assertIs(sess3, session) self._assert_ctx_session(context, session) def test_thread_ctxmanager_writer_connection(self): context = oslo_context.RequestContext() with self._patch_thread_ident(): with enginefacade.writer.connection.using(context) as conn: self._assert_ctx_connection(context, conn) self.ident = 2 with enginefacade.reader.connection.using(context) as conn2: # new connection self.assertIsNot(conn2, conn) # thread local shows the new connection self._assert_ctx_connection(context, conn2) with enginefacade.reader.connection.using( context) as conn3: # we still get the right connection even though # this context is not the "copied" context self.assertIsNot(conn3, conn) self.assertIs(conn3, conn2) self.ident = 1 with enginefacade.reader.connection.using(context) as conn3: self.assertIs(conn3, conn) self._assert_ctx_connection(context, conn) def test_thread_ctxmanager_switch_styles(self): @enginefacade.writer.connection def go_one(context): self.assertRaises( exception.ContextNotRequestedError, getattr, context, "session" ) self.assertIsNotNone(context.connection) self.ident = 2 go_two(context) self.ident = 1 self.assertRaises( exception.ContextNotRequestedError, getattr, context, "session" ) self.assertIsNotNone(context.connection) @enginefacade.reader def go_two(context): self.assertRaises( exception.ContextNotRequestedError, getattr, context, "connection" ) self.assertIsNotNone(context.session) context = oslo_context.RequestContext() with self._patch_thread_ident(): go_one(context) def test_thread_decorator_writer(self): sessions = set() @enginefacade.writer def go_one(context): sessions.add(context.session) self.ident = 2 go_two(context) self.ident = 1 go_three(context) @enginefacade.reader def go_two(context): assert context.session not in sessions @enginefacade.reader def go_three(context): assert context.session in sessions context = oslo_context.RequestContext() with self._patch_thread_ident(): go_one(context) def test_thread_decorator_writer_connection(self): connections = set() @enginefacade.writer.connection def go_one(context): connections.add(context.connection) self.ident = 2 go_two(context) self.ident = 1 go_three(context) @enginefacade.reader.connection def go_two(context): assert context.connection not in connections @enginefacade.reader def go_three(context): assert context.connection in connections context = oslo_context.RequestContext() with self._patch_thread_ident(): go_one(context) def test_contexts_picklable(self): context = oslo_context.RequestContext() with enginefacade.writer.using(context) as session: self._assert_ctx_session(context, session) pickled = pickle.dumps(context) unpickled = pickle.loads(pickled) with enginefacade.writer.using(unpickled) as session2: self._assert_ctx_session(unpickled, session2) assert session is not session2 class LiveFacadeTest(db_test_base._DbTestCase): """test using live SQL with test-provisioned databases. Several of these tests require that multiple transactions run simultaenously; as the default SQLite :memory: connection can't achieve this, opportunistic test implementations against MySQL and PostgreSQL are supplied. 
""" def setUp(self): super(LiveFacadeTest, self).setUp() metadata = MetaData() user_table = Table( 'user', metadata, Column('id', Integer, primary_key=True), Column('name', String(30)), Column('favorite_color', String(10), default='yellow'), mysql_engine='InnoDB' ) self.user_table = user_table metadata.create_all(self.engine) self.addCleanup(metadata.drop_all, self.engine) reg = registry() class User(object): def __init__(self, name): self.name = name reg.map_imperatively(User, user_table) self.User = User def _assert_ctx_connection(self, context, connection): self.assertIs(context.connection, connection) def _assert_ctx_session(self, context, session): self.assertIs(context.session, session) def test_transaction_committed(self): context = oslo_context.RequestContext() with enginefacade.writer.using(context) as session: session.add(self.User(name="u1")) session = self.sessionmaker(autocommit=False) with session.begin(): self.assertEqual( "u1", session.query(self.User.name).scalar() ) def test_transaction_rollback(self): context = oslo_context.RequestContext() class MyException(Exception): pass @enginefacade.writer def go(context): context.session.add(self.User(name="u1")) context.session.flush() raise MyException("a test") self.assertRaises(MyException, go, context) session = self.sessionmaker(autocommit=False) with session.begin(): self.assertEqual( None, session.query(self.User.name).scalar() ) @mock.patch.object(Session, 'commit') @mock.patch.object(Session, 'rollback') def test_save_and_reraise_when_rollback_exception(self, rollback_patch, commit_patch): context = oslo_context.RequestContext() log = self.useFixture(fixtures.FakeLogger()) class RollbackException(Exception): pass class CommitException(Exception): pass commit_patch.side_effect = CommitException() rollback_patch.side_effect = RollbackException() @enginefacade.writer def go_session(context): context.session.add(self.User(name="u1")) self.assertRaises(RollbackException, go_session, context) self.assertIn('CommitException', log.output) def test_flush_on_subtransaction(self): facade = enginefacade.transaction_context() facade.configure( connection=self.engine.url, flush_on_subtransaction=True) facade.patch_engine(self.engine) context = oslo_context.RequestContext() with facade.writer.using(context): with facade.writer.using(context): u = self.User(name="u1") context.session.add(u) self.assertEqual('yellow', u.favorite_color) def test_flush_on_subtransaction_default_off(self): context = oslo_context.RequestContext() facade = enginefacade.transaction_context() facade.configure(connection=self.engine.url) facade.patch_engine(self.engine) with facade.writer.using(context): with facade.writer.using(context): u = self.User(name="u1") context.session.add(u) self.assertIsNone(u.favorite_color) self.assertEqual('yellow', u.favorite_color) def test_context_deepcopy_on_session(self): context = oslo_context.RequestContext() with enginefacade.writer.using(context) as session: ctx2 = copy.deepcopy(context) self._assert_ctx_session(ctx2, session) with enginefacade.writer.using(ctx2) as s2: self.assertIs(session, s2) self._assert_ctx_session(ctx2, s2) s2.add(self.User(name="u1")) s2.flush() session = self.sessionmaker(autocommit=False) with session.begin(): self.assertEqual( "u1", session.query(self.User.name).scalar() ) def test_context_deepcopy_on_connection(self): context = oslo_context.RequestContext() with enginefacade.writer.connection.using(context) as conn: ctx2 = copy.deepcopy(context) self._assert_ctx_connection(ctx2, conn) with 
enginefacade.writer.connection.using(ctx2) as conn2: self.assertIs(conn, conn2) self._assert_ctx_connection(ctx2, conn2) conn2.execute(self.user_table.insert().values(name="u1")) self._assert_ctx_connection(ctx2, conn2) session = self.sessionmaker(autocommit=False) with session.begin(): self.assertEqual( "u1", session.query(self.User.name).scalar() ) @db_test_base.backend_specific("postgresql", "mysql") def test_external_session_transaction(self): context = oslo_context.RequestContext() with enginefacade.writer.using(context) as session: session.add(self.User(name="u1")) session.flush() with enginefacade.writer.independent.using(context) as s2: # transaction() uses a new session self.assertIsNot(s2, session) self._assert_ctx_session(context, s2) # rows within a distinct transaction s2.add(self.User(name="u2")) # it also takes over the global enginefacade # within the context with enginefacade.writer.using(context) as s3: self.assertIs(s3, s2) s3.add(self.User(name="u3")) self._assert_ctx_session(context, session) # rollback the "outer" transaction session.rollback() # add more state on the "outer" transaction session.begin() session.add(self.User(name="u4")) session = self.sessionmaker(autocommit=False) # inner transaction + second part of "outer" transaction were committed with session.begin(): self.assertEqual( [("u2",), ("u3",), ("u4", )], session.query( self.User.name).order_by(self.User.name).all() ) def test_savepoint_transaction_decorator(self): context = oslo_context.RequestContext() @enginefacade.writer def go1(context): session = context.session session.add(self.User(name="u1")) session.flush() try: go2(context) except Exception: pass go3(context) session.add(self.User(name="u4")) @enginefacade.writer.savepoint def go2(context): session = context.session session.add(self.User(name="u2")) raise Exception("nope") @enginefacade.writer.savepoint def go3(context): session = context.session session.add(self.User(name="u3")) go1(context) session = self.sessionmaker(autocommit=False) # inner transaction + second part of "outer" transaction were committed with session.begin(): self.assertEqual( [("u1",), ("u3",), ("u4", )], session.query( self.User.name).order_by(self.User.name).all() ) def test_savepoint_transaction(self): context = oslo_context.RequestContext() with enginefacade.writer.using(context) as session: session.add(self.User(name="u1")) session.flush() try: with enginefacade.writer.savepoint.using(context) as session: session.add(self.User(name="u2")) raise Exception("nope") except Exception: pass with enginefacade.writer.savepoint.using(context) as session: session.add(self.User(name="u3")) session.add(self.User(name="u4")) session = self.sessionmaker(autocommit=False) # inner transaction + second part of "outer" transaction were committed with session.begin(): self.assertEqual( [("u1",), ("u3",), ("u4", )], session.query( self.User.name).order_by(self.User.name).all() ) @db_test_base.backend_specific("postgresql", "mysql") def test_external_session_transaction_decorator(self): context = oslo_context.RequestContext() @enginefacade.writer def go1(context): session = context.session session.add(self.User(name="u1")) session.flush() go2(context, session) self._assert_ctx_session(context, session) # rollback the "outer" transaction session.rollback() # add more state on the "outer" transaction session.begin() session.add(self.User(name="u4")) @enginefacade.writer.independent def go2(context, session): s2 = context.session # uses a new session self.assertIsNot(s2, session) 
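# NOTE: writer.independent opens a brand-new session whose transaction
# commits independently of the caller's; while it is active it also becomes
# the session handed to plain @enginefacade.writer callees for this context,
# as the nested writer.using() block below demonstrates.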
self._assert_ctx_session(context, s2) # rows within a distinct transaction s2.add(self.User(name="u2")) # it also takes over the global enginefacade # within the context with enginefacade.writer.using(context) as s3: self.assertIs(s3, s2) s3.add(self.User(name="u3")) go1(context) session = self.sessionmaker(autocommit=False) # inner transaction + second part of "outer" transaction were committed with session.begin(): self.assertEqual( [("u2",), ("u3",), ("u4", )], session.query( self.User.name).order_by(self.User.name).all() ) @db_test_base.backend_specific("postgresql", "mysql") def test_external_connection_transaction(self): context = oslo_context.RequestContext() with enginefacade.writer.connection.using(context) as connection: connection.execute(self.user_table.insert().values(name="u1")) # transaction() uses a new Connection with enginefacade.writer.independent.connection.\ using(context) as c2: self.assertIsNot(c2, connection) self._assert_ctx_connection(context, c2) # rows within a distinct transaction c2.execute(self.user_table.insert().values(name="u2")) # it also takes over the global enginefacade # within the context with enginefacade.writer.connection.using(context) as c3: self.assertIs(c2, c3) c3.execute(self.user_table.insert().values(name="u3")) self._assert_ctx_connection(context, connection) # rollback the "outer" transaction transaction_ctx = context.transaction_ctx transaction_ctx.transaction.rollback() transaction_ctx.transaction = connection.begin() # add more state on the "outer" transaction connection.execute(self.user_table.insert().values(name="u4")) session = self.sessionmaker(autocommit=False) with session.begin(): self.assertEqual( [("u2",), ("u3",), ("u4", )], session.query( self.User.name).order_by(self.User.name).all() ) @db_test_base.backend_specific("postgresql", "mysql") def test_external_writer_in_reader(self): context = oslo_context.RequestContext() with enginefacade.reader.using(context) as session: ping = session.scalar(select(1)) self.assertEqual(1, ping) # we're definitely a reader @enginefacade.writer def go(ctx): pass exc = self.assertRaises(TypeError, go, context) self.assertEqual( "Can't upgrade a READER transaction to a " "WRITER mid-transaction", exc.args[0]) # but we can do a writer on a new transaction with enginefacade.writer.independent.using(context) as sess2: self.assertIsNot(sess2, session) self._assert_ctx_session(context, sess2) session.add(self.User(name="u1_nocommit")) sess2.add(self.User(name="u1_commit")) user = session.query(self.User).first() self.assertEqual("u1_commit", user.name) session = self.sessionmaker(autocommit=False) with session.begin(): self.assertEqual( [("u1_commit",)], session.query( self.User.name).order_by(self.User.name).all() ) def test_replace_scope(self): # "timeout" is an argument accepted by # the pysqlite dialect, which we set here to ensure # that even in an all-sqlite test, we test that the URL # is different in the context we are looking for alt_connection = "sqlite:///?timeout=90" alt_mgr1 = enginefacade.transaction_context() alt_mgr1.configure( connection=alt_connection, ) @enginefacade.writer def go1(context): s1 = context.session self.assertEqual( s1.bind.url, enginefacade._context_manager._factory._writer_engine.url) self.assertIs( s1.bind, enginefacade._context_manager._factory._writer_engine) self.assertEqual(s1.bind.url, self.engine.url) with alt_mgr1.replace.using(context): go2(context) go4(context) @enginefacade.writer def go2(context): s2 = context.session # factory is not replaced 
globally... self.assertIsNot( enginefacade._context_manager._factory._writer_engine, alt_mgr1._factory._writer_engine ) # but it is replaced for us self.assertIs(s2.bind, alt_mgr1._factory._writer_engine) self.assertEqual( str(s2.bind.url), alt_connection) go3(context) @enginefacade.reader def go3(context): s3 = context.session # in a call of a call, we still have the alt URL self.assertIs(s3.bind, alt_mgr1._factory._writer_engine) self.assertEqual( str(s3.bind.url), alt_connection) @enginefacade.writer def go4(context): s4 = context.session # outside the "replace" context, all is back to normal self.assertIs(s4.bind, self.engine) self.assertEqual( s4.bind.url, self.engine.url) context = oslo_context.RequestContext() go1(context) self.assertIsNot( enginefacade._context_manager._factory._writer_engine, alt_mgr1._factory._writer_engine ) def test_replace_scope_only_global_eng(self): # "timeout" is an argument accepted by # the pysqlite dialect, which we set here to ensure # that even in an all-sqlite test, we test that the URL # is different in the context we are looking for alt_connection1 = "sqlite:///?timeout=90" alt_mgr1 = enginefacade.transaction_context() alt_mgr1.configure( connection=alt_connection1, ) alt_connection2 = "sqlite:///?timeout=120" alt_mgr2 = enginefacade.transaction_context() alt_mgr2.configure( connection=alt_connection2, ) @enginefacade.writer def go1(context): s1 = context.session # global engine self.assertEqual(s1.bind.url, self.engine.url) # now replace global engine... with alt_mgr1.replace.using(context): go2(context) # and back go6(context) @enginefacade.writer def go2(context): s2 = context.session # we have the replace-the-global engine self.assertEqual(str(s2.bind.url), alt_connection1) self.assertIs(s2.bind, alt_mgr1._factory._writer_engine) go3(context) @alt_mgr2.writer def go3(context): s3 = context.session # we don't use the global engine in the first place. # make sure our own factory still used. self.assertEqual(str(s3.bind.url), alt_connection2) self.assertIs(s3.bind, alt_mgr2._factory._writer_engine) go4(context) @enginefacade.writer def go4(context): s4 = context.session # we *do* use the global, so we still want the replacement. self.assertEqual(str(s4.bind.url), alt_connection1) self.assertIs(s4.bind, alt_mgr1._factory._writer_engine) @enginefacade.writer def go5(context): s5 = context.session # ...and here also self.assertEqual(str(s5.bind.url), alt_connection1) self.assertIs(s5.bind, alt_mgr1._factory._writer_engine) @enginefacade.writer def go6(context): s6 = context.session # ...but not here! self.assertEqual(str(s6.bind.url), str(self.engine.url)) self.assertIs(s6.bind, self.engine) context = oslo_context.RequestContext() go1(context) class MySQLLiveFacadeTest( db_test_base._MySQLOpportunisticTestCase, LiveFacadeTest, ): pass class PGLiveFacadeTest( db_test_base._PostgreSQLOpportunisticTestCase, LiveFacadeTest, ): pass class ConfigOptionsTest(test_base.BaseTestCase): def test_all_options(self): """test that everything in CONF.database.iteritems() is accepted. There's a handful of options in oslo.db.options that seem to have no meaning, but need to be accepted. In particular, Cinder and maybe others are doing exactly this call. 
""" factory = enginefacade._TransactionFactory() cfg.CONF.register_opts(options.database_opts, 'database') factory.configure(**dict(cfg.CONF.database.items())) def test_options_not_supported(self): factory = enginefacade._TransactionFactory() with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") factory.configure( fake1='x', connection_recycle_time=200, wrong2='y') self.assertEqual(1, len(w)) self.assertTrue( issubclass(w[-1].category, warning.NotSupportedWarning)) self.assertEqual( "Configuration option(s) ['fake1', 'wrong2'] not supported", str(w[-1].message) ) def test_no_engine(self): factory = enginefacade._TransactionFactory() self.assertRaises( exception.CantStartEngineError, factory._create_session, enginefacade._WRITER ) self.assertRaises( exception.CantStartEngineError, factory._create_session, enginefacade._WRITER ) class TestTransactionFactoryCallback(test_base.BaseTestCase): def test_setup_for_connection_called_with_profiler(self): context_manager = enginefacade.transaction_context() context_manager.configure(connection='sqlite://') hook = mock.Mock() context_manager.append_on_engine_create(hook) self.assertEqual( [hook], context_manager._factory._facade_cfg['on_engine_create']) @context_manager.reader def go(context): hook.assert_called_once_with(context.session.bind) go(oslo_context.RequestContext()) # TODO(zzzeek): test configuration options, e.g. like # test_sqlalchemy->test_creation_from_config ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_exc_filters.py0000664000175000017500000015472000000000000024037 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test exception filters applied to engines.""" import contextlib import itertools from unittest import mock import sqlalchemy as sqla from sqlalchemy import event import sqlalchemy.exc from sqlalchemy.orm import declarative_base from sqlalchemy.orm import registry from sqlalchemy import sql from oslo_db import exception from oslo_db.sqlalchemy import compat from oslo_db.sqlalchemy import engines from oslo_db.sqlalchemy import exc_filters from oslo_db.sqlalchemy import utils from oslo_db.tests import base as test_base from oslo_db.tests.sqlalchemy import base as db_test_base from oslo_db.tests import utils as test_utils _TABLE_NAME = '__tmp__test__tmp__' class _SQLAExceptionMatcher(object): def assertInnerException( self, matched, exception_type, message, sql=None, params=None): exc = matched.inner_exception self.assertSQLAException(exc, exception_type, message, sql, params) def assertSQLAException( self, exc, exception_type, message, sql=None, params=None): if isinstance(exception_type, (type, tuple)): self.assertTrue(issubclass(exc.__class__, exception_type)) else: self.assertEqual(exception_type, exc.__class__.__name__) if isinstance(message, tuple): self.assertEqual( [m.lower() if isinstance(m, str) else m for m in message], [a.lower() if isinstance(a, str) else a for a in exc.orig.args] ) else: self.assertEqual(message.lower(), str(exc.orig).lower()) if sql is not None: if params is not None: if '?' in exc.statement: self.assertEqual(sql, exc.statement) self.assertEqual(params, exc.params) else: self.assertEqual(sql % params, exc.statement % exc.params) else: self.assertEqual(sql, exc.statement) class TestsExceptionFilter(_SQLAExceptionMatcher, test_base.BaseTestCase): class Error(Exception): """DBAPI base error. This exception and subclasses are used in a mock context within these tests. """ class DataError(Error): pass class OperationalError(Error): pass class InterfaceError(Error): pass class InternalError(Error): pass class IntegrityError(Error): pass class ProgrammingError(Error): pass class TransactionRollbackError(OperationalError): """Special psycopg2-only error class. SQLAlchemy has an issue with this per issue #3075: https://bitbucket.org/zzzeek/sqlalchemy/issue/3075/ """ def setUp(self): super(TestsExceptionFilter, self).setUp() self.engine = sqla.create_engine("sqlite://") exc_filters.register_engine(self.engine) self.engine.connect().close() # initialize @contextlib.contextmanager def _dbapi_fixture(self, dialect_name, is_disconnect=False): engine = self.engine with test_utils.nested( mock.patch.object(engine.dialect.dbapi, "Error", self.Error), mock.patch.object(engine.dialect, "name", dialect_name), mock.patch.object(engine.dialect, "is_disconnect", lambda *args: is_disconnect) ): yield @contextlib.contextmanager def _fixture(self, dialect_name, exception, is_disconnect=False): def do_execute(self, cursor, statement, parameters, **kw): raise exception engine = self.engine # ensure the engine has done its initial checks against the # DB as we are going to be removing its ability to execute a # statement self.engine.connect().close() patches = [ mock.patch.object(engine.dialect, "do_execute", do_execute), # replace the whole DBAPI rather than patching "Error" # as some DBAPIs might not be patchable (?) 
mock.patch.object(engine.dialect, "dbapi", mock.Mock(Error=self.Error)), mock.patch.object(engine.dialect, "name", dialect_name), mock.patch.object(engine.dialect, "is_disconnect", lambda *args: is_disconnect) ] if compat.sqla_2: patches.append( mock.patch.object( engine.dialect, "loaded_dbapi", mock.Mock(Error=self.Error), ) ) with test_utils.nested(*patches): yield def _run_test(self, dialect_name, statement, raises, expected, is_disconnect=False, params=None): with self._fixture(dialect_name, raises, is_disconnect=is_disconnect): with self.engine.connect() as conn: matched = self.assertRaises( expected, conn.execute, sql.text(statement), params ) return matched class TestFallthroughsAndNonDBAPI(TestsExceptionFilter): def test_generic_dbapi(self): matched = self._run_test( "mysql", "select you_made_a_programming_error", self.ProgrammingError("Error 123, you made a mistake"), exception.DBError ) self.assertInnerException( matched, "ProgrammingError", "Error 123, you made a mistake", 'select you_made_a_programming_error', ()) def test_generic_dbapi_disconnect(self): matched = self._run_test( "mysql", "select the_db_disconnected", self.InterfaceError("connection lost"), exception.DBConnectionError, is_disconnect=True ) self.assertInnerException( matched, "InterfaceError", "connection lost", "select the_db_disconnected", ()), def test_operational_dbapi_disconnect(self): matched = self._run_test( "mysql", "select the_db_disconnected", self.OperationalError("connection lost"), exception.DBConnectionError, is_disconnect=True ) self.assertInnerException( matched, "OperationalError", "connection lost", "select the_db_disconnected", ()), def test_operational_error_asis(self): """Test operational errors. test that SQLAlchemy OperationalErrors that aren't disconnects are passed through without wrapping. """ matched = self._run_test( "mysql", "select some_operational_error", self.OperationalError("some op error"), sqla.exc.OperationalError ) self.assertSQLAException( matched, "OperationalError", "some op error" ) def test_unicode_encode(self): # intentionally generate a UnicodeEncodeError, as its # constructor is quite complicated and seems to be non-public # or at least not documented anywhere. 
uee_ref = None try: '\u2435'.encode('ascii') except UnicodeEncodeError as uee: # Python3.x added new scoping rules here (sadly) # http://legacy.python.org/dev/peps/pep-3110/#semantic-changes uee_ref = uee self._run_test( 'postgresql', 'select \u2435', uee_ref, exception.DBInvalidUnicodeParameter ) def test_garden_variety(self): matched = self._run_test( "mysql", "select some_thing_that_breaks", AttributeError("mysqldb has an attribute error"), exception.DBError ) self.assertEqual("mysqldb has an attribute error", matched.args[0]) class TestNonExistentConstraint( _SQLAExceptionMatcher, db_test_base._DbTestCase, ): def setUp(self): super(TestNonExistentConstraint, self).setUp() meta = sqla.MetaData() self.table_1 = sqla.Table( "resource_foo", meta, sqla.Column("id", sqla.Integer, primary_key=True), mysql_engine='InnoDB', mysql_charset='utf8', ) self.table_1.create(self.engine) class TestNonExistentConstraintPostgreSQL( TestNonExistentConstraint, db_test_base._PostgreSQLOpportunisticTestCase, ): def test_raise(self): with self.engine.connect() as conn: matched = self.assertRaises( exception.DBNonExistentConstraint, conn.execute, sqla.schema.DropConstraint( sqla.ForeignKeyConstraint(["id"], ["baz.id"], name="bar_fkey", table=self.table_1)), ) self.assertInnerException( matched, "ProgrammingError", "constraint \"bar_fkey\" of relation " "\"resource_foo\" does not exist\n", "ALTER TABLE resource_foo DROP CONSTRAINT bar_fkey", ) self.assertEqual("resource_foo", matched.table) self.assertEqual("bar_fkey", matched.constraint) class TestNonExistentConstraintMySQL( TestNonExistentConstraint, db_test_base._MySQLOpportunisticTestCase, ): def test_raise(self): with self.engine.connect() as conn: matched = self.assertRaises( exception.DBNonExistentConstraint, conn.execute, sqla.schema.DropConstraint( sqla.ForeignKeyConstraint(["id"], ["baz.id"], name="bar_fkey", table=self.table_1)), ) # NOTE(jd) Cannot check precisely with assertInnerException since MySQL # error are not the same depending on its version… self.assertIsInstance(matched.inner_exception, (sqlalchemy.exc.InternalError, sqlalchemy.exc.OperationalError)) if matched.table is not None: self.assertEqual("resource_foo", matched.table) if matched.constraint is not None: self.assertEqual("bar_fkey", matched.constraint) class TestNonExistentTable( _SQLAExceptionMatcher, db_test_base._DbTestCase, ): def setUp(self): super(TestNonExistentTable, self).setUp() self.meta = sqla.MetaData() self.table_1 = sqla.Table( "foo", self.meta, sqla.Column("id", sqla.Integer, primary_key=True), mysql_engine='InnoDB', mysql_charset='utf8', ) def test_raise(self): with self.engine.connect() as conn: matched = self.assertRaises( exception.DBNonExistentTable, conn.execute, sqla.schema.DropTable(self.table_1), ) self.assertInnerException( matched, "OperationalError", "no such table: foo", "\nDROP TABLE foo", ) self.assertEqual("foo", matched.table) class TestNonExistentTablePostgreSQL( TestNonExistentTable, db_test_base._PostgreSQLOpportunisticTestCase, ): def test_raise(self): with self.engine.connect() as conn: matched = self.assertRaises( exception.DBNonExistentTable, conn.execute, sqla.schema.DropTable(self.table_1), ) self.assertInnerException( matched, "ProgrammingError", "table \"foo\" does not exist\n", "\nDROP TABLE foo", ) self.assertEqual("foo", matched.table) class TestNonExistentTableMySQL( TestNonExistentTable, db_test_base._MySQLOpportunisticTestCase, ): def test_raise(self): with self.engine.connect() as conn: matched = self.assertRaises( 
exception.DBNonExistentTable, conn.execute, sqla.schema.DropTable(self.table_1), ) # NOTE(jd) Cannot check precisely with assertInnerException since MySQL # error are not the same depending on its version… self.assertIsInstance(matched.inner_exception, (sqlalchemy.exc.InternalError, sqlalchemy.exc.OperationalError)) self.assertEqual("foo", matched.table) class TestNonExistentDatabase( _SQLAExceptionMatcher, db_test_base._DbTestCase, ): def setUp(self): super(TestNonExistentDatabase, self).setUp() url = utils.make_url(self.engine.url) self.url = url.set(database="non_existent_database") def test_raise(self): matched = self.assertRaises( exception.DBNonExistentDatabase, engines.create_engine, utils.make_url( 'sqlite:////non_existent_dir/non_existent_database') ) self.assertIsNone(matched.database) self.assertInnerException( matched, sqlalchemy.exc.OperationalError, 'unable to open database file', ) class TestNonExistentDatabaseMySQL( TestNonExistentDatabase, db_test_base._MySQLOpportunisticTestCase, ): def test_raise(self): matched = self.assertRaises( exception.DBNonExistentDatabase, engines.create_engine, self.url ) self.assertEqual('non_existent_database', matched.database) # NOTE(rpodolyaka) cannot check precisely with assertInnerException # since MySQL errors are not the same depending on its version self.assertIsInstance( matched.inner_exception, (sqlalchemy.exc.InternalError, sqlalchemy.exc.OperationalError), ) class TestNonExistentDatabasePostgreSQL( TestNonExistentDatabase, db_test_base._PostgreSQLOpportunisticTestCase, ): def test_raise(self): matched = self.assertRaises( exception.DBNonExistentDatabase, engines.create_engine, self.url ) self.assertEqual('non_existent_database', matched.database) # NOTE(stephenfin): As above, we cannot use assertInnerException since # the error messages vary depending on the version of PostgreSQL self.assertIsInstance( matched.inner_exception, sqlalchemy.exc.OperationalError, ) # On Postgres 13: # fatal: database "non_existent_database" does not exist # On Postgres 14 or later: # connection to server at "localhost" (::1), port 5432 failed: fatal: # database "non_existent_database" does not exist self.assertIn( 'fatal: database "non_existent_database" does not exist', str(matched.inner_exception).lower(), ) class TestReferenceErrorSQLite( _SQLAExceptionMatcher, db_test_base._DbTestCase, ): def setUp(self): super(TestReferenceErrorSQLite, self).setUp() meta = sqla.MetaData() self.table_1 = sqla.Table( "resource_foo", meta, sqla.Column("id", sqla.Integer, primary_key=True), sqla.Column("foo", sqla.Integer), mysql_engine='InnoDB', mysql_charset='utf8', ) self.table_1.create(self.engine) self.table_2 = sqla.Table( "resource_entity", meta, sqla.Column("id", sqla.Integer, primary_key=True), sqla.Column("foo_id", sqla.Integer, sqla.ForeignKey("resource_foo.id", name="foo_fkey")), mysql_engine='InnoDB', mysql_charset='utf8', ) self.table_2.create(self.engine) def test_raise(self): connection = self.engine.raw_connection() try: cursor = connection.cursor() cursor.execute('PRAGMA foreign_keys = ON') cursor.close() finally: connection.close() with self.engine.connect() as conn: matched = self.assertRaises( exception.DBReferenceError, conn.execute, self.table_2.insert().values(id=1, foo_id=2) ) self.assertInnerException( matched, "IntegrityError", "FOREIGN KEY constraint failed", 'INSERT INTO resource_entity (id, foo_id) VALUES (?, ?)', (1, 2) ) self.assertIsNone(matched.table) self.assertIsNone(matched.constraint) self.assertIsNone(matched.key) 
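# NOTE: SQLite's generic "FOREIGN KEY constraint failed" message carries no
# table, constraint or key details, so the DBReferenceError attributes stay
# None here; the PostgreSQL and MySQL subclasses further down assert the
# concrete values parsed from their richer error messages.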
self.assertIsNone(matched.key_table) def test_raise_delete(self): connection = self.engine.raw_connection() try: cursor = connection.cursor() cursor.execute('PRAGMA foreign_keys = ON') cursor.close() finally: connection.close() with self.engine.connect() as conn: with conn.begin(): conn.execute(self.table_1.insert().values(id=1234, foo=42)) conn.execute( self.table_2.insert().values(id=4321, foo_id=1234)) matched = self.assertRaises( exception.DBReferenceError, conn.execute, self.table_1.delete() ) self.assertInnerException( matched, "IntegrityError", "foreign key constraint failed", "DELETE FROM resource_foo", (), ) self.assertIsNone(matched.table) self.assertIsNone(matched.constraint) self.assertIsNone(matched.key) self.assertIsNone(matched.key_table) class TestReferenceErrorPostgreSQL( TestReferenceErrorSQLite, db_test_base._PostgreSQLOpportunisticTestCase, ): def test_raise(self): with self.engine.connect() as conn: params = {'id': 1, 'foo_id': 2} matched = self.assertRaises( exception.DBReferenceError, conn.execute, self.table_2.insert().values(**params) ) self.assertInnerException( matched, "IntegrityError", "insert or update on table \"resource_entity\" " "violates foreign key constraint \"foo_fkey\"\nDETAIL: Key " "(foo_id)=(2) is not present in table \"resource_foo\".\n", "INSERT INTO resource_entity (id, foo_id) VALUES (%(id)s, " "%(foo_id)s)", params, ) self.assertEqual("resource_entity", matched.table) self.assertEqual("foo_fkey", matched.constraint) self.assertEqual("foo_id", matched.key) self.assertEqual("resource_foo", matched.key_table) def test_raise_delete(self): with self.engine.connect() as conn: with conn.begin(): conn.execute(self.table_1.insert().values(id=1234, foo=42)) conn.execute( self.table_2.insert().values(id=4321, foo_id=1234)) with conn.begin(): matched = self.assertRaises( exception.DBReferenceError, conn.execute, self.table_1.delete() ) self.assertInnerException( matched, "IntegrityError", "update or delete on table \"resource_foo\" violates foreign key " "constraint \"foo_fkey\" on table \"resource_entity\"\n" "DETAIL: Key (id)=(1234) is still referenced from " "table \"resource_entity\".\n", "DELETE FROM resource_foo", {}, ) self.assertEqual("resource_foo", matched.table) self.assertEqual("foo_fkey", matched.constraint) self.assertEqual("id", matched.key) self.assertEqual("resource_entity", matched.key_table) class TestReferenceErrorMySQL( TestReferenceErrorSQLite, db_test_base._MySQLOpportunisticTestCase, ): def test_raise(self): with self.engine.connect() as conn: matched = self.assertRaises( exception.DBReferenceError, conn.execute, self.table_2.insert().values(id=1, foo_id=2) ) # NOTE(jd) Cannot check precisely with assertInnerException since MySQL # error are not the same depending on its version… self.assertIsInstance(matched.inner_exception, sqlalchemy.exc.IntegrityError) self.assertEqual(matched.inner_exception.orig.args[0], 1452) self.assertEqual("resource_entity", matched.table) self.assertEqual("foo_fkey", matched.constraint) self.assertEqual("foo_id", matched.key) self.assertEqual("resource_foo", matched.key_table) def test_raise_ansi_quotes(self): with self.engine.connect() as conn: conn.detach() # will not be returned to the pool when closed # this is incompatible with some internals of the engine conn.execute(sql.text("SET SESSION sql_mode = 'ANSI';")) matched = self.assertRaises( exception.DBReferenceError, conn.execute, self.table_2.insert().values(id=1, foo_id=2) ) # NOTE(jd) Cannot check precisely with assertInnerException since MySQL 
# error are not the same depending on its version… self.assertIsInstance(matched.inner_exception, sqlalchemy.exc.IntegrityError) self.assertEqual(matched.inner_exception.orig.args[0], 1452) self.assertEqual("resource_entity", matched.table) self.assertEqual("foo_fkey", matched.constraint) self.assertEqual("foo_id", matched.key) self.assertEqual("resource_foo", matched.key_table) def test_raise_delete(self): with self.engine.connect() as conn, conn.begin(): conn.execute(self.table_1.insert().values(id=1234, foo=42)) conn.execute(self.table_2.insert().values(id=4321, foo_id=1234)) matched = self.assertRaises( exception.DBReferenceError, conn.execute, self.table_1.delete() ) # NOTE(jd) Cannot check precisely with assertInnerException since MySQL # error are not the same depending on its version… self.assertIsInstance(matched.inner_exception, sqlalchemy.exc.IntegrityError) self.assertEqual(1451, matched.inner_exception.orig.args[0]) self.assertEqual("resource_entity", matched.table) self.assertEqual("foo_fkey", matched.constraint) self.assertEqual("foo_id", matched.key) self.assertEqual("resource_foo", matched.key_table) class TestExceptionCauseMySQLSavepoint( db_test_base._MySQLOpportunisticTestCase, ): def setUp(self): super(TestExceptionCauseMySQLSavepoint, self).setUp() Base = declarative_base() class A(Base): __tablename__ = 'a' id = sqla.Column(sqla.Integer, primary_key=True) __table_args__ = {'mysql_engine': 'InnoDB'} Base.metadata.create_all(self.engine) self.A = A def test_cause_for_failed_flush_plus_no_savepoint(self): session = self.sessionmaker() with session.begin(): session.add(self.A(id=1)) try: with session.begin(): try: with session.begin_nested(): session.execute(sql.text("rollback")) session.add(self.A(id=1)) # outermost is the failed SAVEPOINT rollback # from the "with session.begin_nested()" except exception.DBError as dbe_inner: # in SQLA 1.1+, the rollback() method of Session # catches the error and repairs the state of the # session even though the SAVEPOINT was lost; # the net result here is that one exception is thrown # instead of two. This is SQLAlchemy ticket #3680 self.assertIsInstance( dbe_inner.cause, exception.DBDuplicateEntry) except exception.DBError as dbe_outer: self.AssertIsInstance(dbe_outer.cause, exception.DBDuplicateEntry) # resets itself afterwards try: with session.begin(): session.add(self.A(id=1)) except exception.DBError as dbe_outer: self.assertIsNone(dbe_outer.cause) def test_rollback_doesnt_interfere_with_killed_conn(self): session = self.sessionmaker() session.begin() try: session.execute(sql.text("select 1")) # close underying DB connection compat.driver_connection(session.connection()).close() # alternate approach, but same idea: # conn_id = session.scalar("select connection_id()") # session.execute("kill connection %s" % conn_id) # try using it, will raise an error session.execute(sql.text("select 1")) except exception.DBConnectionError: # issue being tested is that this session.rollback() # does not itself try to re-connect and raise another # error. 
session.rollback() else: assert False, "no exception raised" def test_savepoint_rollback_doesnt_interfere_with_killed_conn(self): session = self.sessionmaker() session.begin() try: session.begin_nested() session.execute(sql.text("select 1")) # close underying DB connection compat.driver_connection(session.connection()).close() # alternate approach, but same idea: # conn_id = session.scalar("select connection_id()") # session.execute("kill connection %s" % conn_id) # try using it, will raise an error session.execute(sql.text("select 1")) except exception.DBConnectionError: # issue being tested is that this session.rollback() # does not itself try to re-connect and raise another # error. session.rollback() else: assert False, "no exception raised" class TestConstraint(TestsExceptionFilter): def test_postgresql(self): matched = self._run_test( "postgresql", "insert into resource some_values", self.IntegrityError( "new row for relation \"resource\" violates " "check constraint \"ck_started_before_ended\""), exception.DBConstraintError, ) self.assertEqual("resource", matched.table) self.assertEqual("ck_started_before_ended", matched.check_name) class TestDuplicate(TestsExceptionFilter): def _run_dupe_constraint_test(self, dialect_name, message, expected_columns=['a', 'b'], expected_value=None): matched = self._run_test( dialect_name, "insert into table some_values", self.IntegrityError(message), exception.DBDuplicateEntry ) self.assertEqual(expected_columns, matched.columns) self.assertEqual(expected_value, matched.value) def _not_dupe_constraint_test(self, dialect_name, statement, message, expected_cls): matched = self._run_test( dialect_name, statement, self.IntegrityError(message), expected_cls ) self.assertInnerException( matched, "IntegrityError", str(self.IntegrityError(message)), statement ) def test_sqlite(self): self._run_dupe_constraint_test("sqlite", 'column a, b are not unique') def test_sqlite_3_7_16_or_3_8_2_and_higher(self): self._run_dupe_constraint_test( "sqlite", 'UNIQUE constraint failed: tbl.a, tbl.b') def test_sqlite_dupe_primary_key(self): self._run_dupe_constraint_test( "sqlite", "PRIMARY KEY must be unique 'insert into t values(10)'", expected_columns=[]) def test_mysql_pymysql(self): self._run_dupe_constraint_test( "mysql", '(1062, "Duplicate entry ' '\'2-3\' for key \'uniq_tbl0a0b\'")', expected_value='2-3') self._run_dupe_constraint_test( "mysql", '(1062, "Duplicate entry ' '\'\' for key \'uniq_tbl0a0b\'")', expected_value='') def test_mysql_mysqlconnector(self): self._run_dupe_constraint_test( "mysql", '1062 (23000): Duplicate entry ' '\'2-3\' for key \'uniq_tbl0a0b\'")', expected_value='2-3') def test_postgresql(self): self._run_dupe_constraint_test( 'postgresql', 'duplicate key value violates unique constraint' '"uniq_tbl0a0b"' '\nDETAIL: Key (a, b)=(2, 3) already exists.\n', expected_value='2, 3' ) def test_mysql_single(self): self._run_dupe_constraint_test( "mysql", "1062 (23000): Duplicate entry '2' for key 'b'", expected_columns=['b'], expected_value='2' ) def test_mysql_duplicate_entry_key_start_with_tablename(self): self._run_dupe_constraint_test( "mysql", "1062 (23000): Duplicate entry '2' for key 'tbl.uniq_tbl0b'", expected_columns=['b'], expected_value='2' ) def test_mysql_binary(self): self._run_dupe_constraint_test( "mysql", "(1062, \'Duplicate entry " "\\\'\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E\\\' " "for key \\\'PRIMARY\\\'\')", expected_columns=['PRIMARY'], expected_value="\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E" ) self._run_dupe_constraint_test( "mysql", "(1062, 
\'Duplicate entry " "''\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E!,' " "for key 'PRIMARY'\')", expected_columns=['PRIMARY'], expected_value="'\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E!," ) def test_mysql_duplicate_entry_key_start_with_tablename_binary(self): self._run_dupe_constraint_test( "mysql", "(1062, \'Duplicate entry " "\\\'\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E\\\' " "for key \\\'tbl.uniq_tbl0c1\\\'\')", expected_columns=['c1'], expected_value="\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E" ) self._run_dupe_constraint_test( "mysql", "(1062, \'Duplicate entry " "''\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E!,' " "for key 'tbl.uniq_tbl0c1'\')", expected_columns=['c1'], expected_value="'\\\\x8A$\\\\x8D\\\\xA6\"s\\\\x8E!," ) def test_postgresql_single(self): self._run_dupe_constraint_test( 'postgresql', 'duplicate key value violates unique constraint "uniq_tbl0b"\n' 'DETAIL: Key (b)=(2) already exists.\n', expected_columns=['b'], expected_value='2' ) def test_unsupported_backend(self): self._not_dupe_constraint_test( "nonexistent", "insert into table some_values", self.IntegrityError("constraint violation"), exception.DBError ) class TestDeadlock(TestsExceptionFilter): statement = ('SELECT quota_usages.created_at AS ' 'quota_usages_created_at FROM quota_usages ' 'WHERE quota_usages.project_id = :project_id_1 ' 'AND quota_usages.deleted = :deleted_1 FOR UPDATE') params = { 'project_id_1': '8891d4478bbf48ad992f050cdf55e9b5', 'deleted_1': 0 } def _run_deadlock_detect_test( self, dialect_name, message, orig_exception_cls=TestsExceptionFilter.OperationalError): self._run_test( dialect_name, self.statement, orig_exception_cls(message), exception.DBDeadlock, params=self.params ) def _not_deadlock_test( self, dialect_name, message, expected_cls, expected_dbapi_cls, orig_exception_cls=TestsExceptionFilter.OperationalError): matched = self._run_test( dialect_name, self.statement, orig_exception_cls(message), expected_cls, params=self.params ) if isinstance(matched, exception.DBError): matched = matched.inner_exception self.assertEqual(expected_dbapi_cls, matched.orig.__class__.__name__) def test_mysql_pymysql_deadlock(self): self._run_deadlock_detect_test( "mysql", "(1213, 'Deadlock found when trying " "to get lock; try restarting " "transaction')" ) def test_mysql_pymysql_wsrep_deadlock(self): self._run_deadlock_detect_test( "mysql", "(1213, 'WSREP detected deadlock/conflict and aborted the " "transaction. 
Try restarting the transaction')", orig_exception_cls=self.InternalError ) self._run_deadlock_detect_test( "mysql", "(1213, 'Deadlock: wsrep aborted transaction')", orig_exception_cls=self.InternalError ) self._run_deadlock_detect_test( "mysql", "(1213, 'Deadlock: wsrep aborted transaction')", orig_exception_cls=self.OperationalError ) def test_mysql_pymysql_galera_deadlock(self): self._run_deadlock_detect_test( "mysql", "(1205, 'Lock wait timeout exceeded; " "try restarting transaction')", orig_exception_cls=self.InternalError ) def test_mysql_mysqlconnector_deadlock(self): self._run_deadlock_detect_test( "mysql", "1213 (40001): Deadlock found when trying to get lock; try " "restarting transaction", orig_exception_cls=self.InternalError ) def test_mysql_not_deadlock(self): self._not_deadlock_test( "mysql", "(1005, 'some other error')", sqla.exc.OperationalError, # note OperationalErrors are sent thru "OperationalError", ) def test_postgresql_deadlock(self): self._run_deadlock_detect_test( "postgresql", "deadlock detected", orig_exception_cls=self.TransactionRollbackError ) def test_postgresql_not_deadlock(self): self._not_deadlock_test( "postgresql", 'relation "fake" does not exist', # can be either depending on #3075 (exception.DBError, sqla.exc.OperationalError), "TransactionRollbackError", orig_exception_cls=self.TransactionRollbackError ) class TestDataError(TestsExceptionFilter): def _run_bad_data_test(self, dialect_name, message, error_class): self._run_test(dialect_name, "INSERT INTO TABLE some_values", error_class(message), exception.DBDataError) def test_bad_data_incorrect_string(self): # Error sourced from https://bugs.launchpad.net/cinder/+bug/1393871 self._run_bad_data_test("mysql", '(1366, "Incorrect string value: \'\\xF0\' ' 'for column \'resource\' at row 1"', self.OperationalError) def test_bad_data_out_of_range(self): # Error sourced from https://bugs.launchpad.net/cinder/+bug/1463379 self._run_bad_data_test("mysql", '(1264, "Out of range value for column ' '\'resource\' at row 1"', self.DataError) def test_data_too_long_for_column(self): self._run_bad_data_test("mysql", '(1406, "Data too long for column ' '\'resource\' at row 1"', self.DataError) class IntegrationTest(db_test_base._DbTestCase): """Test an actual error-raising round trips against the database.""" def setUp(self): super(IntegrationTest, self).setUp() meta = sqla.MetaData() self.test_table = sqla.Table( _TABLE_NAME, meta, sqla.Column('id', sqla.Integer, primary_key=True, nullable=False), sqla.Column('counter', sqla.Integer, nullable=False), sqla.UniqueConstraint('counter', name='uniq_counter')) self.test_table.create(self.engine) self.addCleanup(self.test_table.drop, self.engine) reg = registry() class Foo(object): def __init__(self, counter): self.counter = counter reg.map_imperatively(Foo, self.test_table) self.Foo = Foo def test_flush_wrapper_duplicate_entry(self): """test a duplicate entry exception.""" _session = self.sessionmaker() with _session.begin(): foo = self.Foo(counter=1) _session.add(foo) _session.begin() self.addCleanup(_session.rollback) foo = self.Foo(counter=1) _session.add(foo) self.assertRaises(exception.DBDuplicateEntry, _session.flush) def test_autoflush_wrapper_duplicate_entry(self): """Test a duplicate entry exception raised. 
test a duplicate entry exception raised via query.all()-> autoflush """ _session = self.sessionmaker() with _session.begin(): foo = self.Foo(counter=1) _session.add(foo) _session.begin() self.addCleanup(_session.rollback) foo = self.Foo(counter=1) _session.add(foo) self.assertTrue(_session.autoflush) self.assertRaises(exception.DBDuplicateEntry, _session.query(self.Foo).all) def test_flush_wrapper_plain_integrity_error(self): """test a plain integrity error wrapped as DBError.""" _session = self.sessionmaker() with _session.begin(): foo = self.Foo(counter=1) _session.add(foo) _session.begin() self.addCleanup(_session.rollback) foo = self.Foo(counter=None) _session.add(foo) self.assertRaises(exception.DBError, _session.flush) def test_flush_wrapper_operational_error(self): """test an operational error from flush() raised as-is.""" _session = self.sessionmaker() with _session.begin(): foo = self.Foo(counter=1) _session.add(foo) _session.begin() self.addCleanup(_session.rollback) foo = self.Foo(counter=sqla.func.imfake(123)) _session.add(foo) matched = self.assertRaises(sqla.exc.OperationalError, _session.flush) self.assertIn("no such function", str(matched)) def test_query_wrapper_operational_error(self): """test an operational error from query.all() raised as-is.""" _session = self.sessionmaker() _session.begin() self.addCleanup(_session.rollback) q = _session.query(self.Foo).filter( self.Foo.counter == sqla.func.imfake(123)) matched = self.assertRaises(sqla.exc.OperationalError, q.all) self.assertIn("no such function", str(matched)) class TestDBDisconnectedFixture(TestsExceptionFilter): native_pre_ping = False def _test_ping_listener_disconnected( self, dialect_name, exc_obj, is_disconnect=True, ): with self._fixture( dialect_name, exc_obj, False, is_disconnect, ) as engine: conn = engine.connect() with conn.begin(): self.assertEqual( 1, conn.execute(sqla.select(1)).scalars().first(), ) self.assertFalse(conn.closed) self.assertFalse(conn.invalidated) self.assertTrue(conn.in_transaction()) with self._fixture( dialect_name, exc_obj, True, is_disconnect, ) as engine: self.assertRaises( exception.DBConnectionError, engine.connect ) # test implicit execution with self._fixture(dialect_name, exc_obj, False) as engine: with engine.connect() as conn: self.assertEqual( 1, conn.execute(sqla.select(1)).scalars().first(), ) @contextlib.contextmanager def _fixture( self, dialect_name, exception, db_stays_down, is_disconnect=True, ): """Fixture for testing the ping listener. For SQLAlchemy 2.0, the mocking is placed more deeply in the stack within the DBAPI connection / cursor so that we can also effectively mock out the "pre ping" condition. :param dialect_name: dialect to use. "postgresql" or "mysql" :param exception: an exception class to raise :param db_stays_down: if True, the database will stay down after the first ping fails :param is_disconnect: whether or not the SQLAlchemy dialect should consider the exception object as a "disconnect error". Openstack's own exception handlers upgrade various DB exceptions to be "disconnect" scenarios that SQLAlchemy itself does not, such as some specific Galera error messages. The importance of an exception being a "disconnect error" means that SQLAlchemy knows it can discard the connection and then reconnect. If the error is not a "disconnection error", then it raises. 
""" connect_args = {} patchers = [] db_disconnected = False class DisconnectCursorMixin: def execute(self, *arg, **kw): if db_disconnected: raise exception else: return super().execute(*arg, **kw) if dialect_name == "postgresql": import psycopg2.extensions class Curs(DisconnectCursorMixin, psycopg2.extensions.cursor): pass connect_args = {"cursor_factory": Curs} elif dialect_name == "mysql": import pymysql def fake_ping(self, *arg, **kw): if db_disconnected: raise exception else: return True class Curs(DisconnectCursorMixin, pymysql.cursors.Cursor): pass connect_args = {"cursorclass": Curs} patchers.append( mock.patch.object( pymysql.Connection, "ping", fake_ping ) ) else: raise NotImplementedError() with mock.patch.object( compat, "native_pre_ping_event_support", self.native_pre_ping, ): engine = engines.create_engine( self.engine.url, max_retries=0) # 1. override how we connect. if we want the DB to be down # for the moment, but recover, reset db_disconnected after # connect is called. If we want the DB to stay down, then # make sure connect raises the error also. @event.listens_for(engine, "do_connect") def _connect(dialect, connrec, cargs, cparams): nonlocal db_disconnected # while we're here, add our cursor classes to the DBAPI # connect args cparams.update(connect_args) if db_disconnected: if db_stays_down: raise exception else: db_disconnected = False # 2. initialize the dialect with a first connect conn = engine.connect() conn.close() # 3. add additional patchers patchers.extend([ mock.patch.object( engine.dialect.dbapi, "Error", self.Error, ), mock.patch.object( engine.dialect, "is_disconnect", mock.Mock(return_value=is_disconnect), ), ]) with test_utils.nested(*patchers): # "disconnect" the DB db_disconnected = True yield engine class MySQLPrePingHandlerTests( db_test_base._MySQLOpportunisticTestCase, TestDBDisconnectedFixture, ): def test_mariadb_error_1927(self): for code in [1927]: self._test_ping_listener_disconnected( "mysql", self.InternalError('%d Connection was killed' % code), is_disconnect=False ) def test_packet_sequence_wrong_error(self): self._test_ping_listener_disconnected( "mysql", self.InternalError( 'Packet sequence number wrong - got 35 expected 1'), is_disconnect=False ) def test_mysql_ping_listener_disconnected(self): for code in [2006, 2013, 2014, 2045, 2055]: self._test_ping_listener_disconnected( "mysql", self.OperationalError('%d MySQL server has gone away' % code) ) def test_mysql_ping_listener_disconnected_regex_only(self): # intentionally set the is_disconnect flag to False # in the "sqlalchemy" layer to make sure the regexp # on _is_db_connection_error is catching for code in [2002, 2003, 2006, 2013]: self._test_ping_listener_disconnected( "mysql", self.OperationalError('%d MySQL server has gone away' % code), is_disconnect=False ) def test_mysql_galera_non_primary_disconnected(self): self._test_ping_listener_disconnected( "mysql", self.OperationalError('(1047, \'Unknown command\') ' '\'SELECT DATABASE()\' ()') ) def test_mysql_galera_non_primary_disconnected_regex_only(self): # intentionally set the is_disconnect flag to False # in the "sqlalchemy" layer to make sure the regexp # on _is_db_connection_error is catching self._test_ping_listener_disconnected( "mysql", self.OperationalError('(1047, \'Unknown command\') ' '\'SELECT DATABASE()\' ()'), is_disconnect=False ) def test_mysql_w_disconnect_flag(self): for code in [2002, 2003, 2002]: self._test_ping_listener_disconnected( "mysql", self.OperationalError('%d MySQL server has gone away' % code) ) 
def test_mysql_wo_disconnect_flag(self): for code in [2002, 2003]: self._test_ping_listener_disconnected( "mysql", self.OperationalError('%d MySQL server has gone away' % code), is_disconnect=False ) class PostgreSQLPrePingHandlerTests( db_test_base._PostgreSQLOpportunisticTestCase, TestDBDisconnectedFixture): def test_postgresql_ping_listener_disconnected(self): self._test_ping_listener_disconnected( "postgresql", self.OperationalError( "could not connect to server: Connection refused"), ) def test_postgresql_ping_listener_disconnected_regex_only(self): self._test_ping_listener_disconnected( "postgresql", self.OperationalError( "could not connect to server: Connection refused"), is_disconnect=False ) if compat.sqla_2: class MySQLNativePrePingTests(MySQLPrePingHandlerTests): native_pre_ping = True class PostgreSQLNativePrePingTests(PostgreSQLPrePingHandlerTests): native_pre_ping = True class TestDBConnectPingListener(TestsExceptionFilter): def setUp(self): super().setUp() event.listen( self.engine, "engine_connect", engines._connect_ping_listener) @contextlib.contextmanager def _fixture( self, dialect_name, exception, good_conn_count, is_disconnect=True): engine = self.engine # empty out the connection pool engine.dispose() connect_fn = engine.dialect.connect real_do_execute = engine.dialect.do_execute counter = itertools.count(1) def cant_execute(*arg, **kw): value = next(counter) if value > good_conn_count: raise exception else: return real_do_execute(*arg, **kw) def cant_connect(*arg, **kw): value = next(counter) if value > good_conn_count: raise exception else: return connect_fn(*arg, **kw) with self._dbapi_fixture(dialect_name, is_disconnect=is_disconnect): with mock.patch.object(engine.dialect, "connect", cant_connect): with mock.patch.object( engine.dialect, "do_execute", cant_execute): yield def _test_ping_listener_disconnected( self, dialect_name, exc_obj, is_disconnect=True): with self._fixture(dialect_name, exc_obj, 3, is_disconnect): conn = self.engine.connect() self.assertEqual(1, conn.scalar(sqla.select(1))) conn.close() with self._fixture(dialect_name, exc_obj, 1, is_disconnect): self.assertRaises( exception.DBConnectionError, self.engine.connect ) self.assertRaises( exception.DBConnectionError, self.engine.connect ) self.assertRaises( exception.DBConnectionError, self.engine.connect ) with self._fixture(dialect_name, exc_obj, 1, is_disconnect): self.assertRaises( exception.DBConnectionError, self.engine.connect ) self.assertRaises( exception.DBConnectionError, self.engine.connect ) self.assertRaises( exception.DBConnectionError, self.engine.connect ) def test_mysql_w_disconnect_flag(self): for code in [2002, 2003, 2002]: self._test_ping_listener_disconnected( "mysql", self.OperationalError('%d MySQL server has gone away' % code) ) def test_mysql_wo_disconnect_flag(self): for code in [2002, 2003]: self._test_ping_listener_disconnected( "mysql", self.OperationalError('%d MySQL server has gone away' % code), is_disconnect=False ) class TestDBConnectRetry(TestsExceptionFilter): def _run_test(self, dialect_name, exception, count, retries): counter = itertools.count() engine = self.engine # empty out the connection pool engine.dispose() connect_fn = engine.dialect.connect def cant_connect(*arg, **kw): if next(counter) < count: raise exception else: return connect_fn(*arg, **kw) with self._dbapi_fixture(dialect_name): with mock.patch.object(engine.dialect, "connect", cant_connect): return engines._test_connection(engine, retries, .01) def test_connect_no_retries(self): conn = 
self._run_test( "mysql", self.OperationalError("Error: (2003) something wrong"), 2, 0 ) # didnt connect because nothing was tried self.assertIsNone(conn) def test_connect_inifinite_retries(self): conn = self._run_test( "mysql", self.OperationalError("Error: (2003) something wrong"), 2, -1 ) # conn is good self.assertEqual(1, conn.scalar(sqla.select(1))) def test_connect_retry_past_failure(self): conn = self._run_test( "mysql", self.OperationalError("Error: (2003) something wrong"), 2, 3 ) # conn is good self.assertEqual(1, conn.scalar(sqla.select(1))) def test_connect_retry_not_candidate_exception(self): self.assertRaises( sqla.exc.OperationalError, # remember, we pass OperationalErrors # through at the moment :) self._run_test, "mysql", self.OperationalError("Error: (2015) I can't connect period"), 2, 3 ) def test_connect_retry_stops_infailure(self): self.assertRaises( exception.DBConnectionError, self._run_test, "mysql", self.OperationalError("Error: (2003) something wrong"), 3, 2 ) class TestsErrorHandler(TestsExceptionFilter): def test_multiple_error_handlers(self): handler = mock.MagicMock(return_value=None) sqla.event.listen(self.engine, "handle_error", handler, retval=True) # cause an error in DB API self._run_test( "mysql", "select you_made_a_programming_error", self.ProgrammingError("Error 123, you made a mistake"), exception.DBError ) # expect custom handler to be called together with oslo.db's one self.assertEqual(1, handler.call_count, 'Custom handler should be called') def test_chained_exceptions(self): class CustomError(Exception): pass def handler(context): return CustomError('Custom Error') sqla.event.listen(self.engine, "handle_error", handler, retval=True) # cause an error in DB API, expect exception from custom handler self._run_test( "mysql", "select you_made_a_programming_error", self.ProgrammingError("Error 123, you made a mistake"), CustomError ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_fixtures.py0000664000175000017500000001445400000000000023400 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import testresources import testscenarios import unittest from unittest import mock from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import provision from oslo_db.sqlalchemy import test_fixtures from oslo_db.tests import base as test_base start_dir = os.path.dirname(__file__) class BackendSkipTest(test_base.BaseTestCase): def test_skip_no_dbapi(self): class FakeDatabaseOpportunisticFixture( test_fixtures.OpportunisticDbFixture): DRIVER = 'postgresql' class SomeTest(test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase): FIXTURE = FakeDatabaseOpportunisticFixture def runTest(self): pass st = SomeTest() # patch in replacement lookup dictionaries to avoid # leaking from/to other tests with mock.patch( "oslo_db.sqlalchemy.provision." 
"Backend.backends_by_database_type", { "postgresql": provision.Backend("postgresql", "postgresql://")}): st._database_resources = {} st._db_not_available = {} st._schema_resources = {} with mock.patch( "sqlalchemy.create_engine", mock.Mock(side_effect=ImportError())): self.assertEqual([], st.resources) ex = self.assertRaises( self.skipException, st.setUp ) self.assertEqual( "Backend 'postgresql' is unavailable: No DBAPI installed", str(ex) ) def test_skip_no_such_backend(self): class FakeDatabaseOpportunisticFixture( test_fixtures.OpportunisticDbFixture): DRIVER = 'postgresql+nosuchdbapi' class SomeTest(test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase): FIXTURE = FakeDatabaseOpportunisticFixture def runTest(self): pass st = SomeTest() ex = self.assertRaises( self.skipException, st.setUp ) self.assertEqual( "Backend 'postgresql+nosuchdbapi' is unavailable: No such backend", str(ex) ) class EnginefacadeIntegrationTest(test_base.BaseTestCase): def test_db_fixture(self): normal_mgr = enginefacade.transaction_context() normal_mgr.configure( connection="sqlite://", sqlite_fk=True, mysql_sql_mode="FOOBAR", max_overflow=38 ) class MyFixture(test_fixtures.OpportunisticDbFixture): def get_enginefacade(self): return normal_mgr test = mock.Mock(SCHEMA_SCOPE=None) fixture = MyFixture(test=test) resources = fixture._get_resources() testresources.setUpResources(test, resources, None) self.addCleanup( testresources.tearDownResources, test, resources, None ) fixture.setUp() self.addCleanup(fixture.cleanUp) self.assertTrue(normal_mgr._factory._started) test.engine = normal_mgr.writer.get_engine() self.assertEqual("sqlite://", str(test.engine.url)) self.assertIs(test.engine, normal_mgr._factory._writer_engine) engine_args = normal_mgr._factory._engine_args_for_conf(None) self.assertTrue(engine_args['sqlite_fk']) self.assertEqual("FOOBAR", engine_args["mysql_sql_mode"]) self.assertEqual(38, engine_args["max_overflow"]) fixture.cleanUp() fixture._clear_cleanups() # so the real cleanUp works self.assertFalse(normal_mgr._factory._started) class TestLoadHook(unittest.TestCase): """Test the 'load_tests' hook supplied by test_base. The purpose of this loader is to organize tests into an OptimisingTestSuite using the standard unittest load_tests hook. The hook needs to detect if it is being invoked at the module level or at the package level. It has to behave completely differently in these two cases. 
""" def test_module_level(self): load_tests = test_fixtures.optimize_module_test_loader() loader = unittest.TestLoader() found_tests = loader.discover(start_dir, pattern="test_fixtures.py") new_loader = load_tests(loader, found_tests, "test_fixtures.py") self.assertIsInstance(new_loader, testresources.OptimisingTestSuite) actual_tests = unittest.TestSuite( testscenarios.generate_scenarios(found_tests) ) self.assertEqual( new_loader.countTestCases(), actual_tests.countTestCases() ) def test_package_level(self): self._test_package_level(test_fixtures.optimize_package_test_loader) def _test_package_level(self, fn): load_tests = fn( os.path.join(start_dir, "__init__.py")) loader = unittest.TestLoader() new_loader = load_tests( loader, unittest.suite.TestSuite(), "test_fixtures.py") self.assertIsInstance(new_loader, testresources.OptimisingTestSuite) actual_tests = unittest.TestSuite( testscenarios.generate_scenarios( loader.discover(start_dir, pattern="test_fixtures.py")) ) self.assertEqual( new_loader.countTestCases(), actual_tests.countTestCases() ) class TestWScenarios(unittest.TestCase): """a 'do nothing' test suite. Should generate exactly four tests when testscenarios is used. """ def test_one(self): pass def test_two(self): pass scenarios = [ ('scenario1', dict(scenario='scenario 1')), ('scenario2', dict(scenario='scenario 2')) ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_migrate_cli.py0000664000175000017500000002540000000000000023777 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import alembic import sqlalchemy from oslo_db import exception from oslo_db.sqlalchemy.migration_cli import ext_alembic from oslo_db.sqlalchemy.migration_cli import manager from oslo_db.tests import base as test_base class MockWithCmp(mock.MagicMock): order = 0 def __init__(self, *args, **kwargs): super(MockWithCmp, self).__init__(*args, **kwargs) self.__lt__ = lambda self, other: self.order < other.order @mock.patch(('oslo_db.sqlalchemy.migration_cli.' 
'ext_alembic.alembic.command')) class TestAlembicExtension(test_base.BaseTestCase): def setUp(self): self.migration_config = {'alembic_ini_path': '.', 'db_url': 'sqlite://'} self.engine = sqlalchemy.create_engine(self.migration_config['db_url']) self.alembic = ext_alembic.AlembicExtension( self.engine, self.migration_config) super(TestAlembicExtension, self).setUp() def test_check_enabled_true(self, command): """Check enabled returns True Verifies that enabled returns True on non empty alembic_ini_path conf variable """ self.assertTrue(self.alembic.enabled) def test_check_enabled_false(self, command): """Check enabled returns False Verifies enabled returns False on empty alembic_ini_path variable """ self.migration_config['alembic_ini_path'] = '' alembic = ext_alembic.AlembicExtension( self.engine, self.migration_config) self.assertFalse(alembic.enabled) def test_upgrade_none(self, command): self.alembic.upgrade(None) command.upgrade.assert_called_once_with(self.alembic.config, 'head') def test_upgrade_normal(self, command): self.alembic.upgrade('131daa') command.upgrade.assert_called_once_with(self.alembic.config, '131daa') def test_downgrade_none(self, command): self.alembic.downgrade(None) command.downgrade.assert_called_once_with(self.alembic.config, 'base') def test_downgrade_int(self, command): self.alembic.downgrade(111) command.downgrade.assert_called_once_with(self.alembic.config, 'base') def test_downgrade_normal(self, command): self.alembic.downgrade('131daa') command.downgrade.assert_called_once_with( self.alembic.config, '131daa') def test_revision(self, command): self.alembic.revision(message='test', autogenerate=True) command.revision.assert_called_once_with( self.alembic.config, message='test', autogenerate=True) def test_stamp(self, command): self.alembic.stamp('stamp') command.stamp.assert_called_once_with( self.alembic.config, revision='stamp') def test_version(self, command): version = self.alembic.version() self.assertIsNone(version) def test_has_revision(self, command): with mock.patch(('oslo_db.sqlalchemy.migration_cli.' 'ext_alembic.alembic_script')) as mocked: self.alembic.config.get_main_option = mock.Mock() # since alembic_script is mocked and no exception is raised, call # will result in success self.assertIs(True, self.alembic.has_revision('test')) self.alembic.config.get_main_option.assert_called_once_with( 'script_location') mocked.ScriptDirectory().get_revision.assert_called_once_with( 'test') self.assertIs(True, self.alembic.has_revision(None)) self.assertIs(True, self.alembic.has_revision('head')) # relative revision, should be True for alembic self.assertIs(True, self.alembic.has_revision('+1')) def test_has_revision_negative(self, command): with mock.patch(('oslo_db.sqlalchemy.migration_cli.' 
'ext_alembic.alembic_script')) as mocked: mocked.ScriptDirectory().get_revision.side_effect = ( alembic.util.CommandError) self.alembic.config.get_main_option = mock.Mock() # exception is raised, the call should be false self.assertIs(False, self.alembic.has_revision('test')) self.alembic.config.get_main_option.assert_called_once_with( 'script_location') mocked.ScriptDirectory().get_revision.assert_called_once_with( 'test') class TestMigrationManager(test_base.BaseTestCase): def setUp(self): self.migration_config = {'alembic_ini_path': '.', 'migrate_repo_path': '.', 'db_url': 'sqlite://'} engine = sqlalchemy.create_engine(self.migration_config['db_url']) self.migration_manager = manager.MigrationManager( self.migration_config, engine) self.ext = mock.Mock() self.ext.obj.version = mock.Mock(return_value=0) self.migration_manager._manager.extensions = [self.ext] super(TestMigrationManager, self).setUp() def test_manager_update(self): self.migration_manager.upgrade('head') self.ext.obj.upgrade.assert_called_once_with('head') def test_manager_update_revision_none(self): self.migration_manager.upgrade(None) self.ext.obj.upgrade.assert_called_once_with(None) def test_downgrade_normal_revision(self): self.migration_manager.downgrade('111abcd') self.ext.obj.downgrade.assert_called_once_with('111abcd') def test_version(self): self.migration_manager.version() self.ext.obj.version.assert_called_once_with() def test_version_return_value(self): version = self.migration_manager.version() self.assertEqual(0, version) def test_revision_message_autogenerate(self): self.migration_manager.revision('test', True) self.ext.obj.revision.assert_called_once_with('test', True) def test_revision_only_message(self): self.migration_manager.revision('test', False) self.ext.obj.revision.assert_called_once_with('test', False) def test_stamp(self): self.migration_manager.stamp('stamp') self.ext.obj.stamp.assert_called_once_with('stamp') def test_wrong_config(self): err = self.assertRaises(ValueError, manager.MigrationManager, {'wrong_key': 'sqlite://'}) self.assertEqual('Either database url or engine must be provided.', err.args[0]) class TestMigrationMultipleExtensions(test_base.BaseTestCase): def setUp(self): self.migration_config = {'alembic_ini_path': '.', 'migrate_repo_path': '.', 'db_url': 'sqlite://'} engine = sqlalchemy.create_engine(self.migration_config['db_url']) self.migration_manager = manager.MigrationManager( self.migration_config, engine) self.first_ext = MockWithCmp() self.first_ext.obj.order = 1 self.first_ext.obj.upgrade.return_value = 100 self.first_ext.obj.downgrade.return_value = 0 self.second_ext = MockWithCmp() self.second_ext.obj.order = 2 self.second_ext.obj.upgrade.return_value = 200 self.second_ext.obj.downgrade.return_value = 100 self.migration_manager._manager.extensions = [self.first_ext, self.second_ext] super(TestMigrationMultipleExtensions, self).setUp() def test_upgrade_right_order(self): results = self.migration_manager.upgrade(None) self.assertEqual([100, 200], results) def test_downgrade_right_order(self): results = self.migration_manager.downgrade(None) self.assertEqual([100, 0], results) def test_upgrade_does_not_go_too_far(self): self.first_ext.obj.has_revision.return_value = True self.second_ext.obj.has_revision.return_value = False self.second_ext.obj.upgrade.side_effect = AssertionError( 'this method should not have been called') results = self.migration_manager.upgrade(100) self.assertEqual([100], results) def test_downgrade_does_not_go_too_far(self): 
self.second_ext.obj.has_revision.return_value = True self.first_ext.obj.has_revision.return_value = False self.first_ext.obj.downgrade.side_effect = AssertionError( 'this method should not have been called') results = self.migration_manager.downgrade(100) self.assertEqual([100], results) def test_upgrade_checks_rev_existence(self): self.first_ext.obj.has_revision.return_value = False self.second_ext.obj.has_revision.return_value = False # upgrade to a specific non-existent revision should fail self.assertRaises(exception.DBMigrationError, self.migration_manager.upgrade, 100) # upgrade to the "head" should succeed self.assertEqual([100, 200], self.migration_manager.upgrade(None)) # let's assume the second ext has the revision, upgrade should succeed self.second_ext.obj.has_revision.return_value = True self.assertEqual([100, 200], self.migration_manager.upgrade(200)) # upgrade to the "head" should still succeed self.assertEqual([100, 200], self.migration_manager.upgrade(None)) def test_downgrade_checks_rev_existence(self): self.first_ext.obj.has_revision.return_value = False self.second_ext.obj.has_revision.return_value = False # upgrade to a specific non-existent revision should fail self.assertRaises(exception.DBMigrationError, self.migration_manager.downgrade, 100) # downgrade to the "base" should succeed self.assertEqual([100, 0], self.migration_manager.downgrade(None)) # let's assume the second ext has the revision, downgrade should # succeed self.first_ext.obj.has_revision.return_value = True self.assertEqual([100, 0], self.migration_manager.downgrade(200)) # downgrade to the "base" should still succeed self.assertEqual([100, 0], self.migration_manager.downgrade(None)) self.assertEqual([100, 0], self.migration_manager.downgrade('base')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_migrations.py0000664000175000017500000002267400000000000023706 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2012-2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
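# --- Illustrative aside (not part of the original test suite) ---
# The migration_cli tests above drive MigrationManager purely through
# mocks. A minimal usage sketch, assuming an alembic.ini exists in the
# current directory; the paths and the sqlite:// URL are placeholders:
import sqlalchemy as _sqla_sketch

from oslo_db.sqlalchemy.migration_cli import manager as _manager_sketch


def _sketch_migration_manager():
    engine = _sqla_sketch.create_engine('sqlite://')
    migration_config = {'alembic_ini_path': '.', 'db_url': 'sqlite://'}
    mgr = _manager_sketch.MigrationManager(migration_config, engine)
    mgr.upgrade('head')   # run every enabled extension up to "head"
    return mgr.version()  # current revision reported by the extensions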
from unittest import mock import sqlalchemy as sa from sqlalchemy import orm from oslo_db.sqlalchemy import test_migrations as migrate from oslo_db.tests.sqlalchemy import base as db_test_base class ModelsMigrationSyncMixin(db_test_base._DbTestCase): def setUp(self): super(ModelsMigrationSyncMixin, self).setUp() self.metadata = sa.MetaData() self.metadata_migrations = sa.MetaData() sa.Table( 'testtbl', self.metadata_migrations, sa.Column('id', sa.Integer, primary_key=True), sa.Column('spam', sa.String(10), nullable=False), sa.Column('eggs', sa.DateTime), sa.Column('foo', sa.Boolean, server_default=sa.sql.expression.true()), sa.Column('bool_wo_default', sa.Boolean), sa.Column('bar', sa.Numeric(10, 5)), sa.Column('defaulttest', sa.Integer, server_default='5'), sa.Column('defaulttest2', sa.String(8), server_default=''), sa.Column('defaulttest3', sa.String(5), server_default="test"), sa.Column('defaulttest4', sa.Enum('first', 'second', name='testenum'), server_default="first"), sa.Column("defaulttest5", sa.Integer, server_default=sa.text('0')), sa.Column('variant', sa.BigInteger()), sa.Column('variant2', sa.BigInteger(), server_default='0'), sa.Column('fk_check', sa.String(36), nullable=False), sa.UniqueConstraint('spam', 'eggs', name='uniq_cons'), ) BASE = orm.declarative_base(metadata=self.metadata) class TestModel(BASE): __tablename__ = 'testtbl' __table_args__ = ( sa.UniqueConstraint('spam', 'eggs', name='uniq_cons'), ) id = sa.Column('id', sa.Integer, primary_key=True) spam = sa.Column('spam', sa.String(10), nullable=False) eggs = sa.Column('eggs', sa.DateTime) foo = sa.Column('foo', sa.Boolean, server_default=sa.sql.expression.true()) fk_check = sa.Column('fk_check', sa.String(36), nullable=False) bool_wo_default = sa.Column('bool_wo_default', sa.Boolean) defaulttest = sa.Column('defaulttest', sa.Integer, server_default='5') defaulttest2 = sa.Column('defaulttest2', sa.String(8), server_default='') defaulttest3 = sa.Column('defaulttest3', sa.String(5), server_default="test") defaulttest4 = sa.Column('defaulttest4', sa.Enum('first', 'second', name='testenum'), server_default="first") defaulttest5 = sa.Column("defaulttest5", sa.Integer, server_default=sa.text('0')) variant = sa.Column(sa.BigInteger().with_variant( sa.Integer(), 'sqlite')) variant2 = sa.Column(sa.BigInteger().with_variant( sa.Integer(), 'sqlite'), server_default='0') bar = sa.Column('bar', sa.Numeric(10, 5)) class ModelThatShouldNotBeCompared(BASE): __tablename__ = 'testtbl2' id = sa.Column('id', sa.Integer, primary_key=True) spam = sa.Column('spam', sa.String(10), nullable=False) def get_metadata(self): return self.metadata def get_engine(self): return self.engine def db_sync(self, engine): self.metadata_migrations.create_all(bind=engine) def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table': return name == 'testtbl' return True def _test_models_not_sync_filtered(self): self.metadata_migrations.clear() sa.Table( 'table', self.metadata_migrations, sa.Column('fk_check', sa.String(36), nullable=False), sa.PrimaryKeyConstraint('fk_check'), mysql_engine='InnoDB' ) sa.Table( 'testtbl', self.metadata_migrations, sa.Column('id', sa.Integer, primary_key=True), sa.Column('spam', sa.String(8), nullable=True), sa.Column('eggs', sa.DateTime), sa.Column('foo', sa.Boolean, server_default=sa.sql.expression.false()), sa.Column('bool_wo_default', sa.Boolean, unique=True), sa.Column('bar', sa.BigInteger), sa.Column('defaulttest', sa.Integer, server_default='7'), sa.Column('defaulttest2', sa.String(8), 
server_default=''), sa.Column('defaulttest3', sa.String(5), server_default="fake"), sa.Column('defaulttest4', sa.Enum('first', 'second', name='testenum'), server_default="first"), sa.Column("defaulttest5", sa.Integer, server_default=sa.text('0')), sa.Column('fk_check', sa.String(36), nullable=False), sa.UniqueConstraint('spam', 'foo', name='uniq_cons'), sa.ForeignKeyConstraint(['fk_check'], ['table.fk_check']), mysql_engine='InnoDB' ) with mock.patch.object(self, 'filter_metadata_diff') as filter_mock: def filter_diffs(diffs): # test filter returning only constraint related diffs return [ diff for diff in diffs if 'constraint' in diff[0] ] filter_mock.side_effect = filter_diffs msg = str(self.assertRaises(AssertionError, self.test_models_sync)) self.assertNotIn('defaulttest', msg) self.assertNotIn('defaulttest3', msg) self.assertNotIn('remove_fk', msg) self.assertIn('constraint', msg) def _test_models_not_sync(self): self.metadata_migrations.clear() sa.Table( 'table', self.metadata_migrations, sa.Column('fk_check', sa.String(36), nullable=False), sa.PrimaryKeyConstraint('fk_check'), mysql_engine='InnoDB' ) sa.Table( 'testtbl', self.metadata_migrations, sa.Column('id', sa.Integer, primary_key=True), sa.Column('spam', sa.String(8), nullable=True), sa.Column('eggs', sa.DateTime), sa.Column('foo', sa.Boolean, server_default=sa.sql.expression.false()), sa.Column('bool_wo_default', sa.Boolean, unique=True), sa.Column('bar', sa.BigInteger), sa.Column('defaulttest', sa.Integer, server_default='7'), sa.Column('defaulttest2', sa.String(8), server_default=''), sa.Column('defaulttest3', sa.String(5), server_default="fake"), sa.Column('defaulttest4', sa.Enum('first', 'second', name='testenum'), server_default="first"), sa.Column("defaulttest5", sa.Integer, server_default=sa.text('0')), sa.Column('variant', sa.String(10)), sa.Column('fk_check', sa.String(36), nullable=False), sa.UniqueConstraint('spam', 'foo', name='uniq_cons'), sa.ForeignKeyConstraint(['fk_check'], ['table.fk_check']), mysql_engine='InnoDB' ) msg = str(self.assertRaises(AssertionError, self.test_models_sync)) # NOTE(I159): Check mentioning of the table and columns. # The log is invalid json, so we can't parse it and check it for # full compliance. We have no guarantee of the log items ordering, # so we can't use regexp. 
self.assertTrue(msg.startswith( 'Models and migration scripts aren\'t in sync:')) self.assertIn('testtbl', msg) self.assertIn('spam', msg) self.assertIn('eggs', msg) # test that the unique constraint is added self.assertIn('foo', msg) self.assertIn('bar', msg) self.assertIn('bool_wo_default', msg) self.assertIn('defaulttest', msg) self.assertIn('defaulttest3', msg) self.assertIn('remove_fk', msg) self.assertIn('variant', msg) class ModelsMigrationsSyncMySQL( ModelsMigrationSyncMixin, migrate.ModelsMigrationsSync, db_test_base._MySQLOpportunisticTestCase, ): def test_models_not_sync(self): self._test_models_not_sync() def test_models_not_sync_filtered(self): self._test_models_not_sync_filtered() class ModelsMigrationsSyncPostgreSQL( ModelsMigrationSyncMixin, migrate.ModelsMigrationsSync, db_test_base._PostgreSQLOpportunisticTestCase, ): def test_models_not_sync(self): self._test_models_not_sync() def test_models_not_sync_filtered(self): self._test_models_not_sync_filtered() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_models.py0000664000175000017500000001643500000000000023013 0ustar00zuulzuul00000000000000# Copyright 2012 Cloudscaling Group, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
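# --- Illustrative aside (not part of the original test suite) ---
# The ModelsMigrationsSync mixin exercised above expects the consuming
# project to supply three hooks: get_engine(), get_metadata() and
# db_sync(). A minimal sketch, under the assumption that the project keeps
# its models in a declarative base and that metadata.create_all() stands
# in for running the real migration scripts:
import sqlalchemy as _sa_sketch
from sqlalchemy.orm import declarative_base as _declarative_base_sketch

from oslo_db.sqlalchemy import test_migrations as _test_migrations_sketch

_SKETCH_BASE = _declarative_base_sketch()


class _SketchModel(_SKETCH_BASE):
    __tablename__ = 'sketch_tbl'
    id = _sa_sketch.Column(_sa_sketch.Integer, primary_key=True)


class _SketchModelsSyncTest(_test_migrations_sketch.ModelsMigrationsSync):
    # a real test case would also inherit one of the opportunistic DB test
    # base classes (as the MySQL/PostgreSQL classes above do), which is
    # what provides self.engine
    def get_engine(self):
        return self.engine

    def get_metadata(self):
        return _SKETCH_BASE.metadata

    def db_sync(self, engine):
        # placeholder for invoking the project's real migration scripts
        _SKETCH_BASE.metadata.create_all(engine)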
from collections import abc import datetime from unittest import mock from sqlalchemy import Column from sqlalchemy import Integer, String from sqlalchemy import event from sqlalchemy.orm import declarative_base from oslo_db.sqlalchemy import models from oslo_db.tests import base as test_base from oslo_db.tests.sqlalchemy import base as db_test_base BASE = declarative_base() class ModelBaseTest(db_test_base._DbTestCase): def setUp(self): super(ModelBaseTest, self).setUp() self.mb = models.ModelBase() self.ekm = ExtraKeysModel() def test_modelbase_has_dict_methods(self): dict_methods = ('__getitem__', '__setitem__', '__contains__', 'get', 'update', 'save', 'items', 'iteritems', 'keys') for method in dict_methods: self.assertTrue(hasattr(models.ModelBase, method), "Method %s() is not found" % method) def test_modelbase_is_iterable(self): self.assertTrue(issubclass(models.ModelBase, abc.Iterable)) def test_modelbase_set(self): self.mb['world'] = 'hello' self.assertEqual('hello', self.mb['world']) def test_modelbase_update(self): h = {'a': '1', 'b': '2'} self.mb.update(h) for key in h.keys(): self.assertEqual(h[key], self.mb[key]) def test_modelbase_contains(self): mb = models.ModelBase() h = {'a': '1', 'b': '2'} mb.update(h) for key in h.keys(): # Test 'in' syntax (instead of using .assertIn) self.assertIn(key, mb) self.assertNotIn('non-existent-key', mb) def test_modelbase_contains_exc(self): class ErrorModel(models.ModelBase): @property def bug(self): raise ValueError model = ErrorModel() model.update({'attr': 5}) self.assertIn('attr', model) self.assertRaises(ValueError, lambda: 'bug' in model) def test_modelbase_items_iteritems(self): h = {'a': '1', 'b': '2'} expected = { 'id': None, 'smth': None, 'name': 'NAME', 'a': '1', 'b': '2', } self.ekm.update(h) self.assertEqual(expected, dict(self.ekm.items())) self.assertEqual(expected, dict(self.ekm.iteritems())) def test_modelbase_dict(self): h = {'a': '1', 'b': '2'} expected = { 'id': None, 'smth': None, 'name': 'NAME', 'a': '1', 'b': '2', } self.ekm.update(h) self.assertEqual(expected, dict(self.ekm)) def test_modelbase_iter(self): expected = { 'id': None, 'smth': None, 'name': 'NAME', } i = iter(self.ekm) found_items = 0 while True: r = next(i, None) if r is None: break self.assertEqual(expected[r[0]], r[1]) found_items += 1 self.assertEqual(len(expected), found_items) def test_modelbase_keys(self): self.assertEqual(set(('id', 'smth', 'name')), set(self.ekm.keys())) self.ekm.update({'a': '1', 'b': '2'}) self.assertEqual(set(('a', 'b', 'id', 'smth', 'name')), set(self.ekm.keys())) def test_modelbase_several_iters(self): mb = ExtraKeysModel() it1 = iter(mb) it2 = iter(mb) self.assertFalse(it1 is it2) self.assertEqual(dict(mb), dict(it1)) self.assertEqual(dict(mb), dict(it2)) def test_extra_keys_empty(self): """Test verifies that by default extra_keys return empty list.""" self.assertEqual([], self.mb._extra_keys) def test_extra_keys_defined(self): """Property _extra_keys will return list with attributes names.""" self.assertEqual(['name'], self.ekm._extra_keys) def test_model_with_extra_keys(self): data = dict(self.ekm) self.assertEqual({'smth': None, 'id': None, 'name': 'NAME'}, data) class ExtraKeysModel(BASE, models.ModelBase): __tablename__ = 'test_model' id = Column(Integer, primary_key=True) smth = Column(String(255)) @property def name(self): return 'NAME' @property def _extra_keys(self): return ['name'] class TimestampMixinTest(test_base.BaseTestCase): def test_timestampmixin_attr(self): methods = ('created_at', 'updated_at') for 
method in methods: self.assertTrue(hasattr(models.TimestampMixin, method), "Method %s() is not found" % method) class SoftDeletedModel(BASE, models.ModelBase, models.SoftDeleteMixin): __tablename__ = 'test_model_soft_deletes' id = Column('id', Integer, primary_key=True) smth = Column('smth', String(255)) class SoftDeleteMixinTest(db_test_base._DbTestCase): def setUp(self): super(SoftDeleteMixinTest, self).setUp() t = BASE.metadata.tables['test_model_soft_deletes'] t.create(self.engine) self.addCleanup(t.drop, self.engine) self.session = self.sessionmaker(autocommit=False) self.addCleanup(self.session.close) @mock.patch('oslo_utils.timeutils.utcnow') def test_soft_delete(self, mock_utcnow): dt = datetime.datetime.utcnow().replace(microsecond=0) mock_utcnow.return_value = dt m = SoftDeletedModel(id=123456, smth='test') self.session.add(m) self.session.commit() self.assertEqual(0, m.deleted) self.assertIsNone(m.deleted_at) m.soft_delete(self.session) self.assertEqual(123456, m.deleted) self.assertIs(dt, m.deleted_at) def test_soft_delete_coerce_deleted_to_integer(self): def listener(conn, cur, stmt, params, context, executemany): if 'insert' in stmt.lower(): # ignore SELECT 1 and BEGIN self.assertNotIn('False', str(params)) event.listen(self.engine, 'before_cursor_execute', listener) self.addCleanup(event.remove, self.engine, 'before_cursor_execute', listener) m = SoftDeletedModel(id=1, smth='test', deleted=False) self.session.add(m) self.session.commit() def test_deleted_set_to_null(self): m = SoftDeletedModel(id=123456, smth='test') self.session.add(m) self.session.commit() m.deleted = None self.session.commit() self.assertIsNone(m.deleted) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_options.py0000664000175000017500000000463000000000000023215 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_db import options from oslo_db.tests import base as test_base class DbApiOptionsTestCase(test_base.BaseTestCase): def setUp(self): super().setUp() self.conf = self.useFixture(config_fixture.Config()).conf self.conf.register_opts(options.database_opts, group='database') def test_session_parameters(self): path = self.create_tempfiles([["tmp", b"""[database] connection=x://y.z max_pool_size=20 max_retries=30 retry_interval=40 max_overflow=50 connection_debug=60 connection_trace=True pool_timeout=7 """]])[0] self.conf(['--config-file', path]) self.assertEqual('x://y.z', self.conf.database.connection) self.assertEqual(20, self.conf.database.max_pool_size) self.assertEqual(30, self.conf.database.max_retries) self.assertEqual(40, self.conf.database.retry_interval) self.assertEqual(50, self.conf.database.max_overflow) self.assertEqual(60, self.conf.database.connection_debug) self.assertEqual(True, self.conf.database.connection_trace) self.assertEqual(7, self.conf.database.pool_timeout) def test_dbapi_parameters(self): path = self.create_tempfiles([['tmp', b'[database]\n' b'backend=test_123\n' ]])[0] self.conf(['--config-file', path]) self.assertEqual('test_123', self.conf.database.backend) def test_set_defaults(self): conf = cfg.ConfigOpts() options.set_defaults(conf, connection='sqlite:///:memory:') self.assertTrue(len(conf.database.items()) > 1) self.assertEqual('sqlite:///:memory:', conf.database.connection) self.assertEqual(None, self.conf.database.mysql_wsrep_sync_wait) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_provision.py0000664000175000017500000001433000000000000023550 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
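# --- Illustrative aside (not part of the original test suite) ---
# A minimal sketch of the [database] option wiring that the
# DbApiOptionsTestCase above exercises; the connection URL is only a
# placeholder default:
from oslo_config import cfg as _cfg_sketch

from oslo_db import options as _options_sketch


def _sketch_database_opts():
    conf = _cfg_sketch.ConfigOpts()
    # registers the [database] option group and overrides its default
    # connection string in one call
    _options_sketch.set_defaults(conf, connection='sqlite:///:memory:')
    return conf.database.connection  # -> 'sqlite:///:memory:'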
import os from unittest import mock from sqlalchemy.engine import url as sqla_url from sqlalchemy import exc as sa_exc from sqlalchemy import inspect from sqlalchemy import schema from sqlalchemy import types from oslo_db import exception from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import provision from oslo_db.sqlalchemy import test_fixtures from oslo_db.tests import base as test_base from oslo_db.tests.sqlalchemy import base as db_test_base class DropAllObjectsTest(db_test_base._DbTestCase): def setUp(self): super(DropAllObjectsTest, self).setUp() self.metadata = metadata = schema.MetaData() schema.Table( 'a', metadata, schema.Column('id', types.Integer, primary_key=True), mysql_engine='InnoDB' ) schema.Table( 'b', metadata, schema.Column('id', types.Integer, primary_key=True), schema.Column('a_id', types.Integer, schema.ForeignKey('a.id')), mysql_engine='InnoDB' ) schema.Table( 'c', metadata, schema.Column('id', types.Integer, primary_key=True), schema.Column('b_id', types.Integer, schema.ForeignKey('b.id')), schema.Column( 'd_id', types.Integer, schema.ForeignKey('d.id', use_alter=True, name='c_d_fk')), mysql_engine='InnoDB' ) schema.Table( 'd', metadata, schema.Column('id', types.Integer, primary_key=True), schema.Column('c_id', types.Integer, schema.ForeignKey('c.id')), mysql_engine='InnoDB' ) metadata.create_all(self.engine, checkfirst=False) # will drop nothing if the test worked self.addCleanup(metadata.drop_all, self.engine, checkfirst=True) def test_drop_all(self): insp = inspect(self.engine) self.assertEqual( set(['a', 'b', 'c', 'd']), set(insp.get_table_names()) ) self._get_default_provisioned_db().\ backend.drop_all_objects(self.engine) insp = inspect(self.engine) self.assertEqual( [], insp.get_table_names() ) class BackendNotAvailableTest(test_base.BaseTestCase): def test_no_dbapi(self): backend = provision.Backend( "postgresql", "postgresql+nosuchdbapi://hostname/dsn") with mock.patch( "sqlalchemy.create_engine", mock.Mock(side_effect=ImportError("nosuchdbapi"))): # NOTE(zzzeek): Call and test the _verify function twice, as it # exercises a different code path on subsequent runs vs. # the first run ex = self.assertRaises( exception.BackendNotAvailable, backend._verify) self.assertEqual( "Backend 'postgresql+nosuchdbapi' is unavailable: " "No DBAPI installed", str(ex)) ex = self.assertRaises( exception.BackendNotAvailable, backend._verify) self.assertEqual( "Backend 'postgresql+nosuchdbapi' is unavailable: " "No DBAPI installed", str(ex)) def test_cant_connect(self): backend = provision.Backend( "postgresql", "postgresql+nosuchdbapi://hostname/dsn") with mock.patch( "sqlalchemy.create_engine", mock.Mock(return_value=mock.Mock(connect=mock.Mock( side_effect=sa_exc.OperationalError( "can't connect", None, None)) )) ): # NOTE(zzzeek): Call and test the _verify function twice, as it # exercises a different code path on subsequent runs vs. 
# the first run ex = self.assertRaises( exception.BackendNotAvailable, backend._verify) self.assertEqual( "Backend 'postgresql+nosuchdbapi' is unavailable: " "Could not connect", str(ex)) ex = self.assertRaises( exception.BackendNotAvailable, backend._verify) self.assertEqual( "Backend 'postgresql+nosuchdbapi' is unavailable: " "Could not connect", str(ex)) class MySQLDropAllObjectsTest( DropAllObjectsTest, db_test_base._MySQLOpportunisticTestCase, ): pass class PostgreSQLDropAllObjectsTest( DropAllObjectsTest, db_test_base._PostgreSQLOpportunisticTestCase, ): pass class AdHocURLTest(test_base.BaseTestCase): def test_sqlite_setup_teardown(self): fixture = test_fixtures.AdHocDbFixture("sqlite:///foo.db") fixture.setUp() self.assertEqual( enginefacade._context_manager._factory._writer_engine.url, sqla_url.make_url("sqlite:///foo.db") ) self.assertTrue(os.path.exists("foo.db")) fixture.cleanUp() self.assertFalse(os.path.exists("foo.db")) def test_mysql_setup_teardown(self): try: mysql_backend = provision.Backend.backend_for_database_type( "mysql") except exception.BackendNotAvailable: self.skipTest("mysql backend not available") mysql_backend.create_named_database("adhoc_test") self.addCleanup( mysql_backend.drop_named_database, "adhoc_test" ) url = mysql_backend.provisioned_database_url("adhoc_test") fixture = test_fixtures.AdHocDbFixture(url) fixture.setUp() self.assertEqual( enginefacade._context_manager._factory._writer_engine.url, url ) fixture.cleanUp() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_sqlalchemy.py0000664000175000017500000007564300000000000023700 0ustar00zuulzuul00000000000000# coding=utf-8 # Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for SQLAlchemy specific code.""" import logging import os from unittest import mock import fixtures from oslo_config import cfg import sqlalchemy from sqlalchemy.engine import base as base_engine from sqlalchemy import exc from sqlalchemy.pool import NullPool from sqlalchemy import sql from sqlalchemy import Column, MetaData, Table from sqlalchemy import Integer, String from sqlalchemy.orm import declarative_base from oslo_db import exception from oslo_db import options as db_options from oslo_db.sqlalchemy import compat from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import engines from oslo_db.sqlalchemy import models from oslo_db.sqlalchemy import session from oslo_db.sqlalchemy import utils from oslo_db.tests import base as test_base from oslo_db.tests.sqlalchemy import base as db_test_base BASE = declarative_base() _TABLE_NAME = '__tmp__test__tmp__' _REGEXP_TABLE_NAME = _TABLE_NAME + "regexp" class RegexpTable(BASE, models.ModelBase): __tablename__ = _REGEXP_TABLE_NAME id = Column(Integer, primary_key=True) bar = Column(String(255)) class RegexpFilterTestCase(db_test_base._DbTestCase): def setUp(self): super(RegexpFilterTestCase, self).setUp() meta = MetaData() test_table = Table(_REGEXP_TABLE_NAME, meta, Column('id', Integer, primary_key=True, nullable=False), Column('bar', String(255))) test_table.create(self.engine) self.addCleanup(test_table.drop, self.engine) def _test_regexp_filter(self, regexp, expected): with enginefacade.writer.using(db_test_base.context): _session = db_test_base.context.session for i in ['10', '20', '♥']: tbl = RegexpTable() tbl.update({'bar': i}) tbl.save(session=_session) regexp_op = RegexpTable.bar.op('REGEXP')(regexp) result = _session.query(RegexpTable).filter(regexp_op).all() self.assertEqual(expected, [r.bar for r in result]) def test_regexp_filter(self): self._test_regexp_filter('10', ['10']) def test_regexp_filter_nomatch(self): self._test_regexp_filter('11', []) def test_regexp_filter_unicode(self): self._test_regexp_filter('♥', ['♥']) def test_regexp_filter_unicode_nomatch(self): self._test_regexp_filter('♦', []) class SQLiteSavepointTest(db_test_base._DbTestCase): def setUp(self): super(SQLiteSavepointTest, self).setUp() meta = MetaData() self.test_table = Table( "test_table", meta, Column('id', Integer, primary_key=True), Column('data', String(10))) self.test_table.create(self.engine) self.addCleanup(self.test_table.drop, self.engine) def test_plain_transaction(self): conn = self.engine.connect() trans = conn.begin() conn.execute( self.test_table.insert(), {'data': 'data 1'} ) self.assertEqual( [(1, 'data 1')], conn.execute( self.test_table.select(). order_by(self.test_table.c.id) ).fetchall() ) trans.rollback() with self.engine.connect() as conn: self.assertEqual( 0, conn.scalar( sqlalchemy.select( sqlalchemy.func.count(self.test_table.c.id), ).select_from(self.test_table) ) ) def test_savepoint_middle(self): with self.engine.begin() as conn: conn.execute( self.test_table.insert(), {'data': 'data 1'} ) savepoint = conn.begin_nested() conn.execute( self.test_table.insert(), {'data': 'data 2'} ) savepoint.rollback() conn.execute( self.test_table.insert(), {'data': 'data 3'} ) self.assertEqual( [(1, 'data 1'), (2, 'data 3')], conn.execute( self.test_table.select(). 
order_by(self.test_table.c.id) ).fetchall() ) def test_savepoint_beginning(self): with self.engine.begin() as conn: savepoint = conn.begin_nested() conn.execute( self.test_table.insert(), {'data': 'data 1'} ) savepoint.rollback() conn.execute( self.test_table.insert(), {'data': 'data 2'} ) self.assertEqual( [(1, 'data 2')], conn.execute( self.test_table.select(). order_by(self.test_table.c.id) ).fetchall() ) class FakeDBAPIConnection(object): def cursor(self): return FakeCursor() class FakeCursor(object): def execute(self, sql): pass class FakeConnectionProxy(object): pass class FakeConnectionRec(object): pass class OperationalError(Exception): pass class ProgrammingError(Exception): pass class QueryParamTest(db_test_base._DbTestCase): def _fixture(self): from sqlalchemy import create_engine def _mock_create_engine(*arg, **kw): return create_engine("sqlite://") return mock.patch( "oslo_db.sqlalchemy.engines.sqlalchemy.create_engine", side_effect=_mock_create_engine) def _normalize_query_dict(self, qdict): # SQLAlchemy 1.4 returns url.query as: # immutabledict({k1: v1, k2: (v2a, v2b, ...), ...}) # that is with tuples not lists for multiparams return { k: list(v) if isinstance(v, tuple) else v for k, v in qdict.items() } def test_add_assorted_params(self): with self._fixture() as ce: engines.create_engine( "mysql+pymysql://foo:bar@bat", connection_parameters="foo=bar&bat=hoho&bat=param2") self.assertEqual( self._normalize_query_dict(ce.mock_calls[0][1][0].query), {'bat': ['hoho', 'param2'], 'foo': 'bar'} ) def test_add_no_params(self): with self._fixture() as ce: engines.create_engine( "mysql+pymysql://foo:bar@bat") self.assertEqual( ce.mock_calls[0][1][0].query, self._normalize_query_dict({}) ) def test_combine_params(self): with self._fixture() as ce: engines.create_engine( "mysql+pymysql://foo:bar@bat/" "?charset=utf8¶m_file=tripleo.cnf", connection_parameters="plugin=sqlalchemy_collectd&" "collectd_host=127.0.0.1&" "bind_host=192.168.1.5") self.assertEqual( self._normalize_query_dict(ce.mock_calls[0][1][0].query), { 'bind_host': '192.168.1.5', 'charset': 'utf8', 'collectd_host': '127.0.0.1', 'param_file': 'tripleo.cnf', 'plugin': 'sqlalchemy_collectd' } ) def test_combine_multi_params(self): with self._fixture() as ce: engines.create_engine( "mysql+pymysql://foo:bar@bat/" "?charset=utf8¶m_file=tripleo.cnf&plugin=connmon", connection_parameters="plugin=sqlalchemy_collectd&" "collectd_host=127.0.0.1&" "bind_host=192.168.1.5") self.assertEqual( self._normalize_query_dict(ce.mock_calls[0][1][0].query), { 'bind_host': '192.168.1.5', 'charset': 'utf8', 'collectd_host': '127.0.0.1', 'param_file': 'tripleo.cnf', 'plugin': ['connmon', 'sqlalchemy_collectd'] } ) class MySQLDefaultModeTestCase(db_test_base._MySQLOpportunisticTestCase): def test_default_is_traditional(self): with self.engine.connect() as conn: sql_mode = conn.execute( sql.text("SHOW VARIABLES LIKE 'sql_mode'") ).first()[1] self.assertIn("TRADITIONAL", sql_mode) class MySQLModeTestCase(db_test_base._MySQLOpportunisticTestCase): def __init__(self, *args, **kwargs): super(MySQLModeTestCase, self).__init__(*args, **kwargs) # By default, run in empty SQL mode. # Subclasses override this with specific modes. 
self.mysql_mode = '' def setUp(self): super(MySQLModeTestCase, self).setUp() mode_engine = session.create_engine( self.engine.url, mysql_sql_mode=self.mysql_mode) self.connection = mode_engine.connect() meta = MetaData() self.test_table = Table(_TABLE_NAME + "mode", meta, Column('id', Integer, primary_key=True), Column('bar', String(255))) with self.connection.begin(): self.test_table.create(self.connection) def cleanup(): with self.connection.begin(): self.test_table.drop(self.connection) self.connection.close() mode_engine.dispose() self.addCleanup(cleanup) def _test_string_too_long(self, value): with self.connection.begin(): self.connection.execute(self.test_table.insert(), {'bar': value}) result = self.connection.execute(self.test_table.select()) return result.fetchone().bar def test_string_too_long(self): value = 'a' * 512 # String is too long. # With no SQL mode set, this gets truncated. self.assertNotEqual(value, self._test_string_too_long(value)) class MySQLStrictAllTablesModeTestCase(MySQLModeTestCase): "Test data integrity enforcement in MySQL STRICT_ALL_TABLES mode." def __init__(self, *args, **kwargs): super(MySQLStrictAllTablesModeTestCase, self).__init__(*args, **kwargs) self.mysql_mode = 'STRICT_ALL_TABLES' def test_string_too_long(self): value = 'a' * 512 # String is too long. # With STRICT_ALL_TABLES or TRADITIONAL mode set, this is an error. self.assertRaises(exception.DBError, self._test_string_too_long, value) class MySQLTraditionalModeTestCase(MySQLStrictAllTablesModeTestCase): """Test data integrity enforcement in MySQL TRADITIONAL mode. Since TRADITIONAL includes STRICT_ALL_TABLES, this inherits all STRICT_ALL_TABLES mode tests. """ def __init__(self, *args, **kwargs): super(MySQLTraditionalModeTestCase, self).__init__(*args, **kwargs) self.mysql_mode = 'TRADITIONAL' class EngineFacadeTestCase(test_base.BaseTestCase): def setUp(self): super(EngineFacadeTestCase, self).setUp() self.facade = session.EngineFacade('sqlite://') def test_get_engine(self): eng1 = self.facade.get_engine() eng2 = self.facade.get_engine() self.assertIs(eng1, eng2) def test_get_session(self): ses1 = self.facade.get_session() ses2 = self.facade.get_session() self.assertIsNot(ses1, ses2) def test_get_session_arguments_override_default_settings(self): ses = self.facade.get_session(expire_on_commit=True) self.assertTrue(ses.expire_on_commit) @mock.patch('oslo_db.sqlalchemy.orm.get_maker') @mock.patch('oslo_db.sqlalchemy.engines.create_engine') def test_creation_from_config(self, create_engine, get_maker): conf = cfg.ConfigOpts() conf.register_opts(db_options.database_opts, group='database') overrides = { 'connection': 'sqlite:///:memory:', 'slave_connection': None, 'connection_debug': 100, 'max_pool_size': 10, 'mysql_sql_mode': 'TRADITIONAL', } for optname, optvalue in overrides.items(): conf.set_override(optname, optvalue, group='database') session.EngineFacade.from_config(conf, expire_on_commit=True) create_engine.assert_called_once_with( sql_connection='sqlite:///:memory:', connection_debug=100, max_pool_size=10, mysql_sql_mode='TRADITIONAL', mysql_wsrep_sync_wait=None, sqlite_fk=False, connection_recycle_time=mock.ANY, retry_interval=mock.ANY, max_retries=mock.ANY, max_overflow=mock.ANY, connection_trace=mock.ANY, sqlite_synchronous=mock.ANY, pool_timeout=mock.ANY, thread_checkin=mock.ANY, json_serializer=None, json_deserializer=None, connection_parameters='', logging_name=mock.ANY, ) get_maker.assert_called_once_with( engine=create_engine(), expire_on_commit=True, ) def 
test_slave_connection(self): paths = self.create_tempfiles([('db.master', ''), ('db.slave', '')], ext='') master_path = 'sqlite:///' + paths[0] slave_path = 'sqlite:///' + paths[1] facade = session.EngineFacade( sql_connection=master_path, slave_connection=slave_path ) master = facade.get_engine() self.assertEqual(master_path, str(master.url)) slave = facade.get_engine(use_slave=True) self.assertEqual(slave_path, str(slave.url)) master_session = facade.get_session() self.assertEqual(master_path, str(master_session.bind.url)) slave_session = facade.get_session(use_slave=True) self.assertEqual(slave_path, str(slave_session.bind.url)) def test_slave_connection_string_not_provided(self): master_path = 'sqlite:///' + self.create_tempfiles( [('db.master', '')], ext='')[0] facade = session.EngineFacade(sql_connection=master_path) master = facade.get_engine() slave = facade.get_engine(use_slave=True) self.assertIs(master, slave) self.assertEqual(master_path, str(master.url)) master_session = facade.get_session() self.assertEqual(master_path, str(master_session.bind.url)) slave_session = facade.get_session(use_slave=True) self.assertEqual(master_path, str(slave_session.bind.url)) class SQLiteConnectTest(test_base.BaseTestCase): def _fixture(self, **kw): return session.create_engine("sqlite://", **kw) def test_sqlite_fk_listener(self): engine = self._fixture(sqlite_fk=True) with engine.connect() as conn: self.assertEqual( 1, conn.execute( sql.text('pragma foreign_keys') ).scalars().first(), ) engine = self._fixture(sqlite_fk=False) with engine.connect() as conn: self.assertEqual( 0, conn.execute( sql.text('pragma foreign_keys') ).scalars().first(), ) def test_sqlite_synchronous_listener(self): engine = self._fixture() # "The default setting is synchronous=FULL." (e.g. 
2) # http://www.sqlite.org/pragma.html#pragma_synchronous with engine.connect() as conn: self.assertEqual( 2, conn.execute(sql.text('pragma synchronous')).scalars().first(), ) engine = self._fixture(sqlite_synchronous=False) with engine.connect() as conn: self.assertEqual( 0, conn.execute(sql.text('pragma synchronous')).scalars().first(), ) class MysqlConnectTest(db_test_base._MySQLOpportunisticTestCase): def _fixture(self, sql_mode=None, mysql_wsrep_sync_wait=None): kw = {} if sql_mode is not None: kw["mysql_sql_mode"] = sql_mode if mysql_wsrep_sync_wait is not None: kw["mysql_wsrep_sync_wait"] = mysql_wsrep_sync_wait return session.create_engine(self.engine.url, **kw) def _assert_sql_mode(self, engine, sql_mode_present, sql_mode_non_present): with engine.connect() as conn: mode = conn.execute( sql.text("SHOW VARIABLES LIKE 'sql_mode'") ).fetchone()[1] self.assertIn( sql_mode_present, mode ) if sql_mode_non_present: self.assertNotIn( sql_mode_non_present, mode ) def test_mysql_wsrep_sync_wait_listener(self): with self.engine.connect() as conn: try: conn.execute( sql.text("show variables like '%wsrep_sync_wait%'") ).scalars(1).one() except exc.NoResultFound: self.skipTest("wsrep_sync_wait option is not available") engine = self._fixture() with engine.connect() as conn: self.assertEqual( "0", conn.execute( sql.text("show variables like '%wsrep_sync_wait%'") ).scalars(1).one(), ) for wsrep_val in (2, 1, 5): engine = self._fixture(mysql_wsrep_sync_wait=wsrep_val) with engine.connect() as conn: self.assertEqual( str(wsrep_val), conn.execute( sql.text("show variables like '%wsrep_sync_wait%'") ).scalars(1).one(), ) def test_set_mode_traditional(self): engine = self._fixture(sql_mode='TRADITIONAL') self._assert_sql_mode(engine, "TRADITIONAL", "ANSI") def test_set_mode_ansi(self): engine = self._fixture(sql_mode='ANSI') self._assert_sql_mode(engine, "ANSI", "TRADITIONAL") def test_set_mode_no_mode(self): # If _mysql_set_mode_callback is called with sql_mode=None, then # the SQL mode is NOT set on the connection. # get the GLOBAL sql_mode, not the @@SESSION, so that # we get what is configured for the MySQL database, as opposed # to what our own session.create_engine() has set it to. with self.engine.connect() as conn: expected = conn.execute( sql.text("SELECT @@GLOBAL.sql_mode") ).scalar() engine = self._fixture(sql_mode=None) self._assert_sql_mode(engine, expected, None) def test_fail_detect_mode(self): # If "SHOW VARIABLES LIKE 'sql_mode'" results in no row, then # we get a log indicating can't detect the mode. log = self.useFixture(fixtures.FakeLogger(level=logging.WARN)) mysql_conn = self.engine.raw_connection() self.addCleanup(mysql_conn.close) mysql_conn.detach() mysql_cursor = mysql_conn.cursor() def execute(statement, parameters=()): if "SHOW VARIABLES LIKE 'sql_mode'" in statement: statement = "SHOW VARIABLES LIKE 'i_dont_exist'" return mysql_cursor.execute(statement, parameters) test_engine = sqlalchemy.create_engine(self.engine.url, _initialize=False) with mock.patch.object( test_engine.pool, '_creator', mock.Mock( return_value=mock.Mock( cursor=mock.Mock( return_value=mock.Mock( execute=execute, fetchone=mysql_cursor.fetchone, fetchall=mysql_cursor.fetchall ) ) ) ) ): engines._init_events.dispatch_on_drivername("mysql")(test_engine) test_engine.raw_connection() self.assertIn('Unable to detect effective SQL mode', log.output) def test_logs_real_mode(self): # If "SHOW VARIABLES LIKE 'sql_mode'" results in a value, then # we get a log with the value. 
        log = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
        engine = self._fixture(sql_mode='TRADITIONAL')
        with engine.connect() as conn:
            actual_mode = conn.execute(
                sql.text("SHOW VARIABLES LIKE 'sql_mode'")
            ).fetchone()[1]
        self.assertIn('MySQL server mode set to %s' % actual_mode,
                      log.output)

    def test_warning_when_not_traditional(self):
        # If "SHOW VARIABLES LIKE 'sql_mode'" results in a value that doesn't
        # include 'TRADITIONAL', then a warning is logged.
        log = self.useFixture(fixtures.FakeLogger(level=logging.WARN))
        self._fixture(sql_mode='ANSI')
        self.assertIn("consider enabling TRADITIONAL or STRICT_ALL_TABLES",
                      log.output)

    def test_no_warning_when_traditional(self):
        # If "SHOW VARIABLES LIKE 'sql_mode'" results in a value that includes
        # 'TRADITIONAL', then no warning is logged.
        log = self.useFixture(fixtures.FakeLogger(level=logging.WARN))
        self._fixture(sql_mode='TRADITIONAL')
        self.assertNotIn("consider enabling TRADITIONAL or STRICT_ALL_TABLES",
                         log.output)

    def test_no_warning_when_strict_all_tables(self):
        # If "SHOW VARIABLES LIKE 'sql_mode'" results in a value that includes
        # 'STRICT_ALL_TABLES', then no warning is logged.
        log = self.useFixture(fixtures.FakeLogger(level=logging.WARN))
        self._fixture(sql_mode='TRADITIONAL')
        self.assertNotIn("consider enabling TRADITIONAL or STRICT_ALL_TABLES",
                         log.output)


class CreateEngineTest(test_base.BaseTestCase):
    """Test that dialect-specific arguments/listeners are set up correctly."""

    def setUp(self):
        super(CreateEngineTest, self).setUp()
        self.args = {'connect_args': {}}

    def test_queuepool_args(self):
        engines._init_connection_args(
            utils.make_url("mysql+pymysql://u:p@host/test"),
            self.args,
            {'max_pool_size': 10, 'max_overflow': 10},
        )
        self.assertEqual(10, self.args['pool_size'])
        self.assertEqual(10, self.args['max_overflow'])

    def test_sqlite_memory_pool_args(self):
        for _url in ("sqlite://", "sqlite:///:memory:"):
            engines._init_connection_args(
                utils.make_url(_url),
                self.args,
                {'max_pool_size': 10, 'max_overflow': 10},
            )

            # queuepool arguments are not present
            self.assertNotIn('pool_size', self.args)
            self.assertNotIn('max_overflow', self.args)

            self.assertEqual(
                False, self.args['connect_args']['check_same_thread'])

            # due to memory connection
            self.assertIn('poolclass', self.args)

    def test_sqlite_file_pool_args(self):
        engines._init_connection_args(
            utils.make_url("sqlite:///somefile.db"),
            self.args,
            {'max_pool_size': 10, 'max_overflow': 10},
        )

        # queuepool arguments are not present
        self.assertNotIn('pool_size', self.args)
        self.assertNotIn('max_overflow', self.args)

        self.assertFalse(self.args['connect_args'])

        if not compat.sqla_2:
            # NullPool is the default for file based connections,
            # no need to specify this
            self.assertNotIn('poolclass', self.args)
        else:
            self.assertIs(self.args["poolclass"], NullPool)

    def _test_mysql_connect_args_default(self, connect_args):
        self.assertEqual({'charset': 'utf8', 'use_unicode': 1}, connect_args)

    def test_mysql_connect_args_default(self):
        engines._init_connection_args(
            utils.make_url("mysql://u:p@host/test"), self.args, {})
        self._test_mysql_connect_args_default(self.args['connect_args'])

    def test_mysql_pymysql_connect_args_default(self):
        engines._init_connection_args(
            utils.make_url("mysql+pymysql://u:p@host/test"), self.args, {})
        self.assertEqual({'charset': 'utf8'}, self.args['connect_args'])

    def test_mysql_mysqldb_connect_args_default(self):
        engines._init_connection_args(
            utils.make_url("mysql+mysqldb://u:p@host/test"), self.args, {})
        self._test_mysql_connect_args_default(self.args['connect_args'])

    def
test_postgresql_connect_args_default(self): engines._init_connection_args( utils.make_url("postgresql://u:p@host/test"), self.args, {}) self.assertEqual('utf8', self.args['client_encoding']) self.assertFalse(self.args['connect_args']) def test_mysqlconnector_raise_on_warnings_default(self): engines._init_connection_args( utils.make_url("mysql+mysqlconnector://u:p@host/test"), self.args, {}) self.assertEqual(False, self.args['connect_args']['raise_on_warnings']) def test_mysqlconnector_raise_on_warnings_override(self): engines._init_connection_args( utils.make_url( "mysql+mysqlconnector://u:p@host/test" "?raise_on_warnings=true"), self.args, {} ) self.assertNotIn('raise_on_warnings', self.args['connect_args']) def test_thread_checkin(self): with mock.patch("sqlalchemy.event.listens_for"): with mock.patch("sqlalchemy.event.listen") as listen_evt: engines._init_events.dispatch_on_drivername( "sqlite")(mock.Mock()) self.assertEqual( listen_evt.mock_calls[0][1][-1], engines._thread_yield ) def test_warn_on_missing_driver(self): warnings = mock.Mock() def warn_interpolate(msg, args): # test the interpolation itself to ensure the password # is concealed warnings.warning(msg % (args, )) with mock.patch( "oslo_db.sqlalchemy.engines.LOG.warning", warn_interpolate): engines._vet_url( utils.make_url("mysql://scott:tiger@some_host/some_db")) engines._vet_url(utils.make_url( "mysql+mysqldb://scott:tiger@some_host/some_db")) engines._vet_url(utils.make_url( "mysql+pymysql://scott:tiger@some_host/some_db")) engines._vet_url(utils.make_url( "postgresql+psycopg2://scott:tiger@some_host/some_db")) engines._vet_url(utils.make_url( "postgresql://scott:tiger@some_host/some_db")) self.assertEqual( [ mock.call.warning( "URL mysql://scott:***@some_host/some_db does not contain " "a '+drivername' portion, " "and will make use of a default driver. " "A full dbname+drivername:// protocol is recommended. " "For MySQL, it is strongly recommended that " "mysql+pymysql:// " "be specified for maximum service compatibility", ), mock.call.warning( "URL postgresql://scott:***@some_host/some_db does not " "contain a '+drivername' portion, " "and will make use of a default driver. " "A full dbname+drivername:// protocol is recommended." ) ], warnings.mock_calls ) class ProcessGuardTest(db_test_base._DbTestCase): def test_process_guard(self): self.engine.dispose() def get_parent_pid(): return 4 def get_child_pid(): return 5 with mock.patch("os.getpid", get_parent_pid): with self.engine.connect() as conn: dbapi_id = id(compat.driver_connection(conn)) with mock.patch("os.getpid", get_child_pid): with self.engine.connect() as conn: new_dbapi_id = id(compat.driver_connection(conn)) self.assertNotEqual(dbapi_id, new_dbapi_id) # ensure it doesn't trip again with mock.patch("os.getpid", get_child_pid): with self.engine.connect() as conn: newer_dbapi_id = id(compat.driver_connection(conn)) self.assertEqual(new_dbapi_id, newer_dbapi_id) class PatchStacktraceTest(db_test_base._DbTestCase): def test_trace(self): engine = self.engine # NOTE(viktors): The code in oslo_db.sqlalchemy.session filters out # lines from modules under oslo_db, so we should remove # "oslo_db/" from file path in traceback. 
        import traceback
        orig_extract_stack = traceback.extract_stack

        def extract_stack():
            return [
                (row[0].replace("oslo_db/", ""), row[1], row[2], row[3])
                for row in orig_extract_stack()
            ]

        with mock.patch("traceback.extract_stack", side_effect=extract_stack):
            engines._add_trace_comments(engine)
            conn = engine.connect()
            orig_do_exec = engine.dialect.do_execute
            with mock.patch.object(engine.dialect, "do_execute") as mock_exec:
                mock_exec.side_effect = orig_do_exec
                conn.execute(sql.text("select 1"))

            call = mock_exec.mock_calls[0]

            # we're the caller, see that we're in there
            caller = os.path.join("tests", "sqlalchemy", "test_sqlalchemy.py")
            self.assertIn(caller, call[1][1])


class MySQLConnectPingListenerTest(db_test_base._MySQLOpportunisticTestCase):
    def test__connect_ping_listener(self):
        for idx in range(2):
            with self.engine.begin() as conn:
                self.assertIsInstance(conn._transaction,
                                      base_engine.RootTransaction)
                # TODO(ralonsoh): drop this check once SQLAlchemy minimum
                # version is 2.0.
                if compat.sqla_2:
                    engines._connect_ping_listener(conn)
                    self.assertIsNone(conn._transaction)
                else:
                    engines._connect_ping_listener(conn, False)
                    self.assertIsInstance(conn._transaction,
                                          base_engine.RootTransaction)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0
oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_types.py0000664000175000017500000000710400000000000022665 0ustar00zuulzuul00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests for JSON SQLAlchemy types."""

from sqlalchemy import Column, Integer
from sqlalchemy.dialects import mysql
from sqlalchemy.orm import declarative_base

from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import types
from oslo_db.tests.sqlalchemy import base as test_base

BASE = declarative_base()


class JsonTable(BASE, models.ModelBase):
    __tablename__ = 'test_json_types'
    id = Column(Integer, primary_key=True)
    jdict = Column(types.JsonEncodedDict)
    jlist = Column(types.JsonEncodedList)
    json = Column(types.JsonEncodedType)


class JsonTypesTestCase(test_base._DbTestCase):
    def setUp(self):
        super(JsonTypesTestCase, self).setUp()
        JsonTable.__table__.create(self.engine)
        self.addCleanup(JsonTable.__table__.drop, self.engine)
        self.session = self.sessionmaker()
        self.addCleanup(self.session.close)

    def test_default_value(self):
        with self.session.begin():
            JsonTable(id=1).save(self.session)
        obj = self.session.query(JsonTable).filter_by(id=1).one()
        self.assertEqual([], obj.jlist)
        self.assertEqual({}, obj.jdict)
        self.assertIsNone(obj.json)

    def test_dict(self):
        test = {'a': 42, 'b': [1, 2, 3]}
        with self.session.begin():
            JsonTable(id=1, jdict=test).save(self.session)
        obj = self.session.query(JsonTable).filter_by(id=1).one()
        self.assertEqual(test, obj.jdict)

    def test_list(self):
        test = [1, True, "hello", {}]
        with self.session.begin():
            JsonTable(id=1, jlist=test).save(self.session)
        obj = self.session.query(JsonTable).filter_by(id=1).one()
        self.assertEqual(test, obj.jlist)

    def test_dict_type_check(self):
        self.assertRaises(db_exc.DBError,
                          JsonTable(id=1, jdict=[]).save, self.session)

    def test_list_type_check(self):
        self.assertRaises(db_exc.DBError,
                          JsonTable(id=1, jlist={}).save, self.session)

    def test_generic(self):
        tested = [
            "string",
            42,
            True,
            None,
            [1, 2, 3],
            {'a': 'b'}
        ]
        for i, test in enumerate(tested):
            JsonTable(id=i, json=test).save(self.session)
            obj = self.session.query(JsonTable).filter_by(id=i).one()
            self.assertEqual(test, obj.json)

    def test_mysql_variants(self):
        self.assertEqual(
            "LONGTEXT",
            str(
                types.JsonEncodedDict(mysql_as_long=True).compile(
                    dialect=mysql.dialect())
            )
        )
        self.assertEqual(
            "MEDIUMTEXT",
            str(
                types.JsonEncodedDict(mysql_as_medium=True).compile(
                    dialect=mysql.dialect())
            )
        )
        self.assertRaises(
            TypeError,
            lambda: types.JsonEncodedDict(
                mysql_as_long=True,
                mysql_as_medium=True)
        )
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0
oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_update_match.py0000664000175000017500000003277700000000000024165 0ustar00zuulzuul00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import orm from sqlalchemy import schema from sqlalchemy import sql from sqlalchemy import types as sqltypes from oslo_db.sqlalchemy import update_match from oslo_db.tests import base as test_base from oslo_db.tests.sqlalchemy import base as db_test_base Base = orm.declarative_base() class MyModel(Base): __tablename__ = 'my_table' id = schema.Column(sqltypes.Integer, primary_key=True) uuid = schema.Column(sqltypes.String(36), nullable=False, unique=True) x = schema.Column(sqltypes.Integer) y = schema.Column(sqltypes.String(40)) z = schema.Column(sqltypes.String(40)) class ManufactureCriteriaTest(test_base.BaseTestCase): def test_instance_criteria_basic(self): specimen = MyModel( y='y1', z='z3', uuid='136254d5-3869-408f-9da7-190e0072641a' ) self.assertEqual( "my_table.uuid = :uuid_1 AND my_table.y = :y_1 " "AND my_table.z = :z_1", str(update_match.manufacture_entity_criteria(specimen).compile()) ) def test_instance_criteria_basic_wnone(self): specimen = MyModel( y='y1', z=None, uuid='136254d5-3869-408f-9da7-190e0072641a' ) self.assertEqual( "my_table.uuid = :uuid_1 AND my_table.y = :y_1 " "AND my_table.z IS NULL", str(update_match.manufacture_entity_criteria(specimen).compile()) ) def test_instance_criteria_tuples(self): specimen = MyModel( y='y1', z=('z1', 'z2'), ) self.assertRegex( str(update_match.manufacture_entity_criteria(specimen).compile()), r"my_table.y = :y_1 AND my_table.z IN \(.+?\)", ) def test_instance_criteria_tuples_wnone(self): specimen = MyModel( y='y1', z=('z1', 'z2', None), ) self.assertRegex( str(update_match.manufacture_entity_criteria(specimen).compile()), r"my_table.y = :y_1 AND \(my_table.z IS NULL OR " r"my_table.z IN \(.+?\)\)", ) def test_instance_criteria_none_list(self): specimen = MyModel( y='y1', z=[None], ) self.assertEqual( "my_table.y = :y_1 AND my_table.z IS NULL", str(update_match.manufacture_entity_criteria(specimen).compile()) ) class UpdateMatchTest(db_test_base._DbTestCase): def setUp(self): super(UpdateMatchTest, self).setUp() Base.metadata.create_all(self.engine) self.addCleanup(Base.metadata.drop_all, self.engine) # self.engine.echo = 'debug' self.session = self.sessionmaker(autocommit=False) self.addCleanup(self.session.close) self.session.add_all([ MyModel( id=1, uuid='23cb9224-9f8e-40fe-bd3c-e7577b7af37d', x=5, y='y1', z='z1'), MyModel( id=2, uuid='136254d5-3869-408f-9da7-190e0072641a', x=6, y='y1', z='z2'), MyModel( id=3, uuid='094eb162-d5df-494b-a458-a91a1b2d2c65', x=7, y='y1', z='z1'), MyModel( id=4, uuid='94659b3f-ea1f-4ffd-998d-93b28f7f5b70', x=8, y='y2', z='z2'), MyModel( id=5, uuid='bdf3893c-ee3c-40a0-bc79-960adb6cd1d4', x=8, y='y2', z=None), ]) self.session.commit() def _assert_row(self, pk, values): row = self.session.execute( sql.select(MyModel.__table__).where(MyModel.__table__.c.id == pk) ).first() values['id'] = pk self.assertEqual(values, dict(row._mapping)) def test_update_specimen_successful(self): uuid = '136254d5-3869-408f-9da7-190e0072641a' specimen = MyModel( y='y1', z='z2', uuid=uuid ) result = self.session.query(MyModel).update_on_match( specimen, 'uuid', values={'x': 9, 'z': 'z3'} ) self.assertEqual(uuid, result.uuid) self.assertEqual(2, result.id) self.assertEqual('z3', result.z) self.assertIn(result, self.session) self._assert_row( 2, { 'uuid': '136254d5-3869-408f-9da7-190e0072641a', 'x': 9, 'y': 'y1', 'z': 'z3' } ) def test_update_specimen_include_only(self): uuid = '136254d5-3869-408f-9da7-190e0072641a' specimen = MyModel( y='y9', z='z5', x=6, uuid=uuid ) # Query the object first to test that we merge 
when the object is # already cached in the session. self.session.query(MyModel).filter(MyModel.uuid == uuid).one() result = self.session.query(MyModel).update_on_match( specimen, 'uuid', values={'x': 9, 'z': 'z3'}, include_only=('x', ) ) self.assertEqual(uuid, result.uuid) self.assertEqual(2, result.id) self.assertEqual('z3', result.z) self.assertIn(result, self.session) self.assertNotIn(result, self.session.dirty) self._assert_row( 2, { 'uuid': '136254d5-3869-408f-9da7-190e0072641a', 'x': 9, 'y': 'y1', 'z': 'z3' } ) def test_update_specimen_no_rows(self): specimen = MyModel( y='y1', z='z3', uuid='136254d5-3869-408f-9da7-190e0072641a' ) exc = self.assertRaises( update_match.NoRowsMatched, self.session.query(MyModel).update_on_match, specimen, 'uuid', values={'x': 9, 'z': 'z3'} ) self.assertEqual("Zero rows matched for 3 attempts", exc.args[0]) def test_update_specimen_process_query_no_rows(self): specimen = MyModel( y='y1', z='z2', uuid='136254d5-3869-408f-9da7-190e0072641a' ) def process_query(query): return query.filter_by(x=10) exc = self.assertRaises( update_match.NoRowsMatched, self.session.query(MyModel).update_on_match, specimen, 'uuid', values={'x': 9, 'z': 'z3'}, process_query=process_query ) self.assertEqual("Zero rows matched for 3 attempts", exc.args[0]) def test_update_specimen_given_query_no_rows(self): specimen = MyModel( y='y1', z='z2', uuid='136254d5-3869-408f-9da7-190e0072641a' ) query = self.session.query(MyModel).filter_by(x=10) exc = self.assertRaises( update_match.NoRowsMatched, query.update_on_match, specimen, 'uuid', values={'x': 9, 'z': 'z3'}, ) self.assertEqual("Zero rows matched for 3 attempts", exc.args[0]) def test_update_specimen_multi_rows(self): specimen = MyModel( y='y1', z='z1', ) exc = self.assertRaises( update_match.MultiRowsMatched, self.session.query(MyModel).update_on_match, specimen, 'y', values={'x': 9, 'z': 'z3'} ) self.assertEqual("2 rows matched; expected one", exc.args[0]) def test_update_specimen_query_mismatch_error(self): specimen = MyModel( y='y1' ) q = self.session.query(MyModel.x, MyModel.y) exc = self.assertRaises( AssertionError, q.update_on_match, specimen, 'y', values={'x': 9, 'z': 'z3'}, ) self.assertEqual("Query does not match given specimen", exc.args[0]) def test_custom_handle_failure_raise_new(self): class MyException(Exception): pass def handle_failure(query): # ensure the query is usable result = query.count() self.assertEqual(0, result) raise MyException("test: %d" % result) specimen = MyModel( y='y1', z='z3', uuid='136254d5-3869-408f-9da7-190e0072641a' ) exc = self.assertRaises( MyException, self.session.query(MyModel).update_on_match, specimen, 'uuid', values={'x': 9, 'z': 'z3'}, handle_failure=handle_failure ) self.assertEqual("test: 0", exc.args[0]) def test_custom_handle_failure_cancel_raise(self): uuid = '136254d5-3869-408f-9da7-190e0072641a' class MyException(Exception): pass def handle_failure(query): # ensure the query is usable result = query.count() self.assertEqual(0, result) return True specimen = MyModel( id=2, y='y1', z='z3', uuid=uuid ) result = self.session.query(MyModel).update_on_match( specimen, 'uuid', values={'x': 9, 'z': 'z3'}, handle_failure=handle_failure ) self.assertEqual(uuid, result.uuid) self.assertEqual(2, result.id) self.assertEqual('z3', result.z) self.assertEqual(9, result.x) self.assertIn(result, self.session) def test_update_specimen_on_none_successful(self): uuid = 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4' specimen = MyModel( y='y2', z=None, uuid=uuid ) result = 
self.session.query(MyModel).update_on_match( specimen, 'uuid', values={'x': 9, 'z': 'z3'}, ) self.assertIn(result, self.session) self.assertEqual(uuid, result.uuid) self.assertEqual(5, result.id) self.assertEqual('z3', result.z) self._assert_row( 5, { 'uuid': 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4', 'x': 9, 'y': 'y2', 'z': 'z3' } ) def test_update_specimen_on_multiple_nonnone_successful(self): uuid = '094eb162-d5df-494b-a458-a91a1b2d2c65' specimen = MyModel( y=('y1', 'y2'), x=(5, 7), uuid=uuid ) result = self.session.query(MyModel).update_on_match( specimen, 'uuid', values={'x': 9, 'z': 'z3'}, ) self.assertIn(result, self.session) self.assertEqual(uuid, result.uuid) self.assertEqual(3, result.id) self.assertEqual('z3', result.z) self._assert_row( 3, { 'uuid': '094eb162-d5df-494b-a458-a91a1b2d2c65', 'x': 9, 'y': 'y1', 'z': 'z3' } ) def test_update_specimen_on_multiple_wnone_successful(self): uuid = 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4' specimen = MyModel( y=('y1', 'y2'), x=(8, 7), z=('z1', 'z2', None), uuid=uuid ) result = self.session.query(MyModel).update_on_match( specimen, 'uuid', values={'x': 9, 'z': 'z3'}, ) self.assertIn(result, self.session) self.assertEqual(uuid, result.uuid) self.assertEqual(5, result.id) self.assertEqual('z3', result.z) self._assert_row( 5, { 'uuid': 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4', 'x': 9, 'y': 'y2', 'z': 'z3' } ) def test_update_returning_pk_matched(self): pk = self.session.query(MyModel).\ filter_by(y='y1', z='z2').update_returning_pk( {'x': 9, 'z': 'z3'}, ('uuid', '136254d5-3869-408f-9da7-190e0072641a') ) self.assertEqual((2,), pk) self._assert_row( 2, { 'uuid': '136254d5-3869-408f-9da7-190e0072641a', 'x': 9, 'y': 'y1', 'z': 'z3' } ) def test_update_returning_wrong_uuid(self): exc = self.assertRaises( update_match.NoRowsMatched, self.session.query(MyModel). filter_by(y='y1', z='z2').update_returning_pk, {'x': 9, 'z': 'z3'}, ('uuid', '23cb9224-9f8e-40fe-bd3c-e7577b7af37d') ) self.assertEqual("No rows matched the UPDATE", exc.args[0]) def test_update_returning_no_rows(self): exc = self.assertRaises( update_match.NoRowsMatched, self.session.query(MyModel). filter_by(y='y1', z='z3').update_returning_pk, {'x': 9, 'z': 'z3'}, ('uuid', '136254d5-3869-408f-9da7-190e0072641a') ) self.assertEqual("No rows matched the UPDATE", exc.args[0]) def test_update_multiple_rows(self): exc = self.assertRaises( update_match.MultiRowsMatched, self.session.query(MyModel). filter_by(y='y1', z='z1').update_returning_pk, {'x': 9, 'z': 'z3'}, ('y', 'y1') ) self.assertEqual("2 rows matched; expected one", exc.args[0]) class PGUpdateMatchTest( UpdateMatchTest, db_test_base._PostgreSQLOpportunisticTestCase, ): pass class MySQLUpdateMatchTest( UpdateMatchTest, db_test_base._MySQLOpportunisticTestCase, ): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/sqlalchemy/test_utils.py0000664000175000017500000015064000000000000022665 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me). # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from urllib import parse import fixtures import sqlalchemy from sqlalchemy import Boolean, Index, Integer, DateTime, String from sqlalchemy import MetaData, Table, Column from sqlalchemy import ForeignKey, ForeignKeyConstraint from sqlalchemy.dialects.postgresql import psycopg2 from sqlalchemy.exc import OperationalError from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import column_property from sqlalchemy.orm import declarative_base from sqlalchemy.orm import registry from sqlalchemy.orm import Session from sqlalchemy import sql from sqlalchemy.sql.expression import cast from sqlalchemy.sql import select from sqlalchemy.types import UserDefinedType from oslo_db import exception from oslo_db.sqlalchemy import models from oslo_db.sqlalchemy import provision from oslo_db.sqlalchemy import session from oslo_db.sqlalchemy import utils from oslo_db.tests import base as test_base from oslo_db.tests.sqlalchemy import base as db_test_base Base = declarative_base() class TestSanitizeDbUrl(test_base.BaseTestCase): def test_url_with_cred(self): db_url = 'myproto://johndoe:secret@localhost/myschema' expected = 'myproto://****:****@localhost/myschema' actual = utils.sanitize_db_url(db_url) self.assertEqual(expected, actual) def test_url_with_no_cred(self): db_url = 'sqlite:///mysqlitefile' actual = utils.sanitize_db_url(db_url) self.assertEqual(db_url, actual) class CustomType(UserDefinedType): """Dummy column type for testing unsupported types.""" def get_col_spec(self): return "CustomType" class FakeTable(Base): __tablename__ = 'fake_table' user_id = Column(String(50), primary_key=True) project_id = Column(String(50)) snapshot_id = Column(String(50)) updated_at = Column(DateTime, nullable=True) enabled = Column(Boolean, default=True) _some_hybrid = column_property(project_id + snapshot_id) @hybrid_property def some_hybrid(self): raise NotImplementedError() @some_hybrid.expression def some_hybrid(cls): return cls._some_hybrid def foo(self): pass class FakeTableJoinedInh(FakeTable): __tablename__ = 'fake_table_inh' id = Column(String(50), ForeignKey('fake_table.user_id'), primary_key=True) class FakeTableSingleInh(FakeTable): __mapper_args__ = {'polymorphic_identity': 'foo'} class FakeTableWithMultipleKeys(Base): __tablename__ = 'fake_table_multiple_keys' key1 = Column(String(50), primary_key=True) key2 = Column(String(50), primary_key=True) key3 = Column(String(50)) class FakeTableWithIndexes(Base): __tablename__ = 'fake_table_unique_index' id = Column(String(50), primary_key=True) key1 = Column(String(50)) key2 = Column(String(50)) key3 = Column(String(50)) __table_args__ = ( Index('idx_unique', 'key1', 'key2', unique=True), Index('idx_unique', 'key1', 'key3', unique=False), ) class FakeTableClassicalyMapped(object): pass fake_table = Table( 'fake_table_classically_mapped', Base.metadata, Column('id', Integer, primary_key=True), Column('key', String(50)) ) reg = registry() reg.map_imperatively(FakeTableClassicalyMapped, fake_table) class FakeModel(object): def __init__(self, values): self.values = values def __getattr__(self, name): try: value = self.values[name] except KeyError: raise AttributeError(name) return value def __getitem__(self, key): if key in self.values: return self.values[key] else: raise NotImplementedError() def __repr__(self): return '' % self.values class TestPaginateQuery(test_base.BaseTestCase): def setUp(self): 
super(TestPaginateQuery, self).setUp() self.query = mock.Mock() self.mock_asc = self.useFixture( fixtures.MockPatchObject(sqlalchemy, 'asc')).mock self.mock_desc = self.useFixture( fixtures.MockPatchObject(sqlalchemy, 'desc')).mock self.marker = FakeTable(user_id='user', project_id='p', snapshot_id='s', updated_at=None) self.model = FakeTable def test_paginate_query_no_pagination_no_sort_dirs(self): self.query.order_by.return_value = self.query self.mock_asc.side_effect = [ 'asc_3', 'asc_2', 'asc_1' ] utils.paginate_query(self.query, self.model, 5, ['user_id', 'project_id', 'snapshot_id']) self.mock_asc.assert_has_calls([ mock.call(self.model.user_id), mock.call(self.model.project_id), mock.call(self.model.snapshot_id), ]) self.query.order_by.assert_has_calls([ mock.call('asc_3'), mock.call('asc_2'), mock.call('asc_1'), ]) self.query.limit.assert_called_once_with(5) def test_paginate_query_no_pagination(self): self.query.order_by.return_value = self.query self.mock_asc.side_effect = ['asc'] self.mock_desc.side_effect = ['desc'] utils.paginate_query(self.query, self.model, 5, ['user_id', 'project_id'], sort_dirs=['asc', 'desc']) self.mock_asc.assert_called_once_with(self.model.user_id) self.mock_desc.assert_called_once_with(self.model.project_id) self.query.order_by.assert_has_calls([ mock.call('asc'), mock.call('desc'), ]) self.query.limit.assert_called_once_with(5) def test_invalid_sort_key_str(self): self.assertEqual("Sort key supplied is invalid: None", str(exception.InvalidSortKey())) self.assertEqual("Sort key supplied is invalid: lol", str(exception.InvalidSortKey("lol"))) def test_invalid_unicode_paramater_str(self): self.assertEqual( "Invalid Parameter: Encoding directive wasn't provided.", str(exception.DBInvalidUnicodeParameter())) def test_paginate_query_attribute_error(self): self.mock_asc.return_value = 'asc' self.assertRaises(exception.InvalidSortKey, utils.paginate_query, self.query, self.model, 5, ['user_id', 'non-existent key']) self.mock_asc.assert_called_once_with(self.model.user_id) self.query.order_by.assert_called_once_with('asc') def test_paginate_query_attribute_error_invalid_sortkey(self): self.assertRaises(exception.InvalidSortKey, utils.paginate_query, self.query, self.model, 5, ['bad_user_id']) def test_paginate_query_attribute_error_invalid_sortkey_2(self): self.assertRaises(exception.InvalidSortKey, utils.paginate_query, self.query, self.model, 5, ['foo']) def test_paginate_query_attribute_error_invalid_sortkey_3(self): self.assertRaises(exception.InvalidSortKey, utils.paginate_query, self.query, self.model, 5, ['asc-nullinvalid']) def test_paginate_query_assertion_error(self): self.assertRaises(AssertionError, utils.paginate_query, self.query, self.model, 5, ['user_id'], marker=self.marker, sort_dir='asc', sort_dirs=['asc']) def test_paginate_query_assertion_error_2(self): self.assertRaises(AssertionError, utils.paginate_query, self.query, self.model, 5, ['user_id'], marker=self.marker, sort_dir=None, sort_dirs=['asc', 'desk']) @mock.patch.object(sqlalchemy.sql, 'and_') @mock.patch.object(sqlalchemy.sql, 'or_') def test_paginate_query(self, mock_or, mock_and): self.query.order_by.return_value = self.query self.query.filter.return_value = self.query self.mock_asc.return_value = 'asc_1' self.mock_desc.return_value = 'desc_1' mock_and.side_effect = ['some_crit', 'another_crit'] mock_or.return_value = 'some_f' utils.paginate_query(self.query, self.model, 5, ['user_id', 'project_id'], marker=self.marker, sort_dirs=['asc', 'desc']) 
self.mock_asc.assert_called_once_with(self.model.user_id) self.mock_desc.assert_called_once_with(self.model.project_id) self.query.order_by.assert_has_calls([ mock.call('asc_1'), mock.call('desc_1'), ]) mock_and.assert_has_calls([ mock.call(mock.ANY), mock.call(mock.ANY, mock.ANY) ]) mock_or.assert_called_once_with('some_crit', 'another_crit') self.query.filter.assert_called_once_with('some_f') self.query.limit.assert_called_once_with(5) @mock.patch.object(sqlalchemy.sql, 'and_') @mock.patch.object(sqlalchemy.sql, 'or_') def test_paginate_query_null(self, mock_or, mock_and): self.query.order_by.return_value = self.query self.query.filter.return_value = self.query self.mock_desc.side_effect = [ 'asc_null_2', 'desc_null_2', 'desc_1', ] self.mock_asc.side_effect = [ 'asc_1' ] mock_or.side_effect = [ 'or_1', 'or_2', 'some_f', ] mock_and.side_effect = [ 'some_crit', 'another_crit', ] with mock.patch.object( self.model.user_id.comparator.expression, 'is_not' ) as mock_is_not, \ mock.patch.object( self.model.user_id.comparator.expression, 'is_' ) as mock_is_a, \ mock.patch.object( self.model.project_id.comparator.expression, 'is_' ) as mock_is_b: mock_is_not.return_value = 'asc_null_1' mock_is_a.side_effect = [ 'desc_null_filter_1', 'desc_null_filter_2', ] mock_is_b.side_effect = [ 'desc_null_1', 'asc_null_filter', ] utils.paginate_query(self.query, self.model, 5, ['user_id', 'project_id'], marker=self.marker, sort_dirs=[ 'asc-nullslast', 'desc-nullsfirst']) mock_is_not.assert_called_once_with(None) mock_is_a.assert_has_calls([ mock.call(None), mock.call(None), ]) mock_is_b.assert_has_calls([ mock.call(None), mock.call(None), ]) self.mock_desc.assert_has_calls([ mock.call('asc_null_1'), mock.call('desc_null_1'), mock.call(self.model.project_id), ]) self.mock_asc.assert_has_calls([ mock.call(self.model.user_id), ]) mock_or.assert_has_calls([ mock.call(mock.ANY, 'desc_null_filter_2'), mock.call(mock.ANY, 'asc_null_filter'), mock.call('some_crit', 'another_crit'), ]) mock_and.assert_has_calls([ mock.call('or_1'), mock.call(mock.ANY, 'or_2'), ]) self.query.order_by.assert_has_calls([ mock.call('asc_null_2'), mock.call('asc_1'), mock.call('desc_null_2'), mock.call('desc_1'), ]) self.query.filter.assert_called_once_with('some_f') self.query.limit.assert_called_once_with(5) @mock.patch.object(sqlalchemy.sql, 'and_') @mock.patch.object(sqlalchemy.sql, 'or_') def test_paginate_query_marker_null(self, mock_or, mock_and): self.mock_asc.side_effect = [ 'asc_1' ] self.mock_desc.side_effect = [ 'asc_null_2', 'desc_null_2', 'desc_1', ] self.query.order_by.return_value = self.query self.query.filter.return_value = self.query mock_and.return_value = 'some_crit' mock_or.side_effect = ['or_1', 'some_f'] with mock.patch.object( self.model.user_id.comparator.expression, 'is_not' ) as mock_is_not, \ mock.patch.object( self.model.updated_at.comparator.expression, 'is_' ) as mock_is_a, \ mock.patch.object( self.model.user_id.comparator.expression, 'is_' ) as mock_is_b: mock_is_not.return_value = 'asc_null_1' mock_is_a.return_value = 'desc_null_1' mock_is_b.side_effect = ['asc_null_filter_1', 'asc_null_filter_2'] utils.paginate_query(self.query, self.model, 5, ['user_id', 'updated_at'], marker=self.marker, sort_dirs=[ 'asc-nullslast', 'desc-nullsfirst']) mock_is_not.assert_called_once_with(None) mock_is_a.assert_called_once_with(None) mock_is_b.assert_has_calls([mock.call(None), mock.call(None)]) self.mock_asc.assert_called_once_with(self.model.user_id) self.mock_desc.assert_has_calls([ mock.call('asc_null_1'), 
mock.call('desc_null_1'), mock.call(self.model.updated_at), ]) mock_and.assert_called_once_with('or_1') mock_or.assert_has_calls([ mock.call(mock.ANY, 'asc_null_filter_2'), mock.call('some_crit'), ]) self.query.order_by.assert_has_calls([ mock.call('asc_null_2'), mock.call('asc_1'), mock.call('desc_null_2'), mock.call('desc_1'), ]) self.query.filter.assert_called_once_with('some_f') self.query.limit.assert_called_once_with(5) @mock.patch.object(sqlalchemy.sql, 'and_') @mock.patch.object(sqlalchemy.sql, 'or_') def test_paginate_query_marker_null_with_two_primary_keys( self, mock_or, mock_and): self.mock_asc.return_value = 'asc_1' self.mock_desc.side_effect = [ 'asc_null_2', 'desc_null_2', 'desc_1', 'desc_null_4', 'desc_4', ] self.query.order_by.return_value = self.query mock_or.side_effect = [ 'or_1', 'or_2', 'some_f', ] mock_and.side_effect = [ 'some_crit', 'other_crit', ] self.query.filter.return_value = self.query with mock.patch.object( self.model.user_id.comparator.expression, 'is_not' ) as mock_is_not, \ mock.patch.object( self.model.updated_at.comparator.expression, 'is_' ) as mock_is_a, \ mock.patch.object( self.model.user_id.comparator.expression, 'is_' ) as mock_is_b, \ mock.patch.object( self.model.project_id.comparator.expression, 'is_' ) as mock_is_c: mock_is_not.return_value = 'asc_null_1' mock_is_a.return_value = 'desc_null_1' mock_is_b.side_effect = ['asc_null_filter_1', 'asc_null_filter_2'] mock_is_c.side_effect = ['desc_null_3', 'desc_null_filter_3'] utils.paginate_query(self.query, self.model, 5, ['user_id', 'updated_at', 'project_id'], marker=self.marker, sort_dirs=['asc-nullslast', 'desc-nullsfirst', 'desc-nullsfirst']) mock_is_not.assert_called_once_with(None) mock_is_a.assert_called_once_with(None) mock_is_b.assert_has_calls([mock.call(None), mock.call(None)]) mock_is_c.assert_has_calls([mock.call(None), mock.call(None)]) self.mock_asc.assert_called_once_with(self.model.user_id) self.mock_desc.assert_has_calls([ mock.call('asc_null_1'), mock.call('desc_null_1'), mock.call(self.model.updated_at), mock.call('desc_null_3'), mock.call(self.model.project_id), ]) self.query.order_by.assert_has_calls([ mock.call('asc_null_2'), mock.call('asc_1'), mock.call('desc_null_2'), mock.call('desc_1'), mock.call('desc_null_4'), mock.call('desc_4'), ]) mock_or.assert_has_calls([ mock.call(mock.ANY, 'asc_null_filter_2'), mock.call(mock.ANY, 'desc_null_filter_3'), mock.call('some_crit', 'other_crit'), ]) mock_and.assert_has_calls([ mock.call('or_1'), mock.call(mock.ANY, 'or_2'), ]) self.query.filter.assert_called_once_with('some_f') self.query.limit.assert_called_once_with(5) def test_paginate_query_value_error(self): self.mock_asc.return_value = 'asc_1' self.query.order_by.return_value = self.query self.assertRaises(ValueError, utils.paginate_query, self.query, self.model, 5, ['user_id', 'project_id'], marker=self.marker, sort_dirs=['asc', 'mixed']) self.mock_asc.assert_called_once_with(self.model.user_id) self.query.order_by.assert_called_once_with('asc_1') def test_paginate_on_hybrid(self): self.mock_asc.return_value = 'asc_1' self.mock_desc.return_value = 'desc_1' self.query.order_by.return_value = self.query utils.paginate_query(self.query, self.model, 5, ['user_id', 'some_hybrid'], sort_dirs=['asc', 'desc']) self.mock_asc.assert_called_once_with(self.model.user_id) self.mock_desc.assert_called_once_with(self.model.some_hybrid) self.query.order_by.assert_has_calls([ mock.call('asc_1'), mock.call('desc_1'), ]) self.query.limit.assert_called_once_with(5) class 
Test_UnstableSortingOrder(test_base.BaseTestCase): def test_multiple_primary_keys_stable(self): self.assertTrue( utils._stable_sorting_order( FakeTableWithMultipleKeys, ['key1', 'key2'])) def test_classically_mapped_primary_keys_stable(self): self.assertTrue( utils._stable_sorting_order(FakeTableClassicalyMapped, ['id'])) def test_multiple_primary_keys_unstable(self): self.assertFalse( utils._stable_sorting_order( FakeTableWithMultipleKeys, ['key1', 'key3'])) def test_joined_inh_stable(self): self.assertTrue( utils._stable_sorting_order(FakeTableJoinedInh, ['user_id']) ) def test_single_inh_stable(self): self.assertTrue( utils._stable_sorting_order(FakeTableSingleInh, ['user_id']) ) def test_unknown_primary_keys_stable(self): self.assertIsNone( utils._stable_sorting_order(object, ['key1', 'key2'])) def test_unique_index_stable(self): self.assertTrue( utils._stable_sorting_order( FakeTableWithIndexes, ['key1', 'key2'])) def test_unique_index_unstable(self): self.assertFalse( utils._stable_sorting_order( FakeTableWithIndexes, ['key1', 'key3'])) class TestGetUniqueKeys(test_base.BaseTestCase): def test_multiple_primary_keys(self): self.assertEqual( [{'key1', 'key2'}], utils.get_unique_keys(FakeTableWithMultipleKeys)) def test_unique_index(self): self.assertEqual( [{'id'}, {'key1', 'key2'}], utils.get_unique_keys(FakeTableWithIndexes)) def test_unknown_primary_keys(self): self.assertIsNone(utils.get_unique_keys(object)) def test_cache(self): class CacheTable(object): info = {} constraints_called = 0 indexes_called = 0 @property def constraints(self): self.constraints_called += 1 return [] @property def indexes(self): self.indexes_called += 1 return [] class CacheModel(object): pass table = CacheTable() mapper_mock = mock.Mock(mapped_table=table, local_table=table) mapper_mock.base_mapper = mapper_mock mock_inspect = mock.Mock( return_value=mapper_mock) model = CacheModel() self.assertNotIn('oslodb_unique_keys', CacheTable.info) with mock.patch("oslo_db.sqlalchemy.utils.inspect", mock_inspect): utils.get_unique_keys(model) self.assertIn('oslodb_unique_keys', CacheTable.info) self.assertEqual(1, table.constraints_called) self.assertEqual(1, table.indexes_called) for i in range(10): utils.get_unique_keys(model) self.assertEqual(1, table.constraints_called) self.assertEqual(1, table.indexes_called) class TestPaginateQueryActualSQL(test_base.BaseTestCase): def test_paginate_with_boolean_sort(self): s = Session() q = s.query(FakeTable) q = utils.paginate_query(q, FakeTable, 5, ['enabled'], sort_dirs=['asc'], marker=FakeTable(user_id='hello', enabled=False)) expected_core_sql = ( select(FakeTable). order_by(sqlalchemy.asc(FakeTable.enabled)). where(cast(FakeTable.enabled, Integer) > 0). limit(5) ) self.assertEqual( str(expected_core_sql.compile()), str(q.statement.compile()) ) def test_paginate_on_hybrid_assert_stmt(self): s = Session() q = s.query(FakeTable) q = utils.paginate_query( q, FakeTable, 5, ['user_id', 'some_hybrid'], sort_dirs=['asc', 'desc']) expected_core_sql = ( select(FakeTable). order_by(sqlalchemy.asc(FakeTable.user_id)). order_by(sqlalchemy.desc(FakeTable.some_hybrid)). 
limit(5) ) self.assertEqual( str(expected_core_sql.compile()), str(q.statement.compile()) ) class TestMigrationUtils(db_test_base._DbTestCase): """Class for testing utils that are used in db migrations.""" def setUp(self): super(TestMigrationUtils, self).setUp() self.meta = MetaData() self.conn = self.engine.connect() # self.conn would be better here but does not work right now self.addCleanup(self.meta.drop_all, self.engine) self.addCleanup(self.conn.close) def _populate_db_for_drop_duplicate_entries(self, engine, meta, table_name): values = [ {'id': 11, 'a': 3, 'b': 10, 'c': 'abcdef'}, {'id': 12, 'a': 5, 'b': 10, 'c': 'abcdef'}, {'id': 13, 'a': 6, 'b': 10, 'c': 'abcdef'}, {'id': 14, 'a': 7, 'b': 10, 'c': 'abcdef'}, {'id': 21, 'a': 1, 'b': 20, 'c': 'aa'}, {'id': 31, 'a': 1, 'b': 20, 'c': 'bb'}, {'id': 41, 'a': 1, 'b': 30, 'c': 'aef'}, {'id': 42, 'a': 2, 'b': 30, 'c': 'aef'}, {'id': 43, 'a': 3, 'b': 30, 'c': 'aef'} ] test_table = Table(table_name, meta, Column('id', Integer, primary_key=True, nullable=False), Column('a', Integer), Column('b', Integer), Column('c', String(255)), Column('deleted', Integer, default=0), Column('deleted_at', DateTime), Column('updated_at', DateTime)) test_table.create(engine) with engine.connect() as conn, conn.begin(): conn.execute(test_table.insert(), values) return test_table, values def test_drop_old_duplicate_entries_from_table(self): table_name = "__test_tmp_table__" test_table, values = self._populate_db_for_drop_duplicate_entries( self.engine, self.meta, table_name) utils.drop_old_duplicate_entries_from_table( self.engine, table_name, False, 'b', 'c') uniq_values = set() expected_ids = [] for value in sorted(values, key=lambda x: x['id'], reverse=True): uniq_value = (('b', value['b']), ('c', value['c'])) if uniq_value in uniq_values: continue uniq_values.add(uniq_value) expected_ids.append(value['id']) with self.engine.connect() as conn, conn.begin(): real_ids = [ row[0] for row in conn.execute(select(test_table.c.id)).fetchall() ] self.assertEqual(len(expected_ids), len(real_ids)) for id_ in expected_ids: self.assertIn(id_, real_ids) def test_drop_dup_entries_in_file_conn(self): table_name = "__test_tmp_table__" tmp_db_file = self.create_tempfiles([['name', '']], ext='.sql')[0] in_file_engine = session.EngineFacade( 'sqlite:///%s' % tmp_db_file).get_engine() meta = MetaData() test_table, values = self._populate_db_for_drop_duplicate_entries( in_file_engine, meta, table_name) utils.drop_old_duplicate_entries_from_table( in_file_engine, table_name, False, 'b', 'c') def test_drop_old_duplicate_entries_from_table_soft_delete(self): table_name = "__test_tmp_table__" table, values = self._populate_db_for_drop_duplicate_entries( self.engine, self.meta, table_name) utils.drop_old_duplicate_entries_from_table(self.engine, table_name, True, 'b', 'c') uniq_values = set() expected_values = [] soft_deleted_values = [] for value in sorted(values, key=lambda x: x['id'], reverse=True): uniq_value = (('b', value['b']), ('c', value['c'])) if uniq_value in uniq_values: soft_deleted_values.append(value) continue uniq_values.add(uniq_value) expected_values.append(value) base_select = table.select() with self.engine.connect() as conn, conn.begin(): rows_select = base_select.where(table.c.deleted != table.c.id) row_ids = [ row.id for row in conn.execute(rows_select).fetchall() ] self.assertEqual(len(expected_values), len(row_ids)) for value in expected_values: self.assertIn(value['id'], row_ids) deleted_rows_select = base_select.where( table.c.deleted == table.c.id) 
deleted_rows_ids = [ row.id for row in conn.execute(deleted_rows_select).fetchall() ] self.assertEqual(len(values) - len(row_ids), len(deleted_rows_ids)) for value in soft_deleted_values: self.assertIn(value['id'], deleted_rows_ids) def test_get_foreign_key_constraint_name(self): table_1 = Table('table_name_1', self.meta, Column('id', Integer, primary_key=True), Column('deleted', Integer)) table_2 = Table('table_name_2', self.meta, Column('id', Integer, primary_key=True), Column('foreign_id', Integer), ForeignKeyConstraint(['foreign_id'], ['table_name_1.id'], name='table_name_2_fk1'), Column('deleted', Integer)) self.meta.create_all(self.engine, tables=[table_1, table_2]) fkc = utils.get_foreign_key_constraint_name(self.engine, 'table_name_2', 'foreign_id') self.assertEqual(fkc, 'table_name_2_fk1') class PostgresqlTestMigrations(TestMigrationUtils, db_test_base._PostgreSQLOpportunisticTestCase): """Test migrations on PostgreSQL.""" pass class MySQLTestMigrations(TestMigrationUtils, db_test_base._MySQLOpportunisticTestCase): """Test migrations on MySQL.""" pass class TestConnectionUtils(test_base.BaseTestCase): def setUp(self): super(TestConnectionUtils, self).setUp() self.full_credentials = {'backend': 'postgresql+psycopg2', 'database': 'test', 'user': 'dude', 'passwd': 'pass'} self.connect_string = 'postgresql+psycopg2://dude:pass@localhost/test' # NOTE(rpodolyaka): mock the dialect parts, so that we don't depend # on psycopg2 (or any other DBAPI implementation) in these tests @classmethod def fake_dbapi(cls): return mock.MagicMock() class OurDialect(psycopg2.PGDialect_psycopg2): def dbapi(self): return fake_dbapi def import_dbapi(self): return fake_dbapi patch_dbapi = mock.patch.object( psycopg2, "PGDialect_psycopg2", new=OurDialect, ) patch_dbapi.start() self.addCleanup(patch_dbapi.stop) patch_onconnect = mock.patch.object(psycopg2.PGDialect_psycopg2, 'on_connect') patch_onconnect.start() self.addCleanup(patch_onconnect.stop) def test_ensure_backend_available(self): with mock.patch.object( sqlalchemy.engine.base.Engine, 'connect') as mock_connect: fake_connection = mock.Mock() mock_connect.return_value = fake_connection eng = provision.Backend._ensure_backend_available( self.connect_string) self.assertIsInstance(eng, sqlalchemy.engine.base.Engine) self.assertEqual(utils.make_url(self.connect_string), eng.url) mock_connect.assert_called_once() fake_connection.close.assert_called_once() def test_ensure_backend_available_no_connection_raises(self): log = self.useFixture(fixtures.FakeLogger()) err = OperationalError("Can't connect to database", None, None) with mock.patch.object( sqlalchemy.engine.base.Engine, 'connect') as mock_connect: mock_connect.side_effect = err exc = self.assertRaises( exception.BackendNotAvailable, provision.Backend._ensure_backend_available, self.connect_string) self.assertEqual( "Backend 'postgresql+psycopg2' is unavailable: " "Could not connect", str(exc)) self.assertEqual( "The postgresql+psycopg2 backend is unavailable: %s" % err, log.output.strip()) def test_ensure_backend_available_no_dbapi_raises(self): log = self.useFixture(fixtures.FakeLogger()) with mock.patch.object(sqlalchemy, 'create_engine') as mock_create: mock_create.side_effect = ImportError( "Can't import DBAPI module foobar") exc = self.assertRaises( exception.BackendNotAvailable, provision.Backend._ensure_backend_available, self.connect_string) mock_create.assert_called_once_with( utils.make_url(self.connect_string)) self.assertEqual( "Backend 'postgresql+psycopg2' is unavailable: " "No DBAPI 
installed", str(exc)) self.assertEqual( "The postgresql+psycopg2 backend is unavailable: Can't import " "DBAPI module foobar", log.output.strip()) def test_get_db_connection_info(self): conn_pieces = parse.urlparse(self.connect_string) self.assertEqual(('dude', 'pass', 'test', 'localhost'), utils.get_db_connection_info(conn_pieces)) class MyModelSoftDeletedProjectId(declarative_base(), models.ModelBase, models.SoftDeleteMixin): __tablename__ = 'soft_deleted_project_id_test_model' id = Column(Integer, primary_key=True) project_id = Column(Integer) class MyModel(declarative_base(), models.ModelBase): __tablename__ = 'test_model' id = Column(Integer, primary_key=True) class MyModelSoftDeleted(declarative_base(), models.ModelBase, models.SoftDeleteMixin): __tablename__ = 'soft_deleted_test_model' id = Column(Integer, primary_key=True) class TestModelQuery(test_base.BaseTestCase): def setUp(self): super(TestModelQuery, self).setUp() self.session = mock.MagicMock() self.session.query.return_value = self.session.query self.session.query.filter.return_value = self.session.query def test_wrong_model(self): self.assertRaises(TypeError, utils.model_query, FakeModel, session=self.session) def test_no_soft_deleted(self): self.assertRaises(ValueError, utils.model_query, MyModel, session=self.session, deleted=True) def test_deleted_false(self): mock_query = utils.model_query( MyModelSoftDeleted, session=self.session, deleted=False) deleted_filter = mock_query.filter.call_args[0][0] self.assertEqual('soft_deleted_test_model.deleted = :deleted_1', str(deleted_filter)) self.assertEqual(deleted_filter.right.value, MyModelSoftDeleted.__mapper__.c.deleted.default.arg) def test_deleted_true(self): mock_query = utils.model_query( MyModelSoftDeleted, session=self.session, deleted=True) deleted_filter = mock_query.filter.call_args[0][0] self.assertEqual(str(deleted_filter), 'soft_deleted_test_model.deleted != :deleted_1') self.assertEqual(deleted_filter.right.value, MyModelSoftDeleted.__mapper__.c.deleted.default.arg) @mock.patch.object(utils, "_read_deleted_filter") def test_no_deleted_value(self, _read_deleted_filter): utils.model_query(MyModelSoftDeleted, session=self.session) self.assertEqual(0, _read_deleted_filter.call_count) def test_project_filter(self): project_id = 10 mock_query = utils.model_query( MyModelSoftDeletedProjectId, session=self.session, project_only=True, project_id=project_id) deleted_filter = mock_query.filter.call_args[0][0] self.assertEqual( 'soft_deleted_project_id_test_model.project_id = :project_id_1', str(deleted_filter)) self.assertEqual(project_id, deleted_filter.right.value) def test_project_filter_wrong_model(self): self.assertRaises(ValueError, utils.model_query, MyModelSoftDeleted, session=self.session, project_id=10) def test_project_filter_allow_none(self): mock_query = utils.model_query( MyModelSoftDeletedProjectId, session=self.session, project_id=(10, None)) self.assertEqual( 'soft_deleted_project_id_test_model.project_id' ' IN (:project_id_1, NULL)', str(mock_query.filter.call_args[0][0]) ) def test_model_query_common(self): utils.model_query(MyModel, args=(MyModel.id,), session=self.session) self.session.query.assert_called_with(MyModel.id) class TestUtils(db_test_base._DbTestCase): def setUp(self): super(TestUtils, self).setUp() meta = MetaData() self.test_table = Table( 'test_table', meta, Column('a', Integer), Column('b', Integer) ) self.test_table.create(self.engine) self.addCleanup(meta.drop_all, self.engine) def test_get_indexes(self): Index('index_a', 
self.test_table.c.a).create(self.engine) Index('index_b', self.test_table.c.b).create(self.engine) indexes = utils.get_indexes(self.engine, "test_table") indexes = [(index['name'], index['column_names']) for index in indexes] self.assertIn(('index_a', ['a']), indexes) self.assertIn(('index_b', ['b']), indexes) def test_index_exists(self): self.assertFalse(utils.index_exists(self.engine, 'test_table', 'new_index')) Index('new_index', self.test_table.c.a).create(self.engine) self.assertTrue(utils.index_exists(self.engine, 'test_table', 'new_index')) def test_index_exists_on_columns(self): columns = [self.test_table.c.a, self.test_table.c.b] Index('new_index', *columns).create(self.engine) self.assertTrue(utils.index_exists_on_columns(self.engine, 'test_table', ('a', 'b'))) def test_add_index(self): self.assertFalse(utils.index_exists(self.engine, 'test_table', 'new_index')) utils.add_index(self.engine, 'test_table', 'new_index', ('a',)) self.assertTrue(utils.index_exists(self.engine, 'test_table', 'new_index')) def test_add_existing_index(self): Index('new_index', self.test_table.c.a).create(self.engine) self.assertRaises(ValueError, utils.add_index, self.engine, 'test_table', 'new_index', ('a',)) def test_drop_index(self): Index('new_index', self.test_table.c.a).create(self.engine) utils.drop_index(self.engine, 'test_table', 'new_index') self.assertFalse(utils.index_exists(self.engine, 'test_table', 'new_index')) def test_drop_unexisting_index(self): self.assertRaises(ValueError, utils.drop_index, self.engine, 'test_table', 'new_index') @mock.patch('oslo_db.sqlalchemy.utils.drop_index') @mock.patch('oslo_db.sqlalchemy.utils.add_index') def test_change_index_columns(self, add_index, drop_index): utils.change_index_columns(self.engine, 'test_table', 'a_index', ('a',)) utils.drop_index.assert_called_once_with(self.engine, 'test_table', 'a_index') utils.add_index.assert_called_once_with(self.engine, 'test_table', 'a_index', ('a',)) def test_column_exists(self): for col in ['a', 'b']: self.assertTrue(utils.column_exists(self.engine, 'test_table', col)) self.assertFalse(utils.column_exists(self.engine, 'test_table', 'fake_column')) class TestUtilsMysqlOpportunistically( TestUtils, db_test_base._MySQLOpportunisticTestCase): pass class TestUtilsPostgresqlOpportunistically( TestUtils, db_test_base._PostgreSQLOpportunisticTestCase): pass class TestDialectFunctionDispatcher(test_base.BaseTestCase): def _single_fixture(self): callable_fn = mock.Mock() dispatcher = orig = utils.dispatch_for_dialect("*")( callable_fn.default) dispatcher = dispatcher.dispatch_for("sqlite")(callable_fn.sqlite) dispatcher = dispatcher.dispatch_for("mysql+pymysql")( callable_fn.mysql_pymysql) dispatcher = dispatcher.dispatch_for("mysql")( callable_fn.mysql) dispatcher = dispatcher.dispatch_for("postgresql")( callable_fn.postgresql) self.assertTrue(dispatcher is orig) return dispatcher, callable_fn def _multiple_fixture(self): callable_fn = mock.Mock() for targ in [ callable_fn.default, callable_fn.sqlite, callable_fn.mysql, callable_fn.mysql_pymysql, callable_fn.postgresql, callable_fn.postgresql_psycopg2, callable_fn.pyodbc ]: targ.return_value = None dispatcher = orig = utils.dispatch_for_dialect("*", multiple=True)( callable_fn.default) dispatcher = dispatcher.dispatch_for("sqlite")(callable_fn.sqlite) dispatcher = dispatcher.dispatch_for("mysql+pymysql")( callable_fn.mysql_pymysql) dispatcher = dispatcher.dispatch_for("mysql")( callable_fn.mysql) dispatcher = dispatcher.dispatch_for("postgresql+*")( 
callable_fn.postgresql) dispatcher = dispatcher.dispatch_for("postgresql+psycopg2")( callable_fn.postgresql_psycopg2) dispatcher = dispatcher.dispatch_for("*+pyodbc")( callable_fn.pyodbc) self.assertTrue(dispatcher is orig) return dispatcher, callable_fn def test_single(self): dispatcher, callable_fn = self._single_fixture() dispatcher("sqlite://", 1) dispatcher("postgresql+psycopg2://u:p@h/t", 2) dispatcher("mysql+pymysql://u:p@h/t", 3) dispatcher("mysql://u:p@h/t", 4) dispatcher("mysql+mysqlconnector://u:p@h/t", 5) self.assertEqual( [ mock.call.sqlite('sqlite://', 1), mock.call.postgresql("postgresql+psycopg2://u:p@h/t", 2), mock.call.mysql_pymysql("mysql+pymysql://u:p@h/t", 3), mock.call.mysql("mysql://u:p@h/t", 4), mock.call.mysql("mysql+mysqlconnector://u:p@h/t", 5), ], callable_fn.mock_calls) def test_single_kwarg(self): dispatcher, callable_fn = self._single_fixture() dispatcher("sqlite://", foo='bar') dispatcher("postgresql+psycopg2://u:p@h/t", 1, x='y') self.assertEqual( [ mock.call.sqlite('sqlite://', foo='bar'), mock.call.postgresql( "postgresql+psycopg2://u:p@h/t", 1, x='y'), ], callable_fn.mock_calls) def test_dispatch_on_target(self): callable_fn = mock.Mock() @utils.dispatch_for_dialect("*") def default_fn(url, x, y): callable_fn.default(url, x, y) @default_fn.dispatch_for("sqlite") def sqlite_fn(url, x, y): callable_fn.sqlite(url, x, y) default_fn.dispatch_on_drivername("*")(url, x, y) default_fn("sqlite://", 4, 5) self.assertEqual( [ mock.call.sqlite("sqlite://", 4, 5), mock.call.default("sqlite://", 4, 5) ], callable_fn.mock_calls ) def test_single_no_dispatcher(self): callable_fn = mock.Mock() dispatcher = utils.dispatch_for_dialect("sqlite")(callable_fn.sqlite) dispatcher = dispatcher.dispatch_for("mysql")(callable_fn.mysql) exc = self.assertRaises( ValueError, dispatcher, "postgresql://s:t@localhost/test" ) self.assertEqual( "No default function found for driver: 'postgresql+psycopg2'", str(exc) ) def test_multiple_no_dispatcher(self): callable_fn = mock.Mock() dispatcher = utils.dispatch_for_dialect("sqlite", multiple=True)( callable_fn.sqlite) dispatcher = dispatcher.dispatch_for("mysql")(callable_fn.mysql) dispatcher("postgresql://s:t@localhost/test") self.assertEqual( [], callable_fn.mock_calls ) def test_multiple_no_driver(self): callable_fn = mock.Mock( default=mock.Mock(return_value=None), sqlite=mock.Mock(return_value=None) ) dispatcher = utils.dispatch_for_dialect("*", multiple=True)( callable_fn.default) dispatcher = dispatcher.dispatch_for("sqlite")( callable_fn.sqlite) dispatcher.dispatch_on_drivername("sqlite")("foo") self.assertEqual( [mock.call.sqlite("foo"), mock.call.default("foo")], callable_fn.mock_calls ) def test_multiple_nesting(self): callable_fn = mock.Mock( default=mock.Mock(return_value=None), mysql=mock.Mock(return_value=None) ) dispatcher = utils.dispatch_for_dialect("*", multiple=True)( callable_fn.default) dispatcher = dispatcher.dispatch_for("mysql+mysqlconnector")( dispatcher.dispatch_for("mysql+mysqldb")( callable_fn.mysql ) ) mysqldb_url = utils.make_url("mysql+mysqldb://") mysqlconnector_url = utils.make_url("mysql+mysqlconnector://") sqlite_url = utils.make_url("sqlite://") dispatcher(mysqldb_url, 1) dispatcher(mysqlconnector_url, 2) dispatcher(sqlite_url, 3) self.assertEqual( [ mock.call.mysql(mysqldb_url, 1), mock.call.default(mysqldb_url, 1), mock.call.mysql(mysqlconnector_url, 2), mock.call.default(mysqlconnector_url, 2), mock.call.default(sqlite_url, 3) ], callable_fn.mock_calls ) def test_single_retval(self): dispatcher, 
callable_fn = self._single_fixture() callable_fn.mysql_pymysql.return_value = 5 self.assertEqual( 5, dispatcher("mysql+pymysql://u:p@h/t", 3) ) def test_engine(self): eng = sqlalchemy.create_engine("sqlite:///path/to/my/db.db") dispatcher, callable_fn = self._single_fixture() dispatcher(eng) self.assertEqual( [mock.call.sqlite(eng)], callable_fn.mock_calls ) def test_url_pymysql(self): url = utils.make_url("mysql+pymysql://scott:tiger@localhost/test") dispatcher, callable_fn = self._single_fixture() dispatcher(url, 15) self.assertEqual( [mock.call.mysql_pymysql(url, 15)], callable_fn.mock_calls ) def test_url_mysql_generic(self): url = utils.make_url("mysql://scott:tiger@localhost/test") dispatcher, callable_fn = self._single_fixture() dispatcher(url, 15) self.assertEqual( [mock.call.mysql(url, 15)], callable_fn.mock_calls ) def test_invalid_target(self): dispatcher, callable_fn = self._single_fixture() exc = self.assertRaises( ValueError, dispatcher, 20 ) self.assertEqual("Invalid target type: 20", str(exc)) def test_invalid_dispatch(self): callable_fn = mock.Mock() dispatcher = utils.dispatch_for_dialect("*")(callable_fn.default) exc = self.assertRaises( ValueError, dispatcher.dispatch_for("+pyodbc"), callable_fn.pyodbc ) self.assertEqual( "Couldn't parse database[+driver]: '+pyodbc'", str(exc) ) def test_single_only_one_target(self): callable_fn = mock.Mock() dispatcher = utils.dispatch_for_dialect("*")(callable_fn.default) dispatcher = dispatcher.dispatch_for("sqlite")(callable_fn.sqlite) exc = self.assertRaises( TypeError, dispatcher.dispatch_for("sqlite"), callable_fn.sqlite2 ) self.assertEqual( "Multiple functions for expression 'sqlite'", str(exc) ) def test_multiple(self): dispatcher, callable_fn = self._multiple_fixture() dispatcher("postgresql+pyodbc://", 1) dispatcher("mysql+pymysql://", 2) dispatcher("postgresql+psycopg2://", 4) dispatcher("postgresql://", 5) # TODO(zzzeek): there is a deterministic order here, but we might # want to tweak it, or maybe provide options. default first? # most specific first? is *+pyodbc or postgresql+* more specific? 
self.assertEqual( [ mock.call.postgresql('postgresql+pyodbc://', 1), mock.call.pyodbc('postgresql+pyodbc://', 1), mock.call.default('postgresql+pyodbc://', 1), mock.call.mysql_pymysql('mysql+pymysql://', 2), mock.call.mysql('mysql+pymysql://', 2), mock.call.default('mysql+pymysql://', 2), mock.call.postgresql_psycopg2('postgresql+psycopg2://', 4), mock.call.postgresql('postgresql+psycopg2://', 4), mock.call.default('postgresql+psycopg2://', 4), # note this is called because we resolve the default # DBAPI for the url mock.call.postgresql_psycopg2('postgresql://', 5), mock.call.postgresql('postgresql://', 5), mock.call.default('postgresql://', 5), ], callable_fn.mock_calls ) def test_multiple_no_return_value(self): dispatcher, callable_fn = self._multiple_fixture() callable_fn.sqlite.return_value = 5 exc = self.assertRaises( TypeError, dispatcher, "sqlite://" ) self.assertEqual( "Return value not allowed for multiple filtered function", str(exc) ) class TestGetInnoDBTables(db_test_base._MySQLOpportunisticTestCase): def test_all_tables_use_innodb(self): with self.engine.connect() as conn, conn.begin(): conn.execute( sql.text( "CREATE TABLE customers " "(a INT, b CHAR (20), INDEX (a)) ENGINE=InnoDB")) self.assertEqual([], utils.get_non_innodb_tables(self.engine)) def test_all_tables_use_innodb_false(self): with self.engine.connect() as conn, conn.begin(): conn.execute( sql.text("CREATE TABLE employee (i INT) ENGINE=MEMORY") ) self.assertEqual(['employee'], utils.get_non_innodb_tables(self.engine)) def test_skip_tables_use_default_value(self): with self.engine.connect() as conn, conn.begin(): conn.execute( sql.text("CREATE TABLE migrate_version (i INT) ENGINE=MEMORY") ) self.assertEqual([], utils.get_non_innodb_tables(self.engine)) def test_skip_tables_use_passed_value(self): with self.engine.connect() as conn, conn.begin(): conn.execute( sql.text("CREATE TABLE some_table (i INT) ENGINE=MEMORY")) self.assertEqual([], utils.get_non_innodb_tables( self.engine, skip_tables=('some_table',))) def test_skip_tables_use_empty_list(self): with self.engine.connect() as conn, conn.begin(): conn.execute( sql.text("CREATE TABLE some_table_3 (i INT) ENGINE=MEMORY")) self.assertEqual(['some_table_3'], utils.get_non_innodb_tables( self.engine, skip_tables=())) def test_skip_tables_use_several_values(self): with self.engine.connect() as conn, conn.begin(): conn.execute( sql.text("CREATE TABLE some_table_1 (i INT) ENGINE=MEMORY")) conn.execute( sql.text("CREATE TABLE some_table_2 (i INT) ENGINE=MEMORY")) self.assertEqual([], utils.get_non_innodb_tables( self.engine, skip_tables=('some_table_1', 'some_table_2'))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/test_api.py0000664000175000017500000003025200000000000020130 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for DB API.""" from unittest import mock from oslo_config import cfg from oslo_utils import importutils from oslo_db import api from oslo_db import exception from oslo_db.tests import base as test_base sqla = importutils.try_import('sqlalchemy') if not sqla: raise ImportError("Unable to import module 'sqlalchemy'.") def get_backend(): return DBAPI() class DBAPI(object): def _api_raise(self, *args, **kwargs): """Simulate raising a database-has-gone-away error This method creates a fake OperationalError with an ID matching a valid MySQL "database has gone away" situation. It also decrements the error_counter so that we can artificially keep track of how many times this function is called by the wrapper. When error_counter reaches zero, this function returns True, simulating the database becoming available again and the query succeeding. """ if self.error_counter > 0: self.error_counter -= 1 orig = sqla.exc.DBAPIError(False, False, False) orig.args = [2006, 'Test raise operational error'] e = exception.DBConnectionError(orig) raise e else: return True def api_raise_default(self, *args, **kwargs): return self._api_raise(*args, **kwargs) @api.safe_for_db_retry def api_raise_enable_retry(self, *args, **kwargs): return self._api_raise(*args, **kwargs) def api_class_call1(_self, *args, **kwargs): return args, kwargs class DBAPITestCase(test_base.BaseTestCase): def test_dbapi_full_path_module_method(self): dbapi = api.DBAPI('oslo_db.tests.test_api') result = dbapi.api_class_call1(1, 2, kwarg1='meow') expected = ((1, 2), {'kwarg1': 'meow'}) self.assertEqual(expected, result) def test_dbapi_unknown_invalid_backend(self): self.assertRaises(ImportError, api.DBAPI, 'tests.unit.db.not_existent') def test_dbapi_lazy_loading(self): dbapi = api.DBAPI('oslo_db.tests.test_api', lazy=True) self.assertIsNone(dbapi._backend) dbapi.api_class_call1(1, 'abc') self.assertIsNotNone(dbapi._backend) def test_dbapi_from_config(self): conf = cfg.ConfigOpts() dbapi = api.DBAPI.from_config(conf, backend_mapping={'sqlalchemy': __name__}) self.assertIsNotNone(dbapi._backend) class DBReconnectTestCase(DBAPITestCase): def setUp(self): super().setUp() self.test_db_api = DBAPI() patcher = mock.patch(__name__ + '.get_backend', return_value=self.test_db_api) patcher.start() self.addCleanup(patcher.stop) def test_raise_connection_error(self): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}) self.test_db_api.error_counter = 5 self.assertRaises(exception.DBConnectionError, self.dbapi._api_raise) def test_raise_connection_error_decorated(self): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}) self.test_db_api.error_counter = 5 self.assertRaises(exception.DBConnectionError, self.dbapi.api_raise_enable_retry) self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry') def test_raise_connection_error_enabled(self): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}, use_db_reconnect=True) self.test_db_api.error_counter = 5 self.assertRaises(exception.DBConnectionError, self.dbapi.api_raise_default) self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry') @mock.patch('oslo_db.api.time.sleep', return_value=None) def test_retry_one(self, p_time_sleep): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}, use_db_reconnect=True, retry_interval=1) try: func = self.dbapi.api_raise_enable_retry self.test_db_api.error_counter = 1 self.assertTrue(func(), 'Single retry did not succeed.') except Exception: self.fail('Single retry raised an un-wrapped error.') 
p_time_sleep.assert_called_with(1) self.assertEqual( 0, self.test_db_api.error_counter, 'Counter not decremented, retry logic probably failed.') @mock.patch('oslo_db.api.time.sleep', return_value=None) def test_retry_two(self, p_time_sleep): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}, use_db_reconnect=True, retry_interval=1, inc_retry_interval=False) try: func = self.dbapi.api_raise_enable_retry self.test_db_api.error_counter = 2 self.assertTrue(func(), 'Multiple retry did not succeed.') except Exception: self.fail('Multiple retry raised an un-wrapped error.') p_time_sleep.assert_called_with(1) self.assertEqual( 0, self.test_db_api.error_counter, 'Counter not decremented, retry logic probably failed.') @mock.patch('oslo_db.api.time.sleep', return_value=None) def test_retry_float_interval(self, p_time_sleep): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}, use_db_reconnect=True, retry_interval=0.5) try: func = self.dbapi.api_raise_enable_retry self.test_db_api.error_counter = 1 self.assertTrue(func(), 'Single retry did not succeed.') except Exception: self.fail('Single retry raised an un-wrapped error.') p_time_sleep.assert_called_with(0.5) self.assertEqual( 0, self.test_db_api.error_counter, 'Counter not decremented, retry logic probably failed.') @mock.patch('oslo_db.api.time.sleep', return_value=None) def test_retry_until_failure(self, p_time_sleep): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}, use_db_reconnect=True, retry_interval=1, inc_retry_interval=False, max_retries=3) func = self.dbapi.api_raise_enable_retry self.test_db_api.error_counter = 5 self.assertRaises( exception.DBError, func, 'Retry of permanent failure did not throw DBError exception.') p_time_sleep.assert_called_with(1) self.assertNotEqual( 0, self.test_db_api.error_counter, 'Retry did not stop after sql_max_retries iterations.') class DBRetryRequestCase(DBAPITestCase): def test_retry_wrapper_succeeds(self): @api.wrap_db_retry(max_retries=10) def some_method(): pass some_method() @mock.patch('oslo_db.api.time.sleep', return_value=None) def test_retry_wrapper_reaches_limit(self, mock_sleep): max_retries = 2 @api.wrap_db_retry(max_retries=max_retries) def some_method(res): res['result'] += 1 raise exception.RetryRequest(ValueError()) res = {'result': 0} self.assertRaises(ValueError, some_method, res) self.assertEqual(max_retries + 1, res['result']) @mock.patch('oslo_db.api.time.sleep', return_value=None) def test_retry_wrapper_exception_checker(self, mock_sleep): def exception_checker(exc): return isinstance(exc, ValueError) and exc.args[0] < 5 @api.wrap_db_retry(max_retries=10, exception_checker=exception_checker) def some_method(res): res['result'] += 1 raise ValueError(res['result']) res = {'result': 0} self.assertRaises(ValueError, some_method, res) # our exception checker should have stopped returning True after 5 self.assertEqual(5, res['result']) @mock.patch.object(DBAPI, 'api_class_call1') @mock.patch.object(api, 'wrap_db_retry') def test_mocked_methods_are_not_wrapped(self, mocked_wrap, mocked_method): dbapi = api.DBAPI('oslo_db.tests.test_api') dbapi.api_class_call1() self.assertFalse(mocked_wrap.called) @mock.patch('oslo_db.api.LOG') def test_retry_wrapper_non_db_error_not_logged(self, mock_log): # Tests that if the retry wrapper hits a non-db error (raised from the # wrapped function), then that exception is reraised but not logged. 
@api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def some_method(): raise AttributeError('test') self.assertRaises(AttributeError, some_method) self.assertFalse(mock_log.called) @mock.patch('oslo_db.api.time.sleep', return_value=None) def test_retry_wrapper_deadlock(self, mock_sleep): # Tests that jitter is False, if the retry wrapper hits a # non-deadlock error @api.wrap_db_retry(max_retries=1, retry_on_deadlock=True) def some_method_no_deadlock(): raise exception.RetryRequest(ValueError()) with mock.patch( 'oslo_db.api.wrap_db_retry._get_inc_interval') as mock_get: mock_get.return_value = 2, 2 self.assertRaises(ValueError, some_method_no_deadlock) mock_get.assert_called_once_with(1, False) # Tests that jitter is True, if the retry wrapper hits a deadlock # error. @api.wrap_db_retry(max_retries=1, retry_on_deadlock=True) def some_method_deadlock(): raise exception.DBDeadlock('test') with mock.patch( 'oslo_db.api.wrap_db_retry._get_inc_interval') as mock_get: mock_get.return_value = 0.1, 2 self.assertRaises(exception.DBDeadlock, some_method_deadlock) mock_get.assert_called_once_with(1, True) # Tests that jitter is True, if the jitter is enable by user @api.wrap_db_retry(max_retries=1, retry_on_deadlock=True, jitter=True) def some_method_no_deadlock_exp(): raise exception.RetryRequest(ValueError()) with mock.patch( 'oslo_db.api.wrap_db_retry._get_inc_interval') as mock_get: mock_get.return_value = 0.1, 2 self.assertRaises(ValueError, some_method_no_deadlock_exp) mock_get.assert_called_once_with(1, True) def test_wrap_db_retry_get_interval(self): x = api.wrap_db_retry(max_retries=5, retry_on_deadlock=True, max_retry_interval=11) self.assertEqual(11, x.max_retry_interval) for i in (1, 2, 4): # With jitter: sleep_time = [0, 2 ** retry_times) sleep_time, n = x._get_inc_interval(i, True) self.assertEqual(2 * i, n) self.assertTrue(2 * i > sleep_time) # Without jitter: sleep_time = 2 ** retry_times sleep_time, n = x._get_inc_interval(i, False) self.assertEqual(2 * i, n) self.assertEqual(2 * i, sleep_time) for i in (8, 16, 32): sleep_time, n = x._get_inc_interval(i, False) self.assertEqual(x.max_retry_interval, sleep_time) self.assertEqual(2 * i, n) sleep_time, n = x._get_inc_interval(i, True) self.assertTrue(x.max_retry_interval >= sleep_time) self.assertEqual(2 * i, n) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/tests/utils.py0000664000175000017500000000145200000000000017460 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib @contextlib.contextmanager def nested(*contexts): with contextlib.ExitStack() as stack: yield [stack.enter_context(c) for c in contexts] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/oslo_db/warning.py0000664000175000017500000000220500000000000016620 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Custom warnings.""" class NotSupportedWarning(Warning): """Warn that an argument or call that was passed is not supported. This subclasses Warning so that it can be filtered as a distinct category. .. seealso:: https://docs.python.org/2/library/warnings.html """ class OsloDBDeprecationWarning(DeprecationWarning): """Issued per usage of a deprecated API. This subclasses DeprecationWarning so that it can be filtered as a distinct category. .. seealso:: https://docs.python.org/2/library/warnings.html """ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5205948 oslo.db-16.0.0/releasenotes/0000775000175000017500000000000000000000000015652 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1720086402.544606 oslo.db-16.0.0/releasenotes/notes/0000775000175000017500000000000000000000000017002 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/MySQL-python-no-longer-tested-2a6c32cce6b03215.yaml0000664000175000017500000000067100000000000027536 0ustar00zuulzuul00000000000000--- deprecations: - | PyMySQL is a default MySQL DB API driver for oslo.db, as well as for the whole OpenStack. So far it was possible to use MySQL-python as an alternative DB API driver. This driver is no longer being tested in this release, hence it should be considered unsupported. Please switch to PyMySQL, which is an adequate replacement. Refer to https://wiki.openstack.org/wiki/PyMySQL_evaluation for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/add-reno-e5c2f63e73c25959.yaml0000664000175000017500000000007100000000000023530 0ustar00zuulzuul00000000000000--- other: - Introduce reno for deployer release notes.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/add_connection_parameters-231aa7d8b7d2d416.yaml0000664000175000017500000000044700000000000027273 0ustar00zuulzuul00000000000000--- features: - | Added new option connection_parameters which allows SQLAlchemy query parameters to be stated separately from the URL itself, to allow URL-persistence schemes like Nova cells to use controller-local query parameters that aren't broadcast to all other servers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/add_facade_started-14f9bc34fac89371.yaml0000664000175000017500000000055100000000000025653 0ustar00zuulzuul00000000000000--- features: - | Added new ``.is_started`` boolean flag to enginefacade context manager and factory objects, so that double-configure scenarios can be prevented by calling code. 
Additionally, the ``TypeError`` raised when configure is called after the factory is started is now a specific subclass ``enginefacade.AlreadyStartedError``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/add_wsrep_sync_wait-e3c5a9f4bc08b203.yaml0000664000175000017500000000057700000000000026214 0ustar00zuulzuul00000000000000--- features: - | Added new option mysql_wsrep_sync_wait which sets the Galera "wsrep_sync_wait" variable on server login. This session-level variable allows Galera to ensure that writesets are fully up to date before running new queries, and may be used to tune application behavior when multiple Galera masters are targeted for SQL operations simultaneously. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/connection_debug_min_max-bf6d53d49be7ca52.yaml0000664000175000017500000000051100000000000027261 0ustar00zuulzuul00000000000000--- upgrade: - The allowed values for the ``connection_debug`` option are now restricted to the range between 0 and 100 (inclusive). Previously a number lower than 0 or higher than 100 could be given without error. But now, a ``ConfigFileValueError`` will be raised when the option value is outside this range. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/deprecate-TpoolDbapiWrapper-2ce78aa7cbb9e585.yaml0000664000175000017500000000066500000000000027613 0ustar00zuulzuul00000000000000--- deprecations: - | The ``oslo_db.concurrency.TpoolDbapiWrapper`` class and supporting ``[database] use_tpool`` config option are now deprecated. This feature never graduated from experimental status and is slated for removal due to lack of maintenance and test coverage. Users should switch to ``oslo_db.api.DBAPI.from_config`` and remove references to the deprecated config option from their documentation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/deprecate-insert-from-select-ea831381ebd7e7cf.yaml0000664000175000017500000000041100000000000027720 0ustar00zuulzuul00000000000000--- deprecations: - class ``InsertFromSelect`` from module ``oslo_db.sqlalchemy.utils`` is deprecated in favor of ``sqlalchemy.sql.expression.Insert.from_select()`` method of Insert expression, that is available in SQLAlchemy versions 1.0.0 and newer ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/deprecate-mysql-ndb-cluster-support-cdcaa177b6a6773c.yaml0000664000175000017500000000024500000000000031273 0ustar00zuulzuul00000000000000--- deprecations: - | MySQL NDB Cluster support has been deprecated for removal. It appears no one is using this functionality and it's poorly understood. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/deprecate-sqlalchemy-migrate-6f899935615d6984.yaml0000664000175000017500000000130500000000000027366 0ustar00zuulzuul00000000000000--- deprecations: - | The ``oslo_db.sqlalchemy.migration`` module is deprecated for removal. It only supports ``sqlalchemy-migrate``, which is no longer under active development and has been effectively replaced by ``alembic``. 
Users of this module should consider switching to ``alembic`` or, if necessary, using ``sqlalchemy-migrate`` directly. - | The ``oslo_db.sqlalchemy.migration_cli`` module is deprecated for removal. It was intended to provide an abstraction layer over different migration backends - specifically ``sqlalchemy-migrate`` and ``alembic`` - however, takeup has been limited and its expected that users will use ``alembic`` directly nowadays. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/deprecate_config_sqlite_db-bd41d49343049319.yaml0000664000175000017500000000034400000000000027173 0ustar00zuulzuul00000000000000--- deprecations: - | The configuration option ``sqlite_db`` is now deprecated and will be removed in the future. Please use configuration option ``connection`` or ``slave_connection`` to connect to the database. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/deprecate_idle_timeout-029d9f2cb7184b28.yaml0000664000175000017500000000142000000000000026524 0ustar00zuulzuul00000000000000--- deprecations: - | The configuration option ``idle_timeout`` is now deprecated and has been renamed to ``connection_recycle_time``, including within the main oslo.db options, as well as in the keyword arguments to ``engines.create_engine()``, ``enginefacade.configure()`` and ``enginefacade.configure_defaults()``. The new name more accurately describes what this option does, in that it is not directly related to the "idle" time of the connection itself, nor is the connection disconnected at any specific time. It refers to a rule stating that any connection which has been present more than N seconds as a member of the connection pool will be automatically discarded and replaced the next time it is checked out from the pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/drop-db2-support-6e70fe42268d2238.yaml0000664000175000017500000000024000000000000025070 0ustar00zuulzuul00000000000000--- upgrade: - | Checks specific to the DB2 database have been removed. This database has not been supported by any OpenStack project for many years. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/drop-python27-support-2308d7fbcd66cc22.yaml0000664000175000017500000000017700000000000026333 0ustar00zuulzuul00000000000000--- upgrade: - | Support for Python 2.7 has been dropped. The minimum version of Python now supported is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/enginefacade_decorators-4660862fe22d2669.yaml0000664000175000017500000000040600000000000026513 0ustar00zuulzuul00000000000000--- features: - enginefacade decorators can now be used for class and instance methods, which implicitly receive the first positional argument. Previously, it was required that all decorated functions receive a context value as the first argument. 
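As an illustration only, a minimal sketch of the new behaviour (the ``Node`` model and ``NodeAPI`` class are hypothetical, and an enginefacade is assumed to be configured elsewhere)::

    from oslo_db.sqlalchemy import enginefacade

    class NodeAPI(object):
        @enginefacade.reader
        def get_nodes(self, context):
            # 'self' is now handled implicitly; the decorator locates the
            # context among the remaining positional arguments and attaches
            # a reader session to it before the method body runs.
            return context.session.query(Node).all()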
././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=oslo.db-16.0.0/releasenotes/notes/fix-mysql-duplicate-key-error-information-update-548888bc44b8dbd7.yaml 22 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/fix-mysql-duplicate-key-error-information-update-548888bc44b8dbd7.0000664000175000017500000000052400000000000032663 0ustar00zuulzuul00000000000000--- fixes: - | In mysql 8.0.19, duplicate key error information is extended to include the table name of the key. Previously, duplicate key error information included only the key value and key name. This extends capabilities to handle changes in duplicate key error information with newer mysql version since 8.0.19. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/fix_mysql_wsrsp-0ef98dec5ea3759f.yaml0000664000175000017500000000074100000000000025537 0ustar00zuulzuul00000000000000--- fixes: - | The newly added mysql_wsrep_sync_wait parameter now defaults to non-present in the enginefacade's default configuration options, so that it is not configured in a MySQL / MariaDB database by default, unless passed in the options explicitly. Previously, the default value was "0", meaning the wsrep_sync_wait parameter would be set unconditionally on new connections, which would fail for MySQL backends that don't provide for this setting. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/fix_synchronous_reader-ca442ca9f07470ec.yaml0000664000175000017500000000054300000000000026742 0ustar00zuulzuul00000000000000--- fixes: - | Repaired the "synchronous_reader" modifier of enginefacade so that it refers to the "writer" engine when set to True, thereby allowing "synchronous" behavior with the writer. When set to False, this is "asynchronous", so this should be associated with the async engines. The flag had the reverse behavior previously. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/increase-default-max-overflow-0af787268807f926.yaml0000664000175000017500000000175500000000000027555 0ustar00zuulzuul00000000000000--- upgrade: - | The default value of ``max_overflow`` config option has been increased from 10 to 50 in order to allow OpenStack services heavily using DBs to better handle spikes of concurrent requests and lower the probability of getting a pool timeout issue. This change potentially leads to increasing of the number of open connections to an RDBMS server. Depending on the configuration, you may see "too many connections" errors in logs of OpenStack services / RDBMS server. 
The max limit of connections can be set by the means of these config options: http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_connections http://www.postgresql.org/docs/current/static/runtime-config-connection.html#GUC-MAX-CONNECTIONS For details, please see the following LP: https://bugs.launchpad.net/oslo.db/+bug/1535375 and the ML thread: http://lists.openstack.org/pipermail/openstack-dev/2015-December/082717.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/new-db-fixtures-58223e3926122413.yaml0000664000175000017500000000027000000000000024537 0ustar00zuulzuul00000000000000--- deprecations: - base test classes from ``oslo_db.sqlalchemy.test_base`` are deprecated in favor of new fixtures introduced in ``oslo_db.sqlalchemy.test_fixtures`` module ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=oslo.db-16.0.0/releasenotes/notes/remove-ModelsMigrationsSync-check_foreign_keys-467e0dbeb65a8c86.yaml 22 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/remove-ModelsMigrationsSync-check_foreign_keys-467e0dbeb65a8c86.ya0000664000175000017500000000110200000000000033063 0ustar00zuulzuul00000000000000--- upgrade: - | The ``check_foreign_keys`` helper of the ``oslo_db.sqlalchemy.test_migrations.ModelsMigrationsSync`` base test class has been removed. This was deprecated in 1.4.1 as alembic now supports this capability. - The ``_walk_versions``, ``_migrate_down``, and ``_migrate_up`` methods of the ``oslo_db.sqlalchemy.test_migrations.ModelsMigrationsSync`` base test class have been removed. These were deprecated in 0.5.0 in favour of their non-private equivalents, ``walk_versions``, ``migrate_down``, and ``migrate_up`` respectively. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/remove-NotCommitting-utils-fed6df0e2f85edfa.yaml0000664000175000017500000000101700000000000027717 0ustar00zuulzuul00000000000000--- upgrade: - | The following helpers have been removed from the ``oslo_db.sqlalchemy.utils`` module: - ``NonCommittingConnectable`` - ``NonCommittingEngine`` - ``NonCommittingConnection`` - ``NonCommittingTransaction`` These were unused outside of oslo.db and were not compatible with SQLAlchemy 2.0. In addition, the ``RollsBackTransaction`` fixture has been removed from ``oslo_db.sqlalchemy.test_fixtures``. This was similarly unused and presented similar compatibility issues. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/remove-base-test-classes-557889ec4f072781.yaml0000664000175000017500000000203100000000000026521 0ustar00zuulzuul00000000000000--- upgrade: - | The following test fixtures and base test classes were deprecated and have now been removed: - ``oslo_db.sqlalchemy.test_base.DbFixture`` - ``oslo_db.sqlalchemy.test_base.DbTestCase`` - ``oslo_db.sqlalchemy.test_base.OpportunisticTestCase`` - ``oslo_db.sqlalchemy.test_base.MySQLOpportunisticFixture`` - ``oslo_db.sqlalchemy.test_base.PostgreSQLOpportunisticFixture`` - ``oslo_db.sqlalchemy.test_base.MySQLOpportunisticTestCase`` - ``oslo_db.sqlalchemy.test_base.PostgreSQLOpportunisticTestCase`` They have all been replaced by equivalent test fixtures and test class mixins in ``oslo_db.sqlalchemy.test_fixtures``. 
In addition, the following test cases were being inadvertently used publicly despite being private to oslo.db. They were also deprecated and have now been removed: - ``oslo_db.tests.sqlalchemy.base.DbTestCase`` - ``oslo_db.tests.sqlalchemy.base.MySQLOpportunisticTestCase`` - ``oslo_db.tests.sqlalchemy.base.PostgreSQLOpportunisticTestCase`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/remove-config-option-sqlite_db-7b7c6459135fd8c9.yaml0000664000175000017500000000025600000000000030062 0ustar00zuulzuul00000000000000--- upgrade: - The configuration option ``sqlite_db`` is removed. Pease use configuration option ``connection`` or ``slave_connection`` to connect to the database. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/remove-deprecated-opts-1d095911e82fee3b.yaml0000664000175000017500000000163500000000000026470 0ustar00zuulzuul00000000000000--- upgrade: - | The following deprecated option aliases have been removed: - ``[DEFAULT] sqlite_synchronous`` (use ``[database] sqlite_synchronous``) - ``[DEFAULT] db_backend`` (use ``[database] backend``) - ``[DEFAULT] sql_connection``, ``[DATABASE] sql_connection``, ``[sql] connection`` (use ``[database] connection``) - ``[DEFAULT] sql_max_retries``, ``[DATABASE] sql_max_retries`` (use ``[database] max_retries``) - ``[DEFAULT] sql_retry_interval``, ``[DATABASE] reconnect_interval`` (use ``[database] retry_interval``) - ``[DEFAULT] sql_max_overflow``, ``[DATABASE] sqlalchemy_max_overflow`` (use ``[database] max_overflow``) - ``[DEFAULT] sql_connection_debug`` (use ``[database] connection_debug``) - ``[DEFAULT] sql_connection_trace`` (use ``[database] connection_trace``) - ``[DATABASE] sqlalchemy_pool_timeout`` (use ``[database] pool_timeout``) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/remove-mysql-ndb-cluster-support-fdb19029595070fa.yaml0000664000175000017500000000010300000000000030411 0ustar00zuulzuul00000000000000--- upgrade: - | MySQL NDB Cluster support has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/remove-sqlalchemy-migrate-f69c805004e6bac1.yaml0000664000175000017500000000060100000000000027156 0ustar00zuulzuul00000000000000--- upgrade: - | The ``oslo_db.sqlalchemy.migration`` module and ``migrate`` backend for the ``oslo_db.sqalchemy.migration_cli`` module, both of which were first deprecated in the 8.5.0 release, have now been removed. ``sqlalchemy-migrate`` is no longer under active development, does not support SQLAlchemy 2.0, and has been effectively replaced by ``alembic``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/remove-use_tpool-29a8bf9fc68a9bb2.yaml0000664000175000017500000000023000000000000025556 0ustar00zuulzuul00000000000000--- upgrade: - | The ``oslo_db.concurrency.TpoolDbapiWrapper`` class and supporting ``[database] use_tpool`` config option have been removed. 
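The replacement path is to load the backend through ``oslo_db.api.DBAPI.from_config``. A hedged sketch follows; the backend module path is a placeholder for a module that exposes a ``get_backend()`` callable::

    from oslo_config import cfg
    from oslo_db import api as db_api

    CONF = cfg.CONF

    # 'myservice.db.sqlalchemy.api' is illustrative only; point it at the
    # module whose get_backend() returns your DB API implementation.
    dbapi = db_api.DBAPI.from_config(
        CONF, backend_mapping={'sqlalchemy': 'myservice.db.sqlalchemy.api'})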
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/removed-deprecated-idle-timeout-051a6a9a792bd8de.yaml0000664000175000017500000000042500000000000030323 0ustar00zuulzuul00000000000000--- fixes: - | Removed the ``[DATABASE] idle_timeout``, ``[database] idle_timeout``, ``[sql] idle_timeout``, ``[DEFAULT] sql_idle_timeout`` and ``[DATABASE] sql_idle_timeout`` options. These were all legacy aliases for ``[database] connection_recycle_time``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/removed-deprecated-min-pool-size-1f351d79fe232129.yaml0000664000175000017500000000034600000000000030207 0ustar00zuulzuul00000000000000--- fixes: - | Removed deprecated database option ``min_pool_size``. This option had no effect and was deprecated in Rocky. For more information see bug `1764786 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/removed-deprecated-sql-max-pool-size-c9b7bfc14c3b6b14.yaml0000664000175000017500000000011700000000000031272 0ustar00zuulzuul00000000000000--- fixes: - | Removed deprecated database option ``sql_max_pool_size``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/sqlalchemy-20-0a193a01c70f805a.yaml0000664000175000017500000000060100000000000024357 0ustar00zuulzuul00000000000000--- features: - | oslo.db now supports SQLAlchemy 2.0. - | A new ``oslo_db.compat`` module has been added. This provides a number of shims for handling differences between SQLAlchemy 1.x and 2.x. upgrade: - | The ability to create engine facades that used autocommit, first deprecated in 12.1.0, has now been removed. This is not supported in SQLAlchemy 2.x. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/warn-incomplete-url-c44cd03baf630c7c.yaml0000664000175000017500000000113000000000000026126 0ustar00zuulzuul00000000000000--- upgrade: - | oslo.db now logs a warning when the connection URL does not explicitly mention a driver. The default driver is still used, but in some cases, such as MySQL, the default is incompatible with the concurrency library eventlet. - | It is strongly recommended to use the `PyMySQL `__ driver when connecting to a MySQL-compatible database to ensure the best compatibility with the concurrency library eventlet. To use PyMySQL, ensure the connection URL is specified with ``mysql+pymysql://`` as the scheme. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/notes/wrap_db_retry-34c7ff2d82afa3f5.yaml0000664000175000017500000000040300000000000025106 0ustar00zuulzuul00000000000000--- fixes: - Decorator ``oslo_db.api.wrap_db_retry`` now defaults to 10 retries. Previously the number of attempts was 0, and users had to explicitly pass ``max_retry_interval`` value greater than 0 to actually enable retries on errors. 
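For illustration, a small sketch of opting into retries explicitly, mirroring the decorator arguments exercised in the unit tests (the decorated function and its body are placeholders)::

    from oslo_db import api as oslo_db_api

    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
    def create_node(context, values):
        # Placeholder for a real database write; when a matching database
        # error is raised, the decorator sleeps with back-off and re-invokes
        # the function, up to max_retries attempts.
        pass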
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1720086402.548608 oslo.db-16.0.0/releasenotes/source/0000775000175000017500000000000000000000000017152 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/2023.1.rst0000664000175000017500000000020200000000000020423 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000020424 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000020424 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1720086402.548608 oslo.db-16.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000020600 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000023051 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1720086402.548608 oslo.db-16.0.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000021307 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000023560 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/conf.py0000664000175000017500000002056600000000000020462 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # oslo.db Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. 
# If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/oslo.db' openstackdocs_bug_project = 'oslo.db' openstackdocs_bug_tag = '' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2016, oslo.db Developers' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'oslo.configReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'oslo.configReleaseNotes.tex', 'oslo.db Release Notes Documentation', 'oslo.db Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'oslo.configreleasenotes', 'oslo.db Release Notes Documentation', ['oslo.db Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'oslo.dbReleaseNotes', 'oslo.db Release Notes Documentation', 'oslo.db Developers', 'oslo.configReleaseNotes', 'An OpenStack library for parsing configuration options from the command' ' line and configuration files.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. 
# texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/index.rst0000664000175000017500000000043700000000000021017 0ustar00zuulzuul00000000000000======================= oslo.db Release Notes ======================= .. toctree:: :maxdepth: 1 unreleased 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/liberty.rst0000664000175000017500000000022200000000000021352 0ustar00zuulzuul00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5205948 oslo.db-16.0.0/releasenotes/source/locale/0000775000175000017500000000000000000000000020411 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5205948 oslo.db-16.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000021363 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1720086402.548608 oslo.db-16.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000023150 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000006343300000000000026212 0ustar00zuulzuul00000000000000# Andi Chandler , 2016. #zanata # Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2019. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. 
#zanata msgid "" msgstr "" "Project-Id-Version: oslo.db\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2023-05-08 10:55+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2023-05-09 12:04+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "10.0.0" msgstr "10.0.0" msgid "11.0.0" msgstr "11.0.0" msgid "11.2.0" msgstr "11.2.0" msgid "12.2.0" msgstr "12.2.0" msgid "12.3.0" msgstr "12.3.0" msgid "12.3.1" msgstr "12.3.1" msgid "13.0.0" msgstr "13.0.0" msgid "2.6.0-9" msgstr "2.6.0-9" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "4.12.0" msgstr "4.12.0" msgid "4.15.0" msgstr "4.15.0" msgid "4.17.1-6" msgstr "4.17.1-6" msgid "4.19.0" msgstr "4.19.0" msgid "4.22.0" msgstr "4.22.0" msgid "4.25.2" msgstr "4.25.2" msgid "4.26.0" msgstr "4.26.0" msgid "4.30.0" msgstr "4.30.0" msgid "4.33.1" msgstr "4.33.1" msgid "4.34.0" msgstr "4.34.0" msgid "4.42.0" msgstr "4.42.0" msgid "4.6.0" msgstr "4.6.0" msgid "4.8.0" msgstr "4.8.0" msgid "4.9.0" msgstr "4.9.0" msgid "5.0.0" msgstr "5.0.0" msgid "7.0.0" msgstr "7.0.0" msgid "8.1.1" msgstr "8.1.1" msgid "8.4.1" msgstr "8.4.1" msgid "8.5.0" msgstr "8.5.0" msgid "8.5.1" msgstr "8.5.1" msgid "9.0.0" msgstr "9.0.0" msgid "9.1.0" msgstr "9.1.0" msgid "" "A new ``oslo_db.compat`` module has been added. This provides a number of " "shims for handling differences between SQLAlchemy 1.x and 2.x." msgstr "" "A new ``oslo_db.compat`` module has been added. This provides a number of " "shims for handling differences between SQLAlchemy 1.x and 2.x." msgid "" "Added new ``.is_started`` boolean flag to enginefacade context manager and " "factory objects, so that double-configure scenarios can be prevented by " "calling code. Additionally, the ``TypeError`` raised when configure is " "called after the factory is started is now a specific subclass " "``enginefacade.AlreadyStartedError``." msgstr "" "Added new ``.is_started`` boolean flag to enginefacade context manager and " "factory objects, so that double-configure scenarios can be prevented by " "calling code. Additionally, the ``TypeError`` raised when configure is " "called after the factory is started is now a specific subclass " "``enginefacade.AlreadyStartedError``." msgid "" "Added new option connection_parameters which allows SQLAlchemy query " "parameters to be stated separately from the URL itself, to allow URL-" "persistence schemes like Nova cells to use controller-local query parameters " "that aren't broadcast to all other servers." msgstr "" "Added new option connection_parameters which allows SQLAlchemy query " "parameters to be stated separately from the URL itself, to allow URL-" "persistence schemes like Nova cells to use controller-local query parameters " "that aren't broadcast to all other servers." msgid "" "Added new option mysql_wsrep_sync_wait which sets the Galera " "\"wsrep_sync_wait\" variable on server login. This session-level variable " "allows Galera to ensure that writesets are fully up to date before running " "new queries, and may be used to tune application behavior when multiple " "Galera masters are targeted for SQL operations simultaneously." msgstr "" "Added new option mysql_wsrep_sync_wait which sets the Galera " "\"wsrep_sync_wait\" variable on server login. 
This session-level variable " "allows Galera to ensure that writesets are fully up to date before running " "new queries, and may be used to tune application behaviour when multiple " "Galera masters are targeted for SQL operations simultaneously." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Checks specific to the DB2 database have been removed. This database has not " "been supported by any OpenStack project for many years." msgstr "" "Checks specific to the DB2 database have been removed. This database has not " "been supported by any OpenStack project for many years." msgid "" "Decorator ``oslo_db.api.wrap_db_retry`` now defaults to 10 retries. " "Previously the number of attempts was 0, and users had to explicitly pass " "``max_retry_interval`` value greater than 0 to actually enable retries on " "errors." msgstr "" "Decorator ``oslo_db.api.wrap_db_retry`` now defaults to 10 retries. " "Previously the number of attempts was 0, and users had to explicitly pass " "``max_retry_interval`` value greater than 0 to actually enable retries on " "errors." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "For details, please see the following LP:" msgstr "For details, please see the following LP:" msgid "" "In addition, the following test cases were being inadvertently used publicly " "despite being private to oslo.db. They were also deprecated and have now " "been removed:" msgstr "" "In addition, the following test cases were being inadvertently used publicly " "despite being private to oslo.db. They were also deprecated and have now " "been removed:" msgid "" "In mysql 8.0.19, duplicate key error information is extended to include the " "table name of the key. Previously, duplicate key error information included " "only the key value and key name. This extends capabilities to handle changes " "in duplicate key error information with newer mysql version since 8.0.19." msgstr "" "In MySQL 8.0.19, duplicate key error information is extended to include the " "table name of the key. Previously, duplicate key error information included " "only the key value and key name. This extends capabilities to handle changes " "in duplicate key error information with newer MySQL version since 8.0.19." msgid "Introduce reno for deployer release notes." msgstr "Introduce reno for deployer release notes." msgid "" "It is strongly recommended to use the `PyMySQL `__ driver when connecting to a MySQL-compatible database to ensure " "the best compatibility with the concurrency library eventlet. To use " "PyMySQL, ensure the connection URL is specified with ``mysql+pymysql://`` as " "the scheme." msgstr "" "It is strongly recommended to use the `PyMySQL `__ driver when connecting to a MySQL-compatible database to ensure " "the best compatibility with the concurrency library eventlet. To use " "PyMySQL, ensure the connection URL is specified with ``mysql+pymysql://`` as " "the scheme." msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "Mitaka Series Release Notes" msgstr "Mitaka Series Release Notes" msgid "" "MySQL NDB Cluster support has been deprecated for removal. It appears no one " "is using this functionality and it's poorly understood." msgstr "" "MySQL NDB Cluster support has been deprecated for removal. It appears no one " "is using this functionality and it's poorly understood." 
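The notes above recommend the PyMySQL driver for MySQL-compatible backends and explicitly name ``enginefacade.configure()``. As a minimal sketch (the credentials, host, database name and model are placeholders, not anything shipped by oslo.db), an application would point the default enginefacade at a ``mysql+pymysql://`` URL and then use the reader/writer decorators::

    from oslo_db.sqlalchemy import enginefacade

    # Configure the application-wide enginefacade with an explicit PyMySQL
    # driver in the connection URL, so eventlet-based services do not fall
    # back to the default C MySQL driver.
    enginefacade.configure(
        connection='mysql+pymysql://openstack_citest:openstack_citest'
                   '@127.0.0.1/openstack_citest')


    @enginefacade.reader
    def count_records(context, model):
        # "context" is expected to be an oslo.context-style RequestContext;
        # enginefacade attaches a session to it for the duration of the call.
        return context.session.query(model).count()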
msgid "New Features" msgstr "New Features" msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "" "PyMySQL is a default MySQL DB API driver for oslo.db, as well as for the " "whole OpenStack. So far it was possible to use MySQL-python as an " "alternative DB API driver. This driver is no longer being tested in this " "release, hence it should be considered unsupported. Please switch to " "PyMySQL, which is an adequate replacement. Refer to https://wiki.openstack." "org/wiki/PyMySQL_evaluation for details." msgstr "" "PyMySQL is a default MySQL DB API driver for oslo.db, as well as for the " "whole OpenStack. So far it was possible to use MySQL-python as an " "alternative DB API driver. This driver is no longer being tested in this " "release, hence it should be considered unsupported. Please switch to " "PyMySQL, which is an adequate replacement. Refer to https://wiki.openstack." "org/wiki/PyMySQL_evaluation for details." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "Removed deprecated database option ``min_pool_size``." msgstr "Removed deprecated database option ``min_pool_size``." msgid "Removed deprecated database option ``sql_max_pool_size``." msgstr "Removed deprecated database option ``sql_max_pool_size``." msgid "" "Removed the ``[DATABASE] idle_timeout``, ``[database] idle_timeout``, " "``[sql] idle_timeout``, ``[DEFAULT] sql_idle_timeout`` and ``[DATABASE] " "sql_idle_timeout`` options. These were all legacy aliases for ``[database] " "connection_recycle_time``." msgstr "" "Removed the ``[DATABASE] idle_timeout``, ``[database] idle_timeout``, " "``[sql] idle_timeout``, ``[DEFAULT] sql_idle_timeout`` and ``[DATABASE] " "sql_idle_timeout`` options. These were all legacy aliases for ``[database] " "connection_recycle_time``." msgid "" "Repaired the \"synchronous_reader\" modifier of enginefacade so that it " "refers to the \"writer\" engine when set to True, thereby allowing " "\"synchronous\" behavior with the writer. When set to False, this is " "\"asynchronous\", so this should be associated with the async engines. The " "flag had the reverse behavior previously." msgstr "" "Repaired the \"synchronous_reader\" modifier of enginefacade so that it " "refers to the \"writer\" engine when set to True, thereby allowing " "\"synchronous\" behaviour with the writer. When set to False, this is " "\"asynchronous\", so this should be associated with the async engines. The " "flag had the reverse behaviour previously." msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "Support for Python 2.7 has been dropped. The minimum version of Python now " "supported is Python 3.6." msgstr "" "Support for Python 2.7 has been dropped. The minimum version of Python now " "supported is Python 3.6." msgid "" "The ``_walk_versions``, ``_migrate_down``, and ``_migrate_up`` methods of " "the ``oslo_db.sqlalchemy.test_migrations.ModelsMigrationsSync`` base test " "class have been removed. These were deprecated in 0.5.0 in favour of their " "non-private equivalents, ``walk_versions``, ``migrate_down``, and " "``migrate_up`` respectively." 
msgstr "" "The ``_walk_versions``, ``_migrate_down``, and ``_migrate_up`` methods of " "the ``oslo_db.sqlalchemy.test_migrations.ModelsMigrationsSync`` base test " "class have been removed. These were deprecated in 0.5.0 in favour of their " "non-private equivalents, ``walk_versions``, ``migrate_down``, and " "``migrate_up`` respectively." msgid "" "The ``check_foreign_keys`` helper of the ``oslo_db.sqlalchemy." "test_migrations.ModelsMigrationsSync`` base test class has been removed. " "This was deprecated in 1.4.1 as alembic now supports this capability." msgstr "" "The ``check_foreign_keys`` helper of the ``oslo_db.sqlalchemy." "test_migrations.ModelsMigrationsSync`` base test class has been removed. " "This was deprecated in 1.4.1 as alembic now supports this capability." msgid "" "The ``oslo_db.concurrency.TpoolDbapiWrapper`` class and supporting " "``[database] use_tpool`` config option are now deprecated. This feature " "never graduated from experimental status and is slated for removal due to " "lack of maintenance and test coverage. Users should switch to ``oslo_db.api." "DBAPI.from_config`` and remove references to the deprecated config option " "from their documentation." msgstr "" "The ``oslo_db.concurrency.TpoolDbapiWrapper`` class and supporting " "``[database] use_tpool`` config option are now deprecated. This feature " "never graduated from experimental status and is slated for removal due to a " "lack of maintenance and test coverage. Users should switch to ``oslo_db.api." "DBAPI.from_config`` and remove references to the deprecated config option " "from their documentation." msgid "" "The ``oslo_db.sqlalchemy.migration_cli`` module is deprecated for removal. " "It was intended to provide an abstraction layer over different migration " "backends - specifically ``sqlalchemy-migrate`` and ``alembic`` - however, " "takeup has been limited and its expected that users will use ``alembic`` " "directly nowadays." msgstr "" "The ``oslo_db.sqlalchemy.migration_cli`` module is deprecated for removal. " "It was intended to provide an abstraction layer over different migration " "backends - specifically ``sqlalchemy-migrate`` and ``alembic`` - however, " "takeup has been limited and its expected that users will use ``alembic`` " "directly nowadays." msgid "" "The ``oslo_db.sqlalchemy.migration`` module and ``migrate`` backend for the " "``oslo_db.sqalchemy.migration_cli`` module, both of which were first " "deprecated in the 8.5.0 release, have now been removed. ``sqlalchemy-" "migrate`` is no longer under active development, does not support SQLAlchemy " "2.0, and has been effectively replaced by ``alembic``." msgstr "" "The ``oslo_db.sqlalchemy.migration`` module and ``migrate`` backend for the " "``oslo_db.sqalchemy.migration_cli`` module, both of which were first " "deprecated in the 8.5.0 release, have now been removed. ``sqlalchemy-" "migrate`` is no longer under active development, does not support SQLAlchemy " "2.0, and has been effectively replaced by ``alembic``." msgid "" "The ``oslo_db.sqlalchemy.migration`` module is deprecated for removal. It " "only supports ``sqlalchemy-migrate``, which is no longer under active " "development and has been effectively replaced by ``alembic``. Users of this " "module should consider switching to ``alembic`` or, if necessary, using " "``sqlalchemy-migrate`` directly." msgstr "" "The ``oslo_db.sqlalchemy.migration`` module is deprecated for removal. 
It " "only supports ``sqlalchemy-migrate``, which is no longer under active " "development and has been effectively replaced by ``alembic``. Users of this " "module should consider switching to ``alembic`` or, if necessary, using " "``sqlalchemy-migrate`` directly." msgid "" "The ability to create engine facades that used autocommit, first deprecated " "in 12.1.0, has now been removed. This is not supported in SQLAlchemy 2.x." msgstr "" "The ability to create engine facades that used autocommit, first deprecated " "in 12.1.0, has now been removed. This is not supported in SQLAlchemy 2.x." msgid "" "The allowed values for the ``connection_debug`` option are now restricted to " "the range between 0 and 100 (inclusive). Previously a number lower than 0 or " "higher than 100 could be given without error. But now, a " "``ConfigFileValueError`` will be raised when the option value is outside " "this range." msgstr "" "The allowed values for the ``connection_debug`` option are now restricted to " "the range between 0 and 100 (inclusive). Previously a number lower than 0 or " "higher than 100 could be given without error. But now, a " "``ConfigFileValueError`` will be raised when the option value is outside " "this range." msgid "" "The configuration option ``idle_timeout`` is now deprecated and has been " "renamed to ``connection_recycle_time``, including within the main oslo.db " "options, as well as in the keyword arguments to ``engines.create_engine()``, " "``enginefacade.configure()`` and ``enginefacade.configure_defaults()``. The " "new name more accurately describes what this option does, in that it is not " "directly related to the \"idle\" time of the connection itself, nor is the " "connection disconnected at any specific time. It refers to a rule stating " "that any connection which has been present more than N seconds as a member " "of the connection pool will be automatically discarded and replaced the next " "time it is checked out from the pool." msgstr "" "The configuration option ``idle_timeout`` is now deprecated and has been " "renamed to ``connection_recycle_time``, including within the main oslo.db " "options, as well as in the keyword arguments to ``engines.create_engine()``, " "``enginefacade.configure()`` and ``enginefacade.configure_defaults()``. The " "new name more accurately describes what this option does, in that it is not " "directly related to the \"idle\" time of the connection itself, nor is the " "connection disconnected at any specific time. It refers to a rule stating " "that any connection which has been present more than N seconds as a member " "of the connection pool will be automatically discarded and replaced the next " "time it is checked out from the pool." msgid "" "The configuration option ``sqlite_db`` is now deprecated and will be removed " "in the future. Please use configuration option ``connection`` or " "``slave_connection`` to connect to the database." msgstr "" "The configuration option ``sqlite_db`` is now deprecated and will be removed " "in the future. Please use configuration option ``connection`` or " "``slave_connection`` to connect to the database." msgid "" "The configuration option ``sqlite_db`` is removed. Pease use configuration " "option ``connection`` or ``slave_connection`` to connect to the database." msgstr "" "The configuration option ``sqlite_db`` is removed. Please use configuration " "option ``connection`` or ``slave_connection`` to connect to the database." 
msgid "" "The default value of ``max_overflow`` config option has been increased from " "10 to 50 in order to allow OpenStack services heavily using DBs to better " "handle spikes of concurrent requests and lower the probability of getting a " "pool timeout issue." msgstr "" "The default value of ``max_overflow`` config option has been increased from " "10 to 50 in order to allow OpenStack services heavily using DBs to better " "handle spikes of concurrent requests and lower the probability of getting a " "pool timeout issue." msgid "" "The following helpers have been removed from the ``oslo_db.sqlalchemy." "utils`` module:" msgstr "" "The following helpers have been removed from the ``oslo_db.sqlalchemy." "utils`` module:" msgid "" "The following test fixtures and base test classes were deprecated and have " "now been removed:" msgstr "" "The following test fixtures and base test classes were deprecated and have " "now been removed:" msgid "" "The newly added mysql_wsrep_sync_wait parameter now defaults to non-present " "in the enginefacade's default configuration options, so that it is not " "configured in a MySQL / MariaDB database by default, unless passed in the " "options explicitly. Previously, the default value was \"0\", meaning the " "wsrep_sync_wait parameter would be set unconditionally on new connections, " "which would fail for MySQL backends that don't provide for this setting." msgstr "" "The newly added mysql_wsrep_sync_wait parameter now defaults to non-present " "in the enginefacade's default configuration options, so that it is not " "configured in a MySQL / MariaDB database by default, unless passed in the " "options explicitly. Previously, the default value was \"0\", meaning the " "wsrep_sync_wait parameter would be set unconditionally on new connections, " "which would fail for MySQL backends that don't provide for this setting." msgid "" "These were unused outside of oslo.db and were not compatible with SQLAlchemy " "2.0. In addition, the ``RollsBackTransaction`` fixture has been removed from " "``oslo_db.sqlalchemy.test_fixtures``. This was similarly unused and " "presented similar compatibility issues." msgstr "" "These were unused outside of oslo.db and were not compatible with SQLAlchemy " "2.0. In addition, the ``RollsBackTransaction`` fixture has been removed from " "``oslo_db.sqlalchemy.test_fixtures``. This was similarly unused and " "presented similar compatibility issues." msgid "" "They have all been replaced by equivalent test fixtures and test class " "mixins in ``oslo_db.sqlalchemy.test_fixtures``." msgstr "" "They have all been replaced by equivalent test fixtures and test class " "mixins in ``oslo_db.sqlalchemy.test_fixtures``." msgid "" "This change potentially leads to increasing of the number of open " "connections to an RDBMS server. Depending on the configuration, you may see " "\"too many connections\" errors in logs of OpenStack services / RDBMS " "server. The max limit of connections can be set by the means of these config " "options:" msgstr "" "This change potentially leads to increasing of the number of open " "connections to an RDBMS server. Depending on the configuration, you may see " "\"too many connections\" errors in logs of OpenStack services / RDBMS " "server. The max limit of connections can be set by the means of these config " "options:" msgid "" "This option had no effect and was deprecated in Rocky. For more information " "see bug `1764786 `_." msgstr "" "This option had no effect and was deprecated in Rocky. 
For more information " "see bug `1764786 `_." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "Unreleased Release Notes" msgstr "Unreleased Release Notes" msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "``NonCommittingConnectable``" msgstr "``NonCommittingConnectable``" msgid "``NonCommittingConnection``" msgstr "``NonCommittingConnection``" msgid "``NonCommittingEngine``" msgstr "``NonCommittingEngine``" msgid "``NonCommittingTransaction``" msgstr "``NonCommittingTransaction``" msgid "``oslo_db.sqlalchemy.test_base.DbFixture``" msgstr "``oslo_db.sqlalchemy.test_base.DbFixture``" msgid "``oslo_db.sqlalchemy.test_base.DbTestCase``" msgstr "``oslo_db.sqlalchemy.test_base.DbTestCase``" msgid "``oslo_db.sqlalchemy.test_base.MySQLOpportunisticFixture``" msgstr "``oslo_db.sqlalchemy.test_base.MySQLOpportunisticFixture``" msgid "``oslo_db.sqlalchemy.test_base.MySQLOpportunisticTestCase``" msgstr "``oslo_db.sqlalchemy.test_base.MySQLOpportunisticTestCase``" msgid "``oslo_db.sqlalchemy.test_base.OpportunisticTestCase``" msgstr "``oslo_db.sqlalchemy.test_base.OpportunisticTestCase``" msgid "``oslo_db.sqlalchemy.test_base.PostgreSQLOpportunisticFixture``" msgstr "``oslo_db.sqlalchemy.test_base.PostgreSQLOpportunisticFixture``" msgid "``oslo_db.sqlalchemy.test_base.PostgreSQLOpportunisticTestCase``" msgstr "``oslo_db.sqlalchemy.test_base.PostgreSQLOpportunisticTestCase``" msgid "``oslo_db.tests.sqlalchemy.base.DbTestCase``" msgstr "``oslo_db.tests.sqlalchemy.base.DbTestCase``" msgid "``oslo_db.tests.sqlalchemy.base.MySQLOpportunisticTestCase``" msgstr "``oslo_db.tests.sqlalchemy.base.MySQLOpportunisticTestCase``" msgid "``oslo_db.tests.sqlalchemy.base.PostgreSQLOpportunisticTestCase``" msgstr "``oslo_db.tests.sqlalchemy.base.PostgreSQLOpportunisticTestCase``" msgid "and the ML thread:" msgstr "and the ML thread:" msgid "" "base test classes from ``oslo_db.sqlalchemy.test_base`` are deprecated in " "favor of new fixtures introduced in ``oslo_db.sqlalchemy.test_fixtures`` " "module" msgstr "" "base test classes from ``oslo_db.sqlalchemy.test_base`` are deprecated in " "favour of new fixtures introduced in ``oslo_db.sqlalchemy.test_fixtures`` " "module" msgid "" "class ``InsertFromSelect`` from module ``oslo_db.sqlalchemy.utils`` is " "deprecated in favor of ``sqlalchemy.sql.expression.Insert.from_select()`` " "method of Insert expression, that is available in SQLAlchemy versions 1.0.0 " "and newer" msgstr "" "class ``InsertFromSelect`` from module ``oslo_db.sqlalchemy.utils`` is " "deprecated in favor of ``sqlalchemy.sql.expression.Insert.from_select()`` " "method of Insert expression, that is available in SQLAlchemy versions 1.0.0 " "and newer" msgid "" "enginefacade decorators can now be used for class and instance methods, " "which implicitly receive the first positional argument. Previously, it was "
Previously, it was " "required that all decorated functions receive a context value as the first " "argument." msgid "" "http://dev.mysql.com/doc/refman/5.7/en/server-system-variables." "html#sysvar_max_connections http://www.postgresql.org/docs/current/static/" "runtime-config-connection.html#GUC-MAX-CONNECTIONS" msgstr "" "http://dev.mysql.com/doc/refman/5.7/en/server-system-variables." "html#sysvar_max_connections http://www.postgresql.org/docs/current/static/" "runtime-config-connection.html#GUC-MAX-CONNECTIONS" msgid "" "http://lists.openstack.org/pipermail/openstack-dev/2015-December/082717.html" msgstr "" "http://lists.openstack.org/pipermail/openstack-dev/2015-December/082717.html" msgid "https://bugs.launchpad.net/oslo.db/+bug/1535375" msgstr "https://bugs.launchpad.net/oslo.db/+bug/1535375" msgid "oslo.db Release Notes" msgstr "oslo.db Release Notes" msgid "" "oslo.db now logs a warning when the connection URL does not explicitly " "mention a driver. The default driver is still used, but in some cases, such " "as MySQL, the default is incompatible with the concurrency library eventlet." msgstr "" "oslo.db now logs a warning when the connection URL does not explicitly " "mention a driver. The default driver is still used, but in some cases, such " "as MySQL, the default is incompatible with the concurrency library eventlet." msgid "oslo.db now supports SQLAlchemy 2.0." msgstr "oslo.db now supports SQLAlchemy 2.0." ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1720086402.5205948 oslo.db-16.0.0/releasenotes/source/locale/fr/0000775000175000017500000000000000000000000021020 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1720086402.548608 oslo.db-16.0.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000022605 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175000017500000000312600000000000025640 0ustar00zuulzuul00000000000000# Gérald LONLAS , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: oslo.db Release Notes 4.18.1.dev1\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-03-14 11:56+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 05:59+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "2.6.0-9" msgstr "2.6.0-9" msgid "4.12.0" msgstr "4.12.0" msgid "4.6.0" msgstr "4.6.0" msgid "4.8.0" msgstr "4.8.0" msgid "4.9.0" msgstr "4.9.0" msgid "Bug Fixes" msgstr "Corrections de bugs" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Liberty Series Release Notes" msgstr "Note de release pour Liberty" msgid "Mitaka Series Release Notes" msgstr "Note de release pour Mitaka" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Newton Series Release Notes" msgstr "Note de release pour Newton" msgid "Other Notes" msgstr "Autres notes" msgid "Unreleased Release Notes" msgstr "Note de release pour les changements non déployées" msgid "Upgrade Notes" msgstr "Notes de mises à jours" msgid "" "http://lists.openstack.org/pipermail/openstack-dev/2015-December/082717.html" msgstr "" "http://lists.openstack.org/pipermail/openstack-dev/2015-December/082717.html" msgid "https://bugs.launchpad.net/oslo.db/+bug/1535375" msgstr "https://bugs.launchpad.net/oslo.db/+bug/1535375" msgid "oslo.db Release Notes" msgstr "Note de release pour oslo.db" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/mitaka.rst0000664000175000017500000000023200000000000021147 0ustar00zuulzuul00000000000000=================================== Mitaka Series Release Notes =================================== .. release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/newton.rst0000664000175000017500000000021600000000000021215 0ustar00zuulzuul00000000000000============================= Newton Series Release Notes ============================= .. release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000020766 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000020634 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000021201 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. 
release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000021026 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000021021 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000021025 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000014400000000000022032 0ustar00zuulzuul00000000000000========================== Unreleased Release Notes ========================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000021230 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/victoria.rst0000664000175000017500000000022000000000000021516 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000021400000000000021334 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/xena.rst0000664000175000017500000000020000000000000020627 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000020633 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. 
release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/releasenotes/source/zed.rst0000664000175000017500000000017400000000000020470 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/requirements.txt0000664000175000017500000000064200000000000016447 0ustar00zuulzuul00000000000000pbr>=2.0.0 # Apache-2.0 alembic>=0.9.6 # MIT debtcollector>=1.2.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 SQLAlchemy>=1.4.0 # MIT stevedore>=1.20.0 # Apache-2.0 # these are used by downstream libraries that require # oslo.db as one of their test requirements - do not remove! testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1720086402.548608 oslo.db-16.0.0/setup.cfg0000664000175000017500000000213500000000000015003 0ustar00zuulzuul00000000000000[metadata] name = oslo.db summary = Oslo Database library description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/oslo.db/latest python_requires = >=3.8 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3 :: Only Programming Language :: Python :: Implementation :: CPython [extras] mysql = PyMySQL>=0.7.6 # MIT License postgresql = psycopg2>=2.8.0 # LGPL/ZPL [files] packages = oslo_db [entry_points] oslo.config.opts = oslo.db = oslo_db.options:list_opts oslo.db.migration = alembic = oslo_db.sqlalchemy.migration_cli.ext_alembic:AlembicExtension [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/setup.py0000664000175000017500000000127100000000000014674 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/test-requirements.txt0000664000175000017500000000063400000000000017425 0ustar00zuulzuul00000000000000hacking>=6.1.0,<6.2.0 # Apache-2.0 coverage>=4.0 # Apache-2.0 eventlet>=0.18.2 # MIT fixtures>=3.0.0 # Apache-2.0/BSD python-subunit>=1.0.0 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 stestr>=2.0.0 # Apache-2.0 testtools>=2.2.0 # MIT bandit>=1.7.0,<1.8.0 # Apache-2.0 pifpaf>=0.10.0 # Apache-2.0 PyMySQL>=0.7.6 # MIT License psycopg2>=2.8.0 # LGPL/ZPL pre-commit>=2.6.0 # MIT ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1720086402.548608 oslo.db-16.0.0/tools/0000775000175000017500000000000000000000000014321 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/tools/run-pifpaf-tests.sh0000775000175000017500000000043700000000000020073 0ustar00zuulzuul00000000000000#!/bin/bash set -e # Replace mysql:// by mysql+pymysql:// and add sqlite export OS_TEST_DBAPI_ADMIN_CONNECTION="${OS_TEST_DBAPI_ADMIN_CONNECTION/#mysql:/mysql+pymysql:};sqlite://" echo $OS_TEST_DBAPI_ADMIN_CONNECTION TEST_EVENTLET=0 stestr run $* TEST_EVENTLET=1 stestr run --combine $* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/tools/test-setup.sh0000775000175000017500000000373600000000000017006 0ustar00zuulzuul00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should setup their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests, if you change it, # your tests might fail. DB_USER=openstack_citest DB_PW=openstack_citest sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # a anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW'; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;" # Now create our database. mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # Same for PostgreSQL # The root password for the PostgreSQL database; pass it in via # POSTGRES_ROOT_PW. 
DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave} # Setup user root_roles=$(sudo -H -u postgres psql -t -c " SELECT 'HERE' from pg_roles where rolname='$DB_USER'") if [[ ${root_roles} == *HERE ]];then sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" else sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" fi # Store password for tests cat << EOF > $HOME/.pgpass *:*:*:$DB_USER:$DB_PW EOF chmod 0600 $HOME/.pgpass # Now create our database psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1720086376.0 oslo.db-16.0.0/tox.ini0000664000175000017500000000511400000000000014475 0ustar00zuulzuul00000000000000[tox] minversion = 3.18.0 envlist = py3,pep8 ignore_basepython_conflict = true [testenv] basepython = python3 allowlist_externals = env passenv = OS_TEST_DBAPI_ADMIN_CONNECTION setenv = OS_STDOUT_CAPTURE=true OS_STDERR_CAPTURE=true BASECOMMAND=stestr run {postgresql,all}: PIFPAF_POSTGRESQL=pifpaf -g OS_TEST_DBAPI_ADMIN_CONNECTION run postgresql -- {mysql,all}: PIFPAF_MYSQL=pifpaf -g OS_TEST_DBAPI_ADMIN_CONNECTION run mysql -- {mysql,postgresql,all}: BASECOMMAND={toxinidir}/tools/run-pifpaf-tests.sh # TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0 SQLALCHEMY_WARN_20=1 deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt commands = {env:PIFPAF_MYSQL:} {env:PIFPAF_POSTGRESQL:} {env:BASECOMMAND:} {posargs} [testenv:pep8] commands = pre-commit run -a # Run security linter bandit -r oslo_db -x tests -x oslo_db/tests -n5 --skip B105,B311 [testenv:venv] commands = {posargs} [testenv:cover] setenv = PYTHON=coverage run --source oslo_db --parallel-mode commands = coverage erase stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml coverage report --show-missing [testenv:docs] allowlist_externals = rm deps = {[testenv]deps} -r{toxinidir}/doc/requirements.txt commands = rm -rf doc/build doc/source/reference/api doc8 -e .rst CONTRIBUTING.rst HACKING.rst README.rst doc/source sphinx-build -W --keep-going -b html doc/source doc/build/html [testenv:releasenotes] allowlist_externals = rm deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = rm -rf releasenotes/build sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html [flake8] # E123, E125 skipped as they are invalid PEP-8. ignore = E123,E125,W504 show-source = True exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build [hacking] import_exceptions = oslo_db._i18n [testenv:bindep] # Do not install any requirements. We want this to be fast and work even if # system dependencies are missing, since it's used to tell you what system # dependencies are missing! This also means that bindep must be installed # separately, outside of the requirements files, and develop mode disabled # explicitly to avoid unnecessarily installing the checked-out repo too (this # further relies on "tox.skipsdist = True" above). deps = bindep commands = bindep test usedevelop = False
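The test requirements and tox environments above provision throwaway MySQL and PostgreSQL servers for oslo.db's opportunistic tests. A minimal sketch of such a test written against the ``oslo_db.sqlalchemy.test_fixtures`` module referenced in the release notes (class and test names are illustrative only; the test skips itself when no ``openstack_citest`` MySQL database is reachable)::

    import sqlalchemy as sa

    from oslo_db.sqlalchemy import enginefacade
    from oslo_db.sqlalchemy import test_fixtures
    from oslotest import base


    class MySQLConnectTest(test_fixtures.OpportunisticDBTestMixin,
                           base.BaseTestCase):
        # Ask for a MySQL backend; when no database is available the test is
        # skipped rather than failed.
        FIXTURE = test_fixtures.MySQLOpportunisticFixture

        def setUp(self):
            super().setUp()
            # The fixture points the global enginefacade at the provisioned
            # database, so the engine can be fetched from it directly.
            self.engine = enginefacade.writer.get_engine()

        def test_select_one(self):
            with self.engine.connect() as conn:
                self.assertEqual(
                    1, conn.execute(sa.text('SELECT 1')).scalar())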